blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
de24b948950fea8fe059ee56e24e0d2eb0f95681
|
Python
|
parduman/lovebabbar450
|
/tree/inorderTreeTraversal.py
|
UTF-8
| 716 | 4.09375 | 4 |
[] |
no_license
|
class Node:
    """A binary-tree node: a payload plus optional left/right children."""

    def __init__(self, data=None, left=None, right=None):
        # Defaults produce an empty leaf; children are Node instances or None.
        self.data, self.left, self.right = data, left, right
def inorderTraversalRecursion(node):
    """Print the inorder (left, root, right) sequence of the tree rooted at
    *node*, space-separated on one line. A None node prints nothing."""
    if node is None:
        return
    # Recursing into a None child is a no-op, so explicit guards are not needed.
    inorderTraversalRecursion(node.left)
    print(node.data, end=' ')
    inorderTraversalRecursion(node.right)
def inorderTaraversalIteration(node):
    """Iterative inorder traversal using an explicit stack.

    Prints node data space-separated, producing exactly the same output as
    inorderTraversalRecursion. (The original was an unimplemented `pass`
    stub; the misspelled name is kept for backward compatibility.)
    """
    stack = []
    current = node
    while stack or current:
        # Walk as far left as possible, remembering ancestors.
        while current:
            stack.append(current)
            current = current.left
        # Visit the deepest unvisited node, then turn right.
        current = stack.pop()
        print(current.data, end=' ')
        current = current.right
# Build a small test tree:
#         1
#        / \
#       2   3
#      /   / \
#     4   5   6
#        / \
#       7   8
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.right.left = Node(5)
root.right.right = Node(6)
root.right.left.left = Node(7)
root.right.left.right = Node(8)
# Expected inorder output:
# 4 2 1 7 5 8 3 6
inorderTraversalRecursion(root)
| true |
fcf2ffff2f47aad7c4a85fcb80bab0498461b513
|
Python
|
GMwang550146647/network
|
/0.leetcode/3.刷题/3.数学/???.素数(core).py
|
UTF-8
| 1,332 | 3.859375 | 4 |
[] |
no_license
|
from fundamentals.test_time import test_time
import math
"""
寻找 [2,n)范围的素数
"""
class Solution():
    """Count primes in [2, n) two ways: naive trial division and the
    Sieve of Eratosthenes. Methods are wrapped in the project-local
    @test_time decorator so their run times can be compared."""

    def __init__(self):
        pass

    @test_time
    def countPrimes(self, n):
        """Trial division: test each candidate against divisors up to
        its square root.

        Returns (count, list_of_primes).
        """
        def isPrime(m):
            # Probe divisors 2..sqrt(m); the caller only passes m >= 2.
            i = 2
            while i * i <= m:
                if m % i == 0:
                    return False
                i += 1
            return True

        count = 0
        primes = []
        for i in range(2, n):
            if isPrime(i):
                count += 1
                primes.append(i)
        return count, primes

    @test_time
    def countPrimes_Sieve_of_Eratoshthenes(self, n):
        """Sieve of Eratosthenes: each time a prime is found, strike out
        all of its multiples below n. Returns the prime count."""
        is_prime = [True] * n
        # BUG FIX: clamp the sieve bound to n. For n == 2 the original
        # range(2, ceil(sqrt(n)) + 1) probed index 2 of a length-2 list
        # and raised IndexError.
        for i in range(2, min(n, math.ceil(n ** 0.5) + 1)):
            if is_prime[i]:
                for j in range(i * i, n, i):
                    is_prime[j] = False
        count = 0
        for i in range(2, n):
            if is_prime[i]:
                count += 1
        return count

    def main(self):
        """Run both implementations on a small sample input."""
        nums = 11
        self.countPrimes(nums)
        self.countPrimes_Sieve_of_Eratoshthenes(nums)
if __name__ == '__main__':
    # Exercise both prime counters (results/timings printed by @test_time).
    Solution().main()
| true |
e66c1c3d76f90c1475fd11f5ce1825d9165f0c70
|
Python
|
VINITHAKO/pro
|
/41.py
|
UTF-8
| 486 | 2.640625 | 3 |
[] |
no_license
|
# Read two space-separated integers from stdin (competitive-judge input).
c1,d1=input().split()
c1=int(c1)
d1=int(d1)
m2=''   # output string of '0'/'1' characters, built left to right
n1=2    # next position at which to emit a '0' (long-input branch only)
if(c1+d1<=3):
    # Short inputs: simply alternate starting with '1' ("1", "10", "101", ...).
    for i in range(0,c1+d1):
        if(i%2!=0):
            m2=m2+'0'
        else:
            m2=m2+'1'
else:
    # Longer inputs: place a '0' at position n1, advancing n1 by 2 once it
    # reaches d1, otherwise by 3; every other position gets a '1'.
    for i in range(0,c1+d1):
        if(i==n1):
            m2=m2+'0'
            if(n1==d1):
                n1=n1+2
            else:
                n1=n1+3
        else:
            m2=m2+'1'
y1=len(m2)-1
# A trailing '0' marks an impossible arrangement for this judge problem.
if(int(m2[y1])==0):
    print('-1')
# NOTE(review): special-cased judge answer for input "1 2" — presumably a
# patch for one failing test case; confirm against the problem statement.
elif c1==1 and d1==2: print("011")
else:
    print(m2)
| true |
126190f5d09e97abf7e73a67cba64c4aa9166423
|
Python
|
baranbbr/MinecraftChatbot
|
/inputConsole.py
|
UTF-8
| 476 | 2.890625 | 3 |
[] |
no_license
|
# This file is for testing the code within the console.
import meaning
from ChatBot import inputRefactor
from ChatBot import messageHandler
refactorer = inputRefactor.InputRefactor()
meaning = meaning.Meaning()  # NOTE: rebinds the imported module name 'meaning' to an instance
messageHandler = messageHandler.MessageHandler()  # likewise shadows the imported module name
# Simple console REPL: read a line, normalise/tokenise it, print the bot reply.
while True:
    msg = input("Talk to me: ")
    refactored_input = refactorer.nonLetterRemover(msg)
    # NOTE(review): the tokenised result is never used — generate_response
    # receives the raw msg; confirm whether that is intended.
    refactored_input = refactorer.tokenise(refactored_input)
    print(messageHandler.generate_response(msg, 1))
| true |
5bed25b7b9f52ea2b2ee15b373e8db7b69f4dab5
|
Python
|
OrganizaP/Python
|
/Sort algorithms/bubble.py
|
UTF-8
| 373 | 3.71875 | 4 |
[] |
no_license
|
import numpy as np
a = -12.
b = 12.
N = 4

# Random test vector: N uniform floats drawn from [a, b)
v = np.random.rand(N) * (b - a) + a
#print (v)

# Bubble sort, ascending: keep sweeping until a full pass makes no swaps.
# The vector is printed before every pass (including the final clean one).
is_sorted = False
while not is_sorted:
    print(v)
    is_sorted = True
    for i in range(N - 1):
        if v[i + 1] < v[i]:
            v[i], v[i + 1] = v[i + 1], v[i]
            is_sorted = False
| true |
9243e309e7a7298be833c3b64ad750839dc6fd89
|
Python
|
kimkh0930/practice
|
/python/my_first_module/my_email.py
|
UTF-8
| 333 | 3.078125 | 3 |
[
"MIT"
] |
permissive
|
class Email:
    """Minimal e-mail message container; 'sending' just prints the fields."""

    def __init__(self):
        # All fields start empty; callers assign them before send_mail().
        self.from_email = ''
        self.to_email = ''
        self.subject = ''
        self.contents = ''

    def send_mail(self):
        """Print the message headers and body to stdout, one per line."""
        for label, value in (('From', self.from_email),
                             ('To', self.to_email),
                             ('Subject', self.subject),
                             ('Contents', self.contents)):
            print(label + ': ' + value)
| true |
a655371794ad7a18d94f259cd561d30b75c85ed1
|
Python
|
guyBy/PythonWEB
|
/Lesson1/BasicVars.py
|
UTF-8
| 588 | 3.515625 | 4 |
[] |
no_license
|
# Demo of basic numeric operators and f-string formatting.
i = 7
print(f'i = {i}')
i = i + 5
print(f'i after add = {i}')
i -= 5
print(f'i after subtruct = {i}')
# True division always yields a float, even for whole results.
i /= 2
# note that divide i converted to float
print(f'i after divide = {i}')
i *= 4
print(f'i after multiply = {i}')
# integer divide as float
# // on a float operand returns a float (floor of the quotient).
print(f'i after integer divide i = {i}, integer divide = {i // 3},'
      f' normal divide {i / 3:.2f}')
# demonstrate modulo
print(f'modulo: i = {i}, integer divide i//3:{i//3}, reminder i%3: {i%3}')
# int() truncates toward zero.
i = int(i)
print(f'i after convert to int = {i}')
j = 7.0
print(f'j as float = {j}')
my_name = 'Doron'
print(f'my_name as str = {my_name}')
| true |
865c9c5272e70dcf34c3fd89acdcd33e4d463d16
|
Python
|
tfboyd/benchmark_harness
|
/oss_bench/tools/nvidia_test.py
|
UTF-8
| 2,108 | 2.5625 | 3 |
[
"Apache-2.0"
] |
permissive
|
"""Tests nvidia_tools module."""
import unittest
from mock import patch
import tools.nvidia as nvidia
class TestNvidiaTools(unittest.TestCase):
  """Tests for tools.nvidia with the nvidia-smi shell-out mocked."""

  @patch('tools.local_command.run_local_command')
  def test_get_gpu_info(self, mock_run):
    """Driver version and GPU model are parsed from nvidia-smi output."""
    mock_run.return_value = [0, 'blah blah\n381.99, GTX 1080 \n']
    driver, gpu_info = nvidia.get_gpu_info()
    self.assertEqual('381.99', driver)
    self.assertEqual('GTX 1080', gpu_info)

  @patch('tools.local_command.run_local_command')
  def test_get_gpu_info_quadro(self, mock_run):
    """A leading Quadro entry is skipped in favour of the next GPU."""
    mock_run.return_value = [
        0, 'blah\n200.99, Quadro K900 \n381.99, GTX 1080\n'
    ]
    driver, gpu_info = nvidia.get_gpu_info()
    self.assertEqual('381.99', driver)
    self.assertEqual('GTX 1080', gpu_info)

  @patch('tools.local_command.run_local_command')
  def test_get_gpu_count(self, mock_run):
    """Every listed GPU (including the Quadro) is counted."""
    mock_run.return_value = [
        0, 'blah\n200.99, Quadro K900 \n381.99, GTX 1080\n'
    ]
    self.assertEqual(2, nvidia.get_gpu_count())

  @patch('tools.local_command.run_local_command')
  def test_is_ok_to_run_false(self, mock_run):
    """Existing GPU processes in the nvidia-smi snapshot block a run."""
    with open('tools/test_files/example_nvidia-smi_processes.txt') as fixture:
      mock_run.return_value = [0, fixture.read()]
    self.assertFalse(nvidia.is_ok_to_run())

  @patch('tools.local_command.run_local_command')
  def test_is_ok_to_run(self, mock_run):
    """A process-free nvidia-smi snapshot permits a run."""
    with open('tools/test_files/example_nvidia-smi_no_processes.txt') as fixture:
      mock_run.return_value = [0, fixture.read()]
    self.assertTrue(nvidia.is_ok_to_run())
| true |
f8454778ab3ac778df40d217b756945d4412d0be
|
Python
|
cveazey/ProjectEuler
|
/2/e2.py
|
UTF-8
| 311 | 3.453125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import itertools
def fib(upper):
    """Return the Fibonacci sequence starting 1, 1 with all terms <= upper."""
    seq = [1, 1]
    while True:
        nxt = seq[-2] + seq[-1]  # renamed: 'next' shadows the builtin
        if nxt > upper:
            break
        seq.append(nxt)
    return seq


def main():
    """Project Euler #2: sum of the even Fibonacci terms up to 4,000,000."""
    # BUG FIX: itertools.ifilterfalse exists only in Python 2 (renamed
    # filterfalse in Python 3), so this crashed under Python 3. A generator
    # expression keeping even terms is equivalent and portable.
    s = sum(x for x in fib(4000000) if x % 2 == 0)
    print('sum is {0}'.format(s))


if __name__ == '__main__':
    main()
| true |
a2fad8c396bf2c095340d89b11ea1a4bb2fb794d
|
Python
|
daniel-reich/ubiquitous-fiesta
|
/g3BokS6KZgyYT8Hjm_22.py
|
UTF-8
| 142 | 3.5 | 4 |
[] |
no_license
|
def shift_to_left(x, y, ans=2):
    """Compute x * 2**y (a left bit-shift for integers) by doubling the
    accumulator *ans* once per recursion step.

    y is expected to be a non-negative integer; the *ans* parameter is an
    implementation detail of the recursion and should be left at its default.
    """
    if y in (0, 1):
        return x if y == 0 else x * ans
    return shift_to_left(x, y - 1, ans + ans)
| true |
f796b537c9853da3fee4deb1a61714178fbba997
|
Python
|
grafana/graphite-web
|
/webapp/graphite/intervals.py
|
UTF-8
| 4,533 | 3.203125 | 3 |
[
"Apache-2.0"
] |
permissive
|
INFINITY = float('inf')
NEGATIVE_INFINITY = -INFINITY
class IntervalSet:
  """An ordered collection of disjoint Interval objects.

  Unless constructed with disjoint=True, overlapping inputs are merged via
  union_overlapping(). ``size`` is the summed length of the members.
  NOTE(review): complement() (and therefore __sub__) assumes the member
  intervals are sorted by start ascending — confirm callers provide sorted
  input, since __init__ does not sort.
  """

  def __init__(self, intervals, disjoint=False):
    self.intervals = intervals
    if not disjoint:
      self.intervals = union_overlapping(self.intervals)
    # Total covered length; may be inf when an interval is unbounded.
    self.size = sum(i.size for i in self.intervals)

  def __repr__(self):
    return repr(self.intervals)

  def __iter__(self):
    return iter(self.intervals)

  def __len__(self):
    return len(self.intervals)

  def __getitem__(self, i):
    return self.intervals[i]

  def __nonzero__(self):  # Python 2 truthiness: empty/zero-size set is falsy
    return self.size != 0

  def __sub__(self, other):
    # Set difference: A - B == A intersected with complement(B).
    return self.intersect( other.complement() )

  def complement(self):
    """Return the IntervalSet covering everything this set does not."""
    complementary = []
    cursor = NEGATIVE_INFINITY
    # Sweep left to right, emitting the gap before each interval.
    for interval in self.intervals:
      if cursor < interval.start:
        complementary.append( Interval(cursor, interval.start) )
      cursor = interval.end
    if cursor < INFINITY:
      complementary.append( Interval(cursor, INFINITY) )
    return IntervalSet(complementary, disjoint=True)

  def intersect(self, other): #XXX The last major bottleneck. Factorial-time hell.
    # Then again, this function is entirely unused...
    if (not self) or (not other):
      return IntervalSet([])
    #earliest = max(self.intervals[0].start, other.intervals[0].start)
    #latest = min(self.intervals[-1].end, other.intervals[-1].end)
    #mine = [i for i in self.intervals if i.start >= earliest and i.end <= latest]
    #theirs = [i for i in other.intervals if i.start >= earliest and i.end <= latest]
    # Pairwise intersection of every interval with every other (O(n*m)).
    intersections = [x for x in (i.intersect(j)
                                 for i in self.intervals
                                 for j in other.intervals)
                     if x]
    return IntervalSet(intersections, disjoint=True)

  def intersect_interval(self, interval):
    """Intersect every member with a single Interval."""
    intersections = [x for x in (i.intersect(interval)
                                 for i in self.intervals)
                     if x]
    return IntervalSet(intersections, disjoint=True)

  def union(self, other):
    # Sorting lets the constructor's union_overlapping pass merge properly.
    return IntervalSet( sorted(self.intervals + other.intervals) )
class Interval:
  """A numeric interval [start, end] supporting intersection and union.

  Endpoints may be +/- infinity; ``size`` is the interval length. Raises
  ValueError if end < start.
  """

  def __init__(self, start, end):
    if end - start < 0:
      raise ValueError("Invalid interval start=%s end=%s" % (start, end))
    self.start = start
    self.end = end
    self.tuple = (start, end)
    self.size = self.end - self.start

  def __eq__(self, other):
    return self.tuple == other.tuple

  def __ne__(self, other):
    return self.tuple != other.tuple

  def __hash__(self):
    return hash( self.tuple )

  # BUG FIX: the rich comparisons compared self.start with itself, so
  # < and > were always False and <= / >= always True regardless of the
  # operand. They now compare against *other*, matching __cmp__ below.
  def __lt__(self, other):
    return self.start < other.start

  def __le__(self, other):
    return self.start <= other.start

  def __gt__(self, other):
    return self.start > other.start

  def __ge__(self, other):
    return self.start >= other.start

  def __cmp__(self, other):  # Python 2 ordering, by start only
    return (self.start > other.start) - (self.start < other.start)

  def __len__(self):
    raise TypeError("len() doesn't support infinite values, use the 'size' attribute instead")

  def __nonzero__(self):  # Python 2
    return self.size != 0

  def __bool__(self):  # Python 3
    return self.size != 0

  def __repr__(self):
    return '<Interval: %s>' % str(self.tuple)

  def intersect(self, other):
    """Return the overlapping Interval, or None if they don't overlap."""
    start = max(self.start, other.start)
    end = min(self.end, other.end)
    if end > start:
      return Interval(start, end)

  def overlaps(self, other):
    """True if the two intervals touch or overlap."""
    earlier = self if self.start <= other.start else other
    later = self if earlier is other else other
    return earlier.end >= later.start

  def union(self, other):
    """Return the merged Interval; raises TypeError for disjoint inputs."""
    if not self.overlaps(other):
      raise TypeError("Union of disjoint intervals is not an interval")
    start = min(self.start, other.start)
    end = max(self.end, other.end)
    return Interval(start, end)
def union_overlapping(intervals):
  """Collapse any overlapping neighbours in *intervals* into their unions.

  Assumes the input is ordered by start; returns a new list of disjoint
  intervals (the input list itself is not modified).
  """
  merged = []
  for current in intervals:
    # Either extend the last accepted interval or start a new one.
    if merged and merged[-1].overlaps(current):
      merged[-1] = merged[-1].union(current)
    else:
      merged.append(current)
  return merged
| true |
c5e489f48afc28e64eb57caad63e4c5bbd5cfbde
|
Python
|
Rinatgi/budget
|
/ui.py
|
UTF-8
| 456 | 3.453125 | 3 |
[] |
no_license
|
''' Программа контроля
рахода семейного бюджета'''
from tkinter import Tk
def create_window():
    """Create and return the main Tk window, sized to the full screen
    with a 780x580 minimum."""
    window = Tk()
    window.title('Бюджет семьи')
    # Full screen dimensions in pixels.
    screen_w = window.winfo_screenwidth()
    screen_h = window.winfo_screenheight()
    window.geometry('{}x{}'.format(screen_w, screen_h))
    window.minsize(width=780, height=580)
    return window
| true |
1973ebf240faee2002e0cdb0dae36834daccfd6c
|
Python
|
ramsaran-vuppuluri/LearningPythonHardway
|
/learnPythonTheHardWayEx16.py
|
UTF-8
| 577 | 3.875 | 4 |
[] |
no_license
|
fileName = raw_input("Enter file path")
print "We're going to erase %r."%fileName
print "If you don't wasnt press CTRL+C (^+C)."
print "If you want to continue press RETURN."
raw_input("?")
print "Opening the file"
file = open(fileName, 'w')
print "Truncating the file"
file.truncate()
print "Now I'm going to ask you for three lines:"
line1 = raw_input("Line 1: ")
line2 = raw_input("Line 2: ")
line3 = raw_input("Line 3: ")
print "I'm going to write these lines to the files"
file.write(line1+"\n"+line2+"\n"+line3+"\n")
print "And finally, we close it."
file.close
| true |
f5b9bc8b1bb57c8aafa9a734491bebfd3387ddcf
|
Python
|
dhairyap99/TileMatchingGame
|
/app.py
|
UTF-8
| 2,156 | 3.15625 | 3 |
[] |
no_license
|
#importing the pygame module and specific modules of display and event from pygame
import pygame
import game_config as gc
from animal import Animal
from time import sleep
from pygame import display, event, image
def find_index(x, y):
    """Map a pixel coordinate (x, y) to the linear index of the tile it
    falls on, in row-major order over the gc.NUM_TILES_SIDE-wide grid."""
    tile_row = y // gc.IMAGE_SIZE
    tile_col = x // gc.IMAGE_SIZE
    return tile_row * gc.NUM_TILES_SIDE + tile_col
#initializing the module
pygame.init()
display.set_caption('My Game') #game title
screen = display.set_mode((512, 512)) #application window size
matched = image.load('other_assets/matched.png')
running = True #to control the game loop
tiles = [Animal(i) for i in range(0, gc.NUM_TILES_TOTAL)]
current_images = [] # indices of the (at most two) currently revealed tiles
#game loop begins
while running:
    current_events = event.get()
    for e in current_events:
        if e.type == pygame.QUIT:
            running = False
        if e.type == pygame.KEYDOWN:
            if e.key == pygame.K_ESCAPE:
                running = False
        if e.type == pygame.MOUSEBUTTONDOWN:
            mouse_x, mouse_y = pygame.mouse.get_pos()
            #print(mouse_x, mouse_y)
            index = find_index(mouse_x, mouse_y)
            if index not in current_images:
                current_images.append(index)
                # keep only the two most recent selections
                if len(current_images) > 2:
                    current_images = current_images[1:]
    screen.fill((255, 255, 255))
    total_skipped = 0
    # Draw every unmatched tile: face-up image if selected, cover box
    # otherwise; matched tiles (skip=True) are left blank and counted.
    for _, tile in enumerate(tiles):
        image_i = tile.image if tile.index in current_images else tile.box
        if not tile.skip:
            screen.blit(image_i, (tile.col * gc.IMAGE_SIZE + gc.MARGIN, tile.row * gc.IMAGE_SIZE + gc.MARGIN))
        else:
            total_skipped += 1
    display.flip()
    # With two tiles face up, check for a match and flash the banner.
    if len(current_images) == 2:
        idx1, idx2 = current_images
        if tiles[idx1].name == tiles[idx2].name:
            tiles[idx1].skip = True
            tiles[idx2].skip = True
            # sleep() deliberately freezes the loop so the banner is visible
            sleep(0.4)
            screen.blit(matched, (0,0))
            display.flip()
            sleep(0.4)
            current_images = []
    # all tiles matched -> end the game
    if total_skipped == len(tiles):
        running = False
    display.flip()
print('Goodbye. We hope you enjoyed the game')
| true |
091784d69800243d54e98a3d73a6d1558792da31
|
Python
|
kateliev/TypeRig
|
/Lib/typerig/proxy/fl/objects/sampler.py
|
UTF-8
| 12,534 | 2.59375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# MODULE: Typerig / Proxy / Sampler (Objects)
# -----------------------------------------------------------
# (C) Vassil Kateliev, 2020 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#------------------------------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies ----------------------
from __future__ import print_function
import fontlab as fl6
import fontgate as fgt
import PythonQt as pqt
from typerig.core.objects.point import Point, Void
from typerig.proxy.fl.objects.base import Line, Curve
from typerig.core.func.math import linspread
# - Init -----------------------------
__version__ = '0.2.4'
# - Keep compatibility for basestring checks
try:
basestring
except NameError:
basestring = (str, bytes)
# - Classes --------------------------
class GlyphSampler(object):
	'''Glyph sampler for area analysis.
	Partially inspired by Huerta Tipografica Letterspacer (https://github.com/huertatipografica/HTLetterspacer)

	Constructor:
		GlyphSampler()
		GlyphSampler(sample window, sample frequency)

	Attributes:
		.data_samples (dict) -> {glyph_name:{layer:(left_samples, mid_samples, right_samples)}}: Cached data of Glyph samples
		.data_area (dict) -> {glyph_name:{layer:(left_area, mid_area, right_area)}}: Cached data of Glyph area regions
		.sample_window (list/tuple) -> [min_y, max_y]: Window of scanning
		.sample_frequency (int) : Sampling frequency
		.sample_range range(window_min, window_max, sample_frequency): Sampling range
		.sample_quantas list(int): Quantized sampling range - the window is split in "sample_frequency" number of regions
		.use_quantizer (bool): Use Quantized sampling range
		.margin_growth (int): Grow margin outside the glyph BBoX
		.cutout_x, .cutout_y: Cutout values defining how deep (x) or high (y) the probing is done

	Methods:
		...

	TODO: Italics - slant/deslant or slanted zones?!
	'''
	def __init__(self, sample_window=(0, 1000), sample_frequency=20):
		# NOTE: the default window is a tuple (was a mutable list default);
		# updateRange() copies it into a fresh list, so behavior is unchanged.
		self._delta_error = 100
		self._practical_infinity = 10000
		self._mask_layer_prefix = 'mask.'
		self._mask_layer_suffix = '.area'

		self.data_samples = {}
		self.data_area = {}

		self.use_quantizer = False
		self.margin_growth = 0
		self.cutout_x = 100
		self.cutout_y = self._practical_infinity

		self.updateRange(sample_window, sample_frequency)

	# - Functions ---------------------
	# - Range -------------------------
	def updateRange(self, sample_window=None, sample_frequency=None):
		'''Rebuild both sampling ranges from the given window/frequency
		(None keeps the current value).'''
		if sample_window is not None: self.sample_window = [int(sample_window[0]), int(sample_window[1])]
		if sample_frequency is not None: self.sample_frequency = int(sample_frequency)

		# - Regular Range
		self.sample_range = range(self.sample_window[0], self.sample_window[1], self.sample_frequency)

		# - Quantized Range -> predefined number of samples = sample_frequency
		self.sample_quantas = list(linspread(self.sample_window[0], self.sample_window[1], self.sample_frequency))

	def getRange(self, quantized=False):
		'''Return the quantized or the regular sampling range.'''
		return self.sample_quantas if quantized else self.sample_range

	# - Filters -----------------------
	@staticmethod
	def filterBandPass(point_list, cutout_depth=(10000, 10000), in_reverse=False):
		'''Clamp points so they never lie deeper than cutout_depth into the
		glyph: x/y values beyond (min + depth) — or (max - depth) when
		in_reverse — are snapped to that limit.'''
		min_x, max_x, min_y, max_y = GlyphSampler.getBounds(point_list)
		cutout_depth_x, cutout_depth_y = cutout_depth
		return_list = []

		cutout_value_x = [min_x, max_x][in_reverse] + cutout_depth_x*[1, -1][in_reverse]
		cutout_value_y = [min_y, max_y][in_reverse] + cutout_depth_y*[1, -1][in_reverse]

		for p in point_list:
			px, py = p.tuple
			if [px > cutout_value_x, px < cutout_value_x][in_reverse]: px = cutout_value_x
			if [py > cutout_value_y, py < cutout_value_y][in_reverse]: py = cutout_value_y
			return_list.append(Point(px, py))

		return return_list

	@staticmethod
	def filterClosePoly(point_list, in_reverse=False, grow_value=0):
		'''Close the sampled outline into a polygon by prepending/appending
		two corner points at the (optionally grown) bounding edge.'''
		min_x, max_x, min_y, max_y = GlyphSampler.getBounds(point_list)
		x = min_x - grow_value if not in_reverse else max_x + grow_value
		point_list.insert(0, Point(x, min_y))
		point_list.append(Point(x, max_y))
		return point_list

	# - Getters ---------------------------------
	@staticmethod
	def getArea(point_list):
		'''Polygon area via the shoelace formula (absolute cross terms).'''
		corners = len(point_list)
		area = 0.0
		for i in range(corners):
			j = (i + 1) % corners
			area += abs(point_list[i].x*point_list[j].y - point_list[j].x*point_list[i].y)
		return area*0.5

	@staticmethod
	def getBounds(point_list):
		'''Return (min_x, max_x, min_y, max_y) of the point list.'''
		min_x = min(point_list, key= lambda p: p.x).x
		max_x = max(point_list, key= lambda p: p.x).x
		min_y = min(point_list, key= lambda p: p.y).y
		max_y = max(point_list, key= lambda p: p.y).y
		return min_x, max_x, min_y, max_y

	@staticmethod
	def getContour(point_list, get_fg_contour=False):
		'''Build a closed flContour (or FgContour) from the point list.'''
		new_contour = fl6.flContour([pqt.QtCore.QPointF(*p.tuple) for p in point_list], closed=True)
		new_fg_contour = new_contour.convertToFgContour()
		return new_contour if not get_fg_contour else new_fg_contour

	# - Glyph sampling ------------------------------------
	@staticmethod
	def getSamples(glyph, layer, sampling_range):
		'''Slice the glyph horizontally at every y in sampling_range and
		collect the leftmost/rightmost outline intersections.'''
		layer_bounds = glyph.getBounds(layer)
		layer_contours = glyph.contours(layer)

		min_x = int(layer_bounds.x()) #- delta
		max_x = int(layer_bounds.width() + min_x)
		mid_x = (min_x + max_x)*0.5
		max_x += 100 # Boundary error compensation?! TO FIX! Presumable problem with Line().hasPoint()

		min_y = int(layer_bounds.y())
		max_y = int(layer_bounds.height() + min_y)

		ipoi_left, ipoi_right = [], [] # Intersections points of interest
		probe_list = [Line((min_x, y), (max_x, y)) for y in sampling_range if min_y <= y <= max_y] # Create all horizontal slicing lines (probes)

		# Convert every segment to a TypeRig Line/Curve so it can intersect
		prepare_segments = sum([contour.segments() for contour in layer_contours],[])
		tr_segments = []

		for segment in prepare_segments:
			if segment.countPoints == 2:
				temp_tr_segment = Line(segment)
			elif segment.countPoints == 4:
				temp_tr_segment = Curve(segment)

			tr_segments.append(temp_tr_segment)

		for probe in probe_list:
			temp_probe_poi = []

			for tr_segment in tr_segments:
				intersection = tr_segment & probe

				if len(intersection) and not isinstance(intersection, Void):
					if isinstance(intersection, list):
						temp_probe_poi += intersection
					else:
						temp_probe_poi.append(intersection)

			if len(temp_probe_poi) >= 2:
				ipoi_left.append(min(temp_probe_poi, key= lambda p: p.x))
				ipoi_right.append(max(temp_probe_poi, key= lambda p: p.x))
			elif len(temp_probe_poi) == 1: # Single intersection fix
				qpoi = temp_probe_poi[0] # Questionable poi
				if qpoi.x < mid_x:
					ipoi_left.append(qpoi)
				else:
					ipoi_right.append(qpoi)

		return ipoi_left, ipoi_right

	def sampleGlyph(self, glyph, layer=None, cache_data=True):
		'''Sample a glyph layer and return (left, mid, right) point lists;
		optionally cache them in .data_samples.'''
		# - Get initial data
		layer_data = {}
		layer_name = layer if layer is not None else glyph.layer(layer).name
		sample_left, sample_right = self.getSamples(glyph, layer, self.getRange(self.use_quantizer))

		# - Process samples
		sample_left = GlyphSampler.filterBandPass(sample_left, (self.cutout_x, self.cutout_y), False)
		sample_right = GlyphSampler.filterBandPass(sample_right, (self.cutout_x, self.cutout_y), True)

		sample_mid = sample_left + list(reversed(sample_right))
		sample_left = GlyphSampler.filterClosePoly(sample_left, False, self.margin_growth)
		sample_right = GlyphSampler.filterClosePoly(sample_right, True, self.margin_growth)

		layer_data[layer_name] = (sample_left, sample_mid, sample_right)

		# - Cache
		if cache_data: self.data_samples.setdefault(glyph.name, {}).update(layer_data)
		return layer_data[layer_name]

	def sampleGlyphArea(self, glyph, layer=None, resample=False, cache_data=True):
		'''Return (left, mid, right) areas for a glyph layer, sampling
		first when not cached (or when resample is forced).'''
		glyph_name = glyph.name
		layer_name = layer if layer is not None else glyph.layer(layer).name
		layer_area = {}

		# BUG FIX: dict.has_key() was removed in Python 3; use the `in`
		# operator (works on Python 2 as well).
		if glyph_name in self.data_samples and not resample:
			if layer_name in self.data_samples[glyph_name]:
				layer_data = self.data_samples[glyph_name][layer_name]
			else:
				layer_data = self.sampleGlyph(glyph, layer_name, True)
		else:
			layer_data = self.sampleGlyph(glyph, layer_name, True)

		left, mid, right = layer_data
		contour_left = GlyphSampler.getContour(left, True)
		contour_mid = GlyphSampler.getContour(mid, True)
		contour_right = GlyphSampler.getContour(right, True)

		area_left = abs(contour_left.area())
		area_mid = abs(contour_mid.area())
		area_right = abs(contour_right.area())

		layer_area[layer_name] = (area_left, area_mid, area_right)
		if cache_data: self.data_area.setdefault(glyph_name, {}).update(layer_area)
		return layer_area[layer_name]

	# - Represent ----------------------------------------------------
	def drawGlyphArea(self, glyph, layer=None):
		'''Draw the cached sample polygons onto a dedicated mask layer for
		visual inspection; requires sampleGlyph() to have run first.'''
		glyph_name = glyph.name
		layer_name = layer if layer is not None else glyph.layer(layer).name
		mask_layer_name = self._mask_layer_prefix + layer_name + self._mask_layer_suffix

		# BUG FIX: dict.has_key() -> `in` (Python 3 compatibility).
		if glyph_name in self.data_samples and layer_name in self.data_samples[glyph_name]:
			if glyph.hasLayer(mask_layer_name):
				mask_layer = glyph.layer(mask_layer_name)
				mask_layer.removeAllShapes()
			else:
				mask_layer = glyph.layer(layer_name).getMaskLayer(True)
				mask_layer.name += self._mask_layer_suffix

			left, mid, right = self.data_samples[glyph_name][layer_name]

			contour_left = GlyphSampler.getContour(left, False)
			contour_mid = GlyphSampler.getContour(mid, False)
			contour_right = GlyphSampler.getContour(right, False)

			new_shape = fl6.flShape()
			new_shape.addContours([contour_left, contour_mid, contour_right], True)
			mask_layer.addShape(new_shape)
		else:
			print('ABORT:\t Draw Area;\t Glyph: %s; Layer: %s;\tGlyphSampler data not found!' %(glyph_name, layer_name))
class MetricSampler(GlyphSampler):
	'''Metric sampler: derives glyph side-bearings automatically from the
	negative-space areas measured by GlyphSampler.

	Constructor:
		MetricSampler(font (pFont))

	Attributes:
		Inherits all GlyphSampler attributes (.data_samples, .data_area,
		.sample_window, .sample_frequency, .use_quantizer, .margin_growth,
		.cutout_x, .cutout_y).

	Methods:
		.getGlyphSB(glyph (pGlyph), layer (Str), area_mult (Float), resample (Bool), draw (Bool))
	'''
	def __init__(self, p_font_object):
		# - Init
		self.font = p_font_object
		self.metrics = self.font.fontMetrics()

		# - Initialize sampler: window spans the extreme descender/ascender
		# over all masters of the font.
		lowest = min(self.metrics.getDescender(layer) for layer in self.font.masters())
		highest = max(self.metrics.getAscender(layer) for layer in self.font.masters())
		super(MetricSampler, self).__init__((lowest, highest))
		self.cutout_x = 100

	# - Functions ----------------------------
	# - Modular/static -----------------------
	@staticmethod
	def getSB(area_tuple, area_mult, sample_window, x_height, font_upm):
		'''Derive (LSB, RSB) from the (left, mid, right) area tuple: each
		side-bearing is the mid-area target minus the side area, divided by
		the window height. (x_height and font_upm are accepted for interface
		compatibility but not currently used.)'''
		area_left, area_mid, area_right = area_tuple
		window_height = max(sample_window) - min(sample_window)
		target = area_mid * area_mult
		return (target - abs(area_left)) / window_height, (target - abs(area_right)) / window_height

	# - Dynamic --------------------------------
	def getGlyphSB(self, glyph, layer=None, area_mult=0.5, resample=False, draw=False):
		'''Return the (LSB, RSB) estimate for a glyph layer, sampling areas
		on demand and optionally drawing the sample polygons.'''
		cached_layers = self.data_area.get(glyph.name, {})
		if layer in cached_layers:
			glyph_areas = cached_layers[layer]
		else:
			glyph_areas = self.sampleGlyphArea(glyph, layer, resample, True)

		if draw: self.drawGlyphArea(glyph, layer)

		return MetricSampler.getSB(glyph_areas, area_mult, self.sample_window,
								   self.metrics.getXHeight(layer), self.metrics.getUpm())
# - Test ----------------------
if __name__ == '__main__':
	# NOTE(review): pFont and eGlyph are not imported in this module; this
	# block presumably relies on the FontLab/TypeRig scripting environment
	# where those proxies are in scope — it raises NameError when run
	# standalone. Confirm the intended entry point.
	font = pFont()
	g = eGlyph()
	ms = MetricSampler(font)
	ms.getGlyphSB(g, None, 0.5, True, True)
	g.updateObject(g.fl)

# - Finish ---------------------------
	print('SAMPLER: %s;' %g.name)
| true |
90718b9f14097d05c4b55bcea2e915870ac0f1ea
|
Python
|
imtiaz-rahi/Py-CheckiO
|
/O'Reilly/I Love Python!/creative-3.py
|
UTF-8
| 385 | 2.8125 | 3 |
[] |
no_license
|
# https://py.checkio.org/mission/i-love-python/publications/gyahun_dash/python-3/except-me-in-the-lie/
def i_hate(language):
    """Return an 'I hate X!' message — except for Python, for which a
    MemoryError carrying the language name is raised instead."""
    if language != 'Python':
        return 'I hate {}!'.format(language)
    raise MemoryError(language)


def i_love_python():
    """Recover the language name from the MemoryError that i_hate('Python')
    raises and turn it into a love declaration."""
    try:
        return i_hate('Python')
    except MemoryError as err:
        return 'I love {}!'.format(err.args[0])
| true |
ca5505ccf435a63558dfaa4c9dd2d5269a323b3c
|
Python
|
MrLIVB/BMSTU_AA
|
/hw_01/algorithm.py
|
UTF-8
| 806 | 3.1875 | 3 |
[
"MIT"
] |
permissive
|
def alg(matr, rows, cols):
    """Scale every cell of *matr* (in place) by the mean of all cells and
    return how many cells equal the maximum of the scaled matrix.

    Note: the running maximum is seeded with matr[0][0] * mean — exactly the
    first scaled cell — so the count is always at least 1.
    """
    cell_count = rows * cols
    mean = sum(matr[r][c] for r in range(rows) for c in range(cols)) / cell_count

    max_el = matr[0][0] * mean
    max_el_cnt = 0
    for r in range(rows):
        for c in range(cols):
            matr[r][c] *= mean
            if matr[r][c] > max_el:
                max_el = matr[r][c]
                max_el_cnt = 1
            elif matr[r][c] == max_el:
                max_el_cnt += 1
    return max_el_cnt
# Demo run: prints the number of maximal elements after scaling (mutates matr).
matr = [[30, -12, 3, -21], [-3, -5, -6, 2], [-21, -21, -1, 0]]
print(alg(matr, 3, 4))
| true |
185ed25255e8092276f8be0d78850feef8e3fab3
|
Python
|
lixiang2017/leetcode
|
/adventofcode/2021/day12/part1/paths.py
|
UTF-8
| 1,005 | 2.921875 | 3 |
[] |
no_license
|
from collections import defaultdict
def paths_count(file_name):
    """Count distinct start->end paths in the cave graph read from
    *file_name*, where lowercase ("small") caves may be visited at most
    once and uppercase ("big") caves any number of times.

    This is Advent of Code 2021 day 12, part 1. Each input line is an
    undirected edge "u-v". Prints and returns the path count.
    """
    graph = defaultdict(list)
    with open(file_name) as f:
        for line in f:
            u, v = line.strip().split('-')
            graph[u].append(v)
            graph[v].append(u)
            # (The original also collected uppercase nodes into an unused
            # `big` set — removed: upper/lower case is checked inline.)

    paths = set()

    def dfs(node, cur_path, seen_small):
        # Paths are keyed by the node sequence after 'start'.
        if node == 'end':
            paths.add(tuple(cur_path))
            return
        for child in graph[node]:
            if child not in seen_small:
                # Big caves contribute None (never matches a node name),
                # so only small caves are actually blocked from revisits.
                dfs(child, cur_path + [child],
                    seen_small | {node if not node.isupper() else None})

    dfs('start', [], set())
    cnt = len(paths)
    print('cnt: ', cnt)
    return cnt
c1 = paths_count('input1')
c2 = paths_count('input2')
c3 = paths_count('input3')
c = paths_count('input')
'''
cnt: 10
cnt: 19
cnt: 226
cnt: 5874
'''
| true |
f86fb2b085ae58e4f109115a568fdbf77ddbeb98
|
Python
|
lucaskotres/EPMProcessorExamples
|
/advanced/distance_between_coordinates.py
|
UTF-8
| 282 | 3.5 | 4 |
[
"MIT"
] |
permissive
|
from math import cos, asin, sqrt
def distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two points given in
    decimal degrees.

    Compact haversine form: a = 0.5 - cos(dlat)/2
    + cos(lat1)*cos(lat2)*(1 - cos(dlon))/2 and d = 2R * asin(sqrt(a)),
    with 2R = 12742 km (mean Earth diameter).
    """
    deg2rad = 0.017453292519943295  # pi / 180
    a = (0.5 - cos((lat2 - lat1) * deg2rad) / 2
         + cos(lat1 * deg2rad) * cos(lat2 * deg2rad)
         * (1 - cos((lon2 - lon1) * deg2rad)) / 2)
    return 12742 * asin(sqrt(a))
print(distance(25,49,26,53))
| true |
588bfb23a1357b742c08ae9c472b42072dac4000
|
Python
|
gmonkman/python
|
/opencvlib/perspective.py
|
UTF-8
| 5,068 | 2.84375 | 3 |
[] |
no_license
|
# pylint: disable=C0103, too-few-public-methods, locally-disabled,
# no-self-use, unused-argument
'''edge detection and skeletonization
'''
#Link to integration
#https://stackoverflow.com/questions/13320262/calculating-the-area-under-a-curve-given-a-set-of-coordinates-without-knowing-t
#https://www.khanacademy.org/math/ap-calculus-ab/integration-applications-ab/average-value-of-a-function-ab/v/average-function-value-closed-interval
#simpsons rule
class Camera():
    '''Container for fixed camera properties: focal length f (mm), sensor
    resolution px_x/px_y (pixels) and physical sensor size x_mm/y_mm (mm).
    No validation is performed.'''
    def __init__(self, f, px_x, px_y, x_mm, y_mm):
        # Bulk-assign all properties in one statement.
        self.f, self.px_x, self.px_y, self.x_mm, self.y_mm = f, px_x, px_y, x_mm, y_mm
class Measure():
    '''Container for one measurement: lens-to-subject distance plus the
    fiducial marker length in mm and in image pixels (all optional).'''
    def __init__(self, lens_subj_dist=None, marker_length_mm=None, marker_length_px=None):
        self.lens_subj_dist, self.marker_length_mm, self.marker_length_px = \
            lens_subj_dist, marker_length_mm, marker_length_px
def get_perspective_correction(bg_dist, object_depth, length):
    '''(float, float)->float|None
    Return *length* corrected for perspective so that the object's back
    plane (at bg_dist, with the object extending object_depth toward the
    camera) is taken as the true measurement plane.

    Returns None for missing arguments or a degenerate geometry (zero
    background distance, or object depth equal to it).
    '''
    if bg_dist is None or object_depth is None or length is None:
        return None
    if bg_dist == 0:
        return None
    scale = 1 - (object_depth / bg_dist)
    if scale == 0:
        return None
    return length / scale
def get_perspective_correction_iter_linear(coeff,
                                           const,
                                           bg_dist,
                                           length,
                                           profile_mean_height=1,
                                           last_length=0,
                                           stop_below_proportion=0.01):
    '''(float, float, float, float,float)->float|None
    Return the length corrected for the depth of the object
    considering the backplane of the object to be the best
    representative of the length.
    *NOTE* The length of the object was itself estimated from the foreground standard measure
    Coeff and constant are used to calculate an objects depth from its length
    The object depth is used to create a iterative series sum which add to the length
    to return the sum of lengths once the last length added was less then stop_below
    stop_below_proportion is the stopping criteria, once the last
    calculated length to add is is less than last_length*stop_below_proportion
    we return the result and stop the iteration
    '''
    # Guard against degenerate inputs that would break the linear model.
    if bg_dist == 0 or bg_dist is None or coeff == 0 or coeff is None or length is None:
        return None
    # Linear depth model: depth = coeff * length + const; on recursive
    # calls the depth is derived from the most recent increment instead.
    if last_length == 0:
        object_depth = length * coeff + const
    else:
        object_depth = last_length * coeff + const
    if object_depth <= 0:
        return length
    if length == 0:
        return 0
    # Convergence: stop once the last increment is a negligible fraction
    # of the accumulated length.
    if (last_length / length < stop_below_proportion) and last_length > 0:
        return length
    # Size of the next correction increment (correction minus current value).
    if last_length == 0: # first call
        l = get_perspective_correction(bg_dist, object_depth, length) - length
    else:
        l = get_perspective_correction(bg_dist, object_depth, last_length) - last_length
    if l is None:
        return None
    # Recurse with the increment scaled by the mean profile height added in.
    return get_perspective_correction_iter_linear(coeff, const, bg_dist, length + (l * profile_mean_height), (l * profile_mean_height), stop_below_proportion)
def subjdist_knowndist(Known, Unknown):
    '''(Class:Measure, Class:Measure) -> float|None
    Estimate the subject-lens distance for *Unknown* from a reference
    photograph (*Known*) taken at a known distance with a fiducial
    marker of known length.
    '''
    # https://www.pyimagesearch.com/2015/01/19/find-distance-camera-objectmarker-using-python-opencv/
    assert isinstance(Known, Measure)
    assert isinstance(Unknown, Measure)
    required = [Known.marker_length_px, Known.lens_subj_dist, Known.marker_length_mm,
                Unknown.marker_length_mm, Unknown.marker_length_px]
    if not all(required):
        return None
    if Known.marker_length_mm == 0 or Unknown.marker_length_px == 0:
        return None
    # perceived focal length derived from the reference shot
    focal = Known.marker_length_px * Known.lens_subj_dist / Known.marker_length_mm
    return Unknown.marker_length_mm * focal / Unknown.marker_length_px
def subjdist_camera(Cam, Unknown):
    '''(Class:Camera, Class:Measure) -> float|None
    Estimate the lens-subject distance from the camera intrinsics and
    the marker's known physical length plus its measured pixel length.
    Uses only the horizontal axis of the sensor.
    Camera properties needed:
        f     - focal length in mm
        px_x  - CMOS width in pixels
        x_mm  - real CMOS width in mm
    '''
    assert isinstance(Cam, Camera)
    assert isinstance(Unknown, Measure)
    required = [Cam.f, Unknown.marker_length_mm, Cam.px_x,
                Unknown.marker_length_px, Cam.x_mm]
    if not all(required):
        return None
    numerator = Cam.f * Unknown.marker_length_mm * Cam.px_x
    return numerator / (Unknown.marker_length_px * Cam.x_mm)
| true |
ffbdeb67ca45b39f7f3c35ca05e9ba4176b11d10
|
Python
|
Pedrocereja/Pinball
|
/arduino.py
|
UTF-8
| 365 | 2.703125 | 3 |
[] |
no_license
|
from pyfirmata import Arduino, util, INPUT, OUTPUT
from time import sleep
class Sensor:
    """Digital input sensor attached to a Firmata board pin."""

    def __init__(self, pin, valor, board):
        # Register digital pin `pin` as an input on the given pyfirmata board.
        self.pin = board.get_pin('d:{0}:i'.format(pin))
        self.valor = valor
        # BUG FIX: enable_reporting is a method; the original referenced the
        # attribute without calling it, so pin reporting was never enabled
        # and read() would always return None.
        self.pin.enable_reporting()
        sleep(0.05)  # give Firmata a moment to start reporting values

    def status(self):
        """Return the last reported pin value (True/False, or None if unread)."""
        return self.pin.read()
#sensor = Sensor(2,500)
#while 1:
# print(sensor.status())
# sleep(0.05)
| true |
5b7532b5a7a8087a3b335a9a3a07a7eb6cc1369e
|
Python
|
vmkhlv/rusenteval
|
/probing/arguments.py
|
UTF-8
| 2,047 | 2.515625 | 3 |
[] |
no_license
|
from dataclasses import field, dataclass
@dataclass
class ProbingArguments(object):
    """
    An object to store the experiment arguments.

    Each field carries a ``help`` string in its metadata so the arguments
    can be surfaced to an argument parser.
    """

    seed: int = field(default=42, metadata={"help": "random seed for initialization"})
    prepro_batch_size: int = field(
        default=128, metadata={"help": "batch size for creating features"}
    )
    bucketing: bool = field(
        default=True,
        metadata={
            "help": "whether to perform char-level sequence bucketing for pre-processing"
        },
    )
    model_is_random: bool = field(
        default=False,
        metadata={"help": "whether to randomly initialize the transformer model"},
    )
    train_batch_size: int = field(
        default=256, metadata={"help": "batch size for training"}
    )
    eval_batch_size: int = field(
        default=128, metadata={"help": "batch size for evaluation"}
    )
    device: str = field(
        default="cuda", metadata={"help": "the device used during training"}
    )
    input_dim: int = field(default=768, metadata={"help": "input embedding shape"})
    num_hidden: int = field(
        default=250,
        metadata={"help": "number of hidden units in the non-linear classifier"},
    )
    max_iter: int = field(default=200, metadata={"help": "max number of epochs"})
    # NOTE: the misspelled name "droupout_rate" is kept so existing callers
    # that reference it are not broken.
    droupout_rate: float = field(
        default=0.2, metadata={"help": "dropout rate for the non-linear classifier"}
    )
    num_classes: int = field(default=2, metadata={"help": "number of target classes"})
    # BUG FIX: annotation was `int` although the default (0.01) and any
    # sensible learning rate are floats.
    learning_rate: float = field(
        default=0.01, metadata={"help": "learning rate for the classifiers"}
    )
    clf: str = field(
        default="logreg",
        metadata={"help": "non-linear or linear classifier name (logreg, mlp)"},
    )
    num_kfold: int = field(
        default=5, metadata={"help": "number of folds for k-fold training"}
    )
    balanced: bool = field(
        default=True,
        metadata={
            "help": "whether to compute the weighted accuracy score if imbalanced"
        },
    )
| true |
7be7b8ca83eb23d3cf7f5d5e70ed3c01f6e942b5
|
Python
|
newcanopies/moon
|
/Kobuki/Kobuki_line_glider
|
UTF-8
| 5,907 | 2.828125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import roslib
import sys
import rospy
import cv2
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
class LineFollower(object):
    """ROS helper node: follows a yellow line seen by the RGB camera.

    Subscribes to /camera/rgb/image_raw and publishes geometry_msgs/Twist
    commands on /cmd_vel that steer the robot toward the centroid of the
    yellow blob detected in a cropped region of the frame.
    """

    def __init__(self):
        self.bridge_object = CvBridge()
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image, self.camera_callback)
        self.move_robot = rospy.Publisher("/cmd_vel", Twist, queue_size= 1)
        self.motionv = Twist()

    def camera_callback(self,data):
        """Process one camera frame: find the yellow centroid and publish a Twist."""
        try:
            # store retrieved image in OpenCV variable
            # variable data contains the ROS msg with captured image
            # bgr8 encoding is the OpneCV pre-ROS legacy encoding
            cv_image = self.bridge_object.imgmsg_to_cv2(data, desired_encoding="bgr8")
        except CvBridgeError as e:
            # NOTE(review): on conversion failure cv_image stays unbound and
            # the code below raises NameError — consider returning here.
            print(e)

        # GUI window displaying contents of the variable "cv_image"
        cv2.imshow("camera_raw", cv_image)
        # waitKey function waits for a keystroke event, delay param is in miliseconds where 0 means forever
        cv2.waitKey(1)

        # CROP to ROI for faster detection
        # conversion bgr2hsv
        cv_new_image = cv_image[240:380,100:400]
        hsv=cv2.cvtColor(cv_new_image, cv2.COLOR_BGR2HSV)

        # get full image width
        height, width, channels_orig = cv_new_image.shape
        '''
        # Define the Yellow Colour in HSV
        #RGB
        #[[[222,255,0]]]
        #BGR
        #[[[0,255,222]]]
        """
        To know which color to track in HSV, Put in BGR. Use ColorZilla to get the color registered by the camera
        >> yellow = np.uint8([[[B,G,R ]]])
        >> hsv_yellow = cv2.cvtColor(yellow,cv2.COLOR_BGR2HSV)
        >> print( hsv_yellow )
        [[[ 34 255 255]]
        """
        '''
        # THRESHOLD of HSV colorspace cone for pixel color of interest
        # lower_yellow = np.array([233,235,13]) in RGB space
        lower_yellow = np.array([30,150,150]) # in HSV colorspace, HSV encoding removes variable of color saturation making color recognition faster across changing lighting conditions
        # upper_yellow = np.array([255,255,53])
        upper_yellow = np.array([80,255,255]) # in HSV colorspace

        # mask considers the yellow subset all the different bits in the defined Upper and Lower values
        # MASK filters yellow / not yellow
        # prep for centroid calculation
        # extracting only ROI features, ie: the yellow line or stars
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        # bitwise converts to binary: white for selected color, black blocks-out the rest
        # bitwise binary determines area of the blob to calc centroid
        ROI_mask = cv2.bitwise_and(cv_new_image,cv_new_image, mask= mask)

        # .shape has specific information providing specific values Height Width Channel
        # (height/width now refer to the FULL image, not the crop above)
        height, width, channels = cv_image.shape
        descentre = 160
        rows_to_watch = 20
        # NOTE(review): crop_img is never used below; also the second
        # subscript [1:width] slices ROWS again — a column slice
        # (", 1:width") may have been intended.  The divisions rely on
        # Python 2 integer division — confirm before running under Python 3.
        crop_img = cv_image[(height)/2+descentre:(height)/2+(descentre+rows_to_watch)][1:width]

        # Centroids, concentration of color being filtered for
        # CoM extrapolated into image; CoM of image Blobs
        # calculated using integrals, using ImageMoments:
        # obtain coordinates of cropped image where blob tracking centroids occur for the specified Yellow range
        # Calculate centroid of the blob of binary image using ImageMoments
        # detecting the moment of the WHITE part of masked image
        m = cv2.moments(mask, False)
        ImageMoment_is = False
        try:
            # assign cx and cy based on the ImageMoments
            # except: prevents division with 0, instead divide by 2
            # detect the centroid of the line
            cx, cy = m['m10']/m['m00'], m['m01']/m['m00']
            ImageMoment_is = True
        # avoids python compilation errors
        # outside of a compiled python environment, runs only as runtime error
        except ZeroDivisionError:
            # no yellow pixels: fall back to the image centre
            cy, cx = height/2, width/2
            ImageMoment_is = False
        rospy.loginfo("Kobuki sees Yellow Line is: " +str(ImageMoment_is))

        # Draw the centroid as a CIRCLE
        # OpenCV supports drawing variety of things over the images, not only geometric shapes
        # cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]])
        cv2.circle(ROI_mask,(int(cx), int(cy)), 10,(0,0,255),-1)

        # centering treshold
        tresh_hold = 20
        # signed horizontal offset of the centroid from the image centre
        error_x = cx - width / 2;

        # kobuki line-searching turn (no line detected: rotate in place)
        if not ImageMoment_is:
            self.motionv.linear.x = 0.0
            self.motionv.angular.z = (-error_x / 100)
        # kobuki left turn
        elif cx < (width/2) - tresh_hold:
            self.motionv.linear.x = 0.0
            self.motionv.angular.z = -(-error_x / 100)
        # kobuki right turn
        elif cx > (width/2) + tresh_hold:
            self.motionv.linear.x = 0.0
            self.motionv.angular.z = +(-error_x / 100)
        # kobuki motion vector straight
        else:
            self.motionv.linear.x = 0.5
            self.motionv.angular.z = 0.0

        # self.motionv based on width, cx
        # start kobuki motion vector
        self.move_robot.publish(self.motionv)
        rospy.loginfo("Kobuki glide along Yellow line is: " +str(ImageMoment_is))

        cv2.imshow("HSV", hsv)
        cv2.imshow("MASK", mask)
        cv2.imshow("ROI_mask", ROI_mask)
        cv2.waitKey(1)
def main():
    """Start the line-following node and spin until interrupted."""
    follower = LineFollower()
    rospy.init_node('line_following_node', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
| true |
4ebaef8dbc669ce2e56391dc20be8d7bbabf1c83
|
Python
|
CameronLuyt69/Python-Work
|
/DataTypesEx2.py
|
UTF-8
| 588 | 3.640625 | 4 |
[] |
no_license
|
# List exercises: sum, sort, min/max, de-duplicate, then simple input/output.
Num = [56, 78, 34, 21, 56, 34, 125, 45, 89, 75, 12, 12, 56]
# BUG FIX: the original "sum" was a hand-typed expression inside a
# one-element list; it mistyped 125 as 124 and omitted one of the 12s.
# Computing it from the list cannot drift out of sync.
Sum = sum(Num)
print(Sum)

Num = [56, 78, 34, 21, 56, 34, 125, 45, 89, 75, 12, 12, 56]
Num.sort()
print(Num)
print("Smallest element is:", min(Num))
print("Largest element is:", max(Num))
# dict.fromkeys removes duplicates while preserving first-seen order
Num = list(dict.fromkeys(Num))
print(Num)

strName = input("Please enter your First name")
strSurname = input("Please enter your surname")
print(strName + " " + strSurname)

number1 = int(input("Please enter a number"))
number2 = int(input("please enter a other number"))
answer = number1 + number2
print(answer)
| true |
ab1e3a4ecc4fd24ebee8af0f66d451b97d76b119
|
Python
|
yuedongyang/GRASP
|
/evaluation.py
|
UTF-8
| 4,898 | 2.625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/python
'''
Usage:
python3 evaluation.py --mode='single'
--test_dataset='ph'
--train_model='py'
--window_size=37
'''
import argparse
import json
from sklearn.externals import joblib
from utils import *
if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='single', help='The trained model is trained by two training modes: single or global, \'single\' is trained by single dataset and \'global\' is trained by all datasets.')
    parser.add_argument('--test_dataset', type=str, default='ph', help='This is for test-dataset. Choose from: \'py\' is PARS-Yeast \'ph\' is PARS-Human, \'pdb\' is NMR/X-ray.')
    parser.add_argument('--train_model', type=str, default='py', help='This is for using which trained model. Choose from: \'py\' is PARS-Yeast \'ph\' is PARS-Human, \'pdb\' is NMR/X-ray and \'global\' is Consensus-model.')
    parser.add_argument('--window_size', type=int, default=37, help='The window size when truncating RNA sequences.')
    parser.add_argument('--output', type=str, default='./preds/', help='The directory for saving predictions on test-dataset.')
    config = parser.parse_args()

    window_size = config.window_size
    outputfile_dir = config.output + dataset_type[config.test_dataset]
    create_dir_if_not_exists(outputfile_dir)

    if (config.mode == 'single'):
        # Evaluate one trained model on one test dataset.
        inputfile = './data/' + dataset_type[config.test_dataset] + '/' + config.test_dataset + '_encode_{}.csv'.format(window_size)
        model = './model/' + dataset_type[config.train_model] + '/' + config.train_model +'_{}.model'.format(window_size)
        outputfile = outputfile_dir + '/' + config.test_dataset + '_predicted_by_' + config.train_model + '_{}.csv'.format(window_size)

        input_data = get_data(inputfile)
        output = input_data
        label_test = input_data['label']
        input_data = input_data.drop(['label', 'sequences'], axis=1)
        clf = joblib.load(model)
        preds = clf.predict_proba(input_data)[:,1]
        output['preds'] = preds
        # output.to_csv(outputfile, index=True)
        print ('training model:', config.train_model, 'test dataset:', config.test_dataset)
        # NOTE: `eval` here is the project metric helper from utils, not the builtin.
        auc, acc, mcc, ppv, recall, f1 = eval(label_test, preds)
        print ('auc:', auc)
        print ('acc:', acc)
        print ('mcc:', mcc)
        print ('ppv:', ppv)
        print ('recall:', recall)
        print ('f1-score:', f1)
    else :
        # Global mode: evaluate one model on the three held-out 10% test splits.
        input_pars_human_file = './data/10per_test/ph_encode_10per_test_{}.csv'.format(window_size)
        input_pars_yeast_file = './data/10per_test/py_encode_10per_test_{}.csv'.format(window_size)
        input_pdb_file = './data/10per_test/pdb_encode_10per_test_{}.csv'.format(window_size)

        output_pars_human_preds_file = outputfile_dir + '/ph_encode_10per_test_preds_{}.csv'.format(window_size)
        output_pars_yeast_preds_file = outputfile_dir + '/py_encode_10per_test_preds_{}.csv'.format(window_size)
        output_pdb_preds_file = outputfile_dir + '/pdb_encode_10per_test_preds_{}.csv'.format(window_size)

        model = './model/' + dataset_type[config.train_model] + '/' + config.train_model +'_{}.model'.format(window_size)

        pars_human_test_data = get_data(input_pars_human_file)
        pars_yeast_test_data = get_data(input_pars_yeast_file)
        pdb_test_data = get_data(input_pdb_file)

        ph_test_preds = pars_human_test_data
        py_test_preds = pars_yeast_test_data
        pdb_test_preds = pdb_test_data

        pars_human_test_label = pars_human_test_data['label']
        # BUG FIX: the yeast labels were taken from the *human* dataframe.
        pars_yeast_test_label = pars_yeast_test_data['label']
        pdb_test_label = pdb_test_data['label']

        pars_human_test_data = pars_human_test_data.drop(['label', 'sequences'], axis=1)
        # BUG FIX: this line overwrote the human features with the dropped
        # yeast frame and left pars_yeast_test_data with its label columns.
        pars_yeast_test_data = pars_yeast_test_data.drop(['label', 'sequences'], axis=1)
        pdb_test_data = pdb_test_data.drop(['label', 'sequences'], axis=1)

        clf = joblib.load(model)
        ph_preds = clf.predict_proba(pars_human_test_data)[:,1]
        py_preds = clf.predict_proba(pars_yeast_test_data)[:,1]
        pdb_preds = clf.predict_proba(pdb_test_data)[:,1]

        ph_test_preds['preds'] = ph_preds
        ph_test_preds.to_csv(output_pars_human_preds_file, index=True)
        py_test_preds['preds'] = py_preds
        py_test_preds.to_csv(output_pars_yeast_preds_file, index=True)
        pdb_test_preds['preds'] = pdb_preds
        pdb_test_preds.to_csv(output_pdb_preds_file, index=True)

        print ('training model:', config.train_model)
        py_auc = metrics.roc_auc_score(pars_yeast_test_label, py_preds)
        print ('auc of test dataset: PARS-Yeast:', py_auc)
        # BUG FIX: the original scored the human labels against themselves
        # (always AUC 1.0) instead of against the predictions.
        ph_auc = metrics.roc_auc_score(pars_human_test_label, ph_preds)
        print ('auc of test dataset: PARS-Human:', ph_auc)
        pdb_auc = metrics.roc_auc_score(pdb_test_label, pdb_preds)
        print ('auc of test dataset: NMR/X-ray:', pdb_auc)
| true |
8a2747b40cf4179f4a8c88e25b19fbb5a856a6f6
|
Python
|
anhaeh/pyescoba
|
/pygame_entities/sprites.py
|
UTF-8
| 1,971 | 3.046875 | 3 |
[] |
no_license
|
import pygame
from pygame.locals import RLEACCEL
class CardSprite(pygame.sprite.Sprite):
    """Sprite for a single playing card, drawn face up or face down."""

    def __init__(self, card, posx, posy, index, show_card):
        # Face images for numbers above 7 are stored two numbers higher
        # (the 8/9 files are skipped in a Spanish deck).
        self.image_number = card.number + 2 if card.number > 7 else card.number
        pygame.sprite.Sprite.__init__(self)
        self.rect = None
        self.image = None
        self.card = card
        self.index = index
        self.set_image(posx, posy, show_card)

    def set_image(self, posx, posy, show_card):
        """Load the face (or back) image, trim its border, place it at (posx, posy)."""
        if show_card:
            filename = "images/%s/%d.jpg" % (self.card.card_type, self.image_number)
        else:
            filename = "images/back.jpg"
        surface = load_image(filename)
        # drop the 8px border around the artwork
        self.image = surface.subsurface((8, 8, 86, 124))
        self.rect = self.image.get_rect()
        self.rect.x = posx
        self.rect.y = posy
class EscobaSprite(CardSprite):
    """Card sprite rotated 90 degrees to mark an 'escoba' capture."""

    def __init__(self, card, posx, posy, index):
        # an escoba card is always shown face up
        super(EscobaSprite, self).__init__(card, posx, posy, index, True)

    def set_image(self, posx, posy, show=True):
        """Load the face image, rotate it sideways and offset it from (posx, posy)."""
        face = load_image("images/%s/%d.jpg" % (self.card.card_type, self.image_number))
        face = face.subsurface((8, 8, 86, 124))
        self.image = pygame.transform.rotate(face, 90)
        self.rect = self.image.get_rect()
        self.rect.x = posx - 50
        self.rect.y = posy + 20
def load_image(filename, transparent=False):
    '''Load an image and convert it to the display pixel format.

    When *transparent* is True, the colour of the top-left pixel becomes
    the colour key (treated as transparent when blitting).
    Exits the program via SystemExit if pygame cannot load the file.
    (Python 2 exception syntax.)
    '''
    try:
        image = pygame.image.load(filename)
    except pygame.error, message:
        raise SystemExit, message
    image = image.convert()
    if transparent:
        # use the top-left pixel as the transparent colour key
        color = image.get_at((0, 0))
        image.set_colorkey(color, RLEACCEL)
    return image
def draw_text(message, posx, posy, color=(255, 255, 255)):
    """Render *message* centred at (posx, posy); return (surface, rect)."""
    font = pygame.font.Font("fonts/DroidSans.ttf", 20)
    rendered = font.render(message, 1, color)
    rect = rendered.get_rect()
    rect.centerx = posx
    rect.centery = posy
    return rendered, rect
| true |
845d45732da9cf465acc8e1d3d9940a4693272e4
|
Python
|
RickyHuo/leetcode
|
/python/python2/uncommon-words-from-two-sentences.py
|
UTF-8
| 553 | 3.1875 | 3 |
[] |
no_license
|
class Solution(object):
    def uncommonFromSentences(self, A, B):
        """Return the words that appear exactly once across both sentences.

        :type A: str
        :type B: str
        :rtype: List[str]
        """
        counts = {}
        for word in A.split(" ") + B.split(" "):
            # dict.get replaces the Python-2-only dict.has_key(), so this
            # runs under Python 3 as well.
            counts[word] = counts.get(word, 0) + 1
        return [word for word in counts if counts[word] == 1]
if __name__ == '__main__':
    # quick manual check of the solution
    print(Solution().uncommonFromSentences("apple apple", "banaba"))
| true |
c2ef9df26ad6bf9334390528fabf4d11aaa8d872
|
Python
|
Abhijith-1997/pythonprojects
|
/array/pop.py
|
UTF-8
| 63 | 2.734375 | 3 |
[] |
no_license
|
# Demonstrate list.pop(): called with no index it removes the LAST element.
lst = [1, 2, 3, 4, 5]
lst.pop()  # removes 5
print(lst)
| true |
b7c5bdacae6151bc34930575b9103d5131b8052d
|
Python
|
rundongliu/leetcode-python
|
/Pascal's Triangle.py
|
UTF-8
| 417 | 3.03125 | 3 |
[] |
no_license
|
class Solution:
    # @return a list of lists of integers
    def generate(self, numRows):
        """Return the first numRows rows of Pascal's triangle."""
        triangle = []
        for n in range(numRows):
            if n == 0:
                triangle.append([1])
                continue
            prev = triangle[-1]
            # each interior entry is the sum of the two entries above it
            middle = [prev[k] + prev[k + 1] for k in range(len(prev) - 1)]
            triangle.append([1] + middle + [1])
        return triangle
| true |
9ba138e0f3a12be8c7a179a07e7c67f73cfd081c
|
Python
|
mollinaca/ac
|
/code/practice/abc/abc020/b.py
|
UTF-8
| 88 | 2.734375 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# AtCoder ABC020 B: read two integers n and m, glue their decimal digits
# together (string concatenation, not addition), and print twice the result.
n,m = input().split()
print (int(n+m)*2)
| true |
1102f389120000da978d08f07f63f300fd1bd163
|
Python
|
swmmrman/TempMonitor
|
/live_monitor.py
|
UTF-8
| 740 | 2.84375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
"""Live monitor for the arduino chinchilla room temperature monitor"""
import sys
import time
import serial
# Open the base-station serial port and echo each reading with a timestamp.
mon = None  # so the error handlers can tell whether the port was ever opened
try:
    mon = serial.Serial('/dev/ttyUSB0', 115200)
    while True:
        line = mon.readline().decode('utf-8').strip()
        t = time.asctime(time.localtime(time.time()))
        # each line: station id, temperature, humidity, sequence counter (unused)
        (station, temp, humid, count) = line.split()
        station = station.strip("R")
        print(
            F"{t}: Station:{station} "
            F"Temperature: {temp} Humidity: {humid}"
        )
except KeyboardInterrupt:
    print("Cleaning up")
    mon.close()
    sys.exit(1)
except FileNotFoundError:
    print("/dev/ttyUSB0 does not exist. Please ensure the main station is on.")
    # BUG FIX: if serial.Serial() itself raised, `mon` was never assigned and
    # the original unconditional mon.close() raised NameError here.
    if mon is not None:
        mon.close()
    sys.exit(1)
| true |
58d7f1567af6463a60e7046198f8672888d7aed1
|
Python
|
Shamir-Lab/Recycler
|
/paper/tally_hits.py
|
UTF-8
| 4,264 | 2.671875 | 3 |
[] |
permissive
|
import argparse, sys
sys.path.insert(0, '../recycle/')
from recycle.utils import *
import numpy as np
def count_property_range_hits(prop, node_dict, hits):
    """For each bucket of *prop*, count nodes and how many of them are hits.

    Buckets use min_val <= val < max_val, except the last bucket which is
    inclusive on both ends.  Returns a list of (hit_count, node_count,
    hit_ratio) tuples, one per bucket, or [] for an unknown property.
    """
    # maps property name -> (tuple index in node_dict values, bucket bounds)
    switcher = {
        "length": (0, (0, 4000, 8000, 12000, 16000, 20000)),
        "steps": (1, (0, 2, 4, 8, 16, 32)),
        "cov": (2, (1, 10, 100, 1000, 10000, 100000)),
        "cv": (3, (0, 0.05, 0.10, 0.15, 0.20, 0.25)),
    }
    if prop not in switcher:
        return []
    tup_pos, bounds = switcher[prop]
    res = []
    last_bucket = len(bounds) - 2
    for ind in range(len(bounds) - 1):
        lo, hi = bounds[ind], bounds[ind + 1]
        node_cnt = 0
        pos_cnt = 0
        for node, values in node_dict.items():
            val = values[tup_pos]
            if ind == last_bucket:
                in_range = lo <= val <= hi
            else:
                in_range = lo <= val < hi
            if in_range:
                node_cnt += 1
                if node in hits:
                    pos_cnt += 1
        if node_cnt > 0:
            res.append((pos_cnt, node_cnt, round(float(pos_cnt) / node_cnt, 2)))
        else:
            res.append((0, 0, 0))
    return res
def parse_user_input():
    """Parse the command line: -p/--pref (Recycler output prefix) and
    -n/--nodes (file listing nodes with accepted reference hits)."""
    parser = argparse.ArgumentParser(
        description='counts number of candidates vs TP hits for a certain property and range combination'
    )
    parser.add_argument('-p', '--pref', required=True, type=str,
                        help='prefix to recycler outputs')
    parser.add_argument('-n', '--nodes', required=True, type=str,
                        help='nodes list including accepted hits to reference sequences')
    return parser.parse_args()
def get_path_vals_cv(path, covs, max_k_val=55):
    """Coefficient of variation (std/mean) of the coverage values recorded
    when the path was removed (stored in the paths_w_cov.txt output file),
    weighted by each segment's k-mer-adjusted length."""
    seg_lens = np.array([get_length_from_spades_name(n) - max_k_val for n in path])
    # normalise the lengths into weights that sum to 1
    wgts = np.multiply(seg_lens, 1. / sum(seg_lens))
    mean = np.average(covs, weights=wgts)
    std = np.sqrt(np.dot(wgts, (covs - mean) ** 2))
    return std / mean
############### ENTRY POINT ####################
# inputs: paths_w_cov.txt file,
# NOTE(review): Python 2 script — len(lines)/4 below relies on integer
# division and the final reports use print statements.
args = parse_user_input()
paths_file = args.pref + '.cycs.paths_w_cov.txt'

# create dict of RNODES as keys, values
# as tuples of (total_length, num_steps, coverage)
f = open(paths_file, 'r')
lines = f.readlines()
rnode_dict = {}
for ind in range(len(lines)/4):
    # records come in groups of 4 lines: name, path, coverage list, separator
    name = lines[ind*4].rstrip()
    path = lines[ind*4 + 1].rstrip()
    path_covs = np.array([float(a) for a in lines[ind*4 + 2].rstrip()[1:-1].split(",")])
    cov = get_cov_from_spades_name(name)
    length = get_length_from_spades_name(name)
    num_steps = len(path.split(','))
    cv = get_path_vals_cv(path[1:-1].split(","), path_covs)
    # print name, num_steps, path
    rnode_dict[name] = (length, num_steps, cov, cv)

# issue - single node path RNODEs are not in path_w_cov files
# read in cycs.fasta file, add back single node paths
cycs_file = args.pref + '.cycs.fasta'
f = open(cycs_file, 'r')
lines = f.readlines()
for ind in range(len(lines)/2):
    # FASTA pairs: header line then sequence line; strip the '>' from headers
    name = lines[ind*2][1:].rstrip()
    if name in rnode_dict: continue
    else:
        cov = get_cov_from_spades_name(name)
        length = get_length_from_spades_name(name)
        # print name, 1
        # single-node paths: 1 step, cv of 0 by definition
        rnode_dict[name] = (length, 1, cov, 0)

# nucmer.delta file parsed to RNODES having 100/80 hits with
#/home/gaga/rozovr/MUMmer3.23/show-coords -r -c -l /home/nasheran/rozovr/recycle_paper_data/ref_800/before_rr.nucmer.delta | awk '$10==100.00 && $15>=80.00' | cut -d'|' --complement -f 1-6 | cut -f2 > /home/nasheran/rozovr/recycle_paper_data/rnode_hits.txt
# need to read names in and get set
hits_file = args.nodes
f = open(hits_file,'r')
hits = set([])
lines = f.readlines()
for line in lines:
    hits.add(line.rstrip())

print "length: ", count_property_range_hits("length", rnode_dict, hits)
print "steps: ", count_property_range_hits("steps", rnode_dict, hits)
print "coverage: ", count_property_range_hits("cov", rnode_dict, hits)
print "CV: ", count_property_range_hits("cv", rnode_dict, hits)
| true |
8aa8cb342a59fa79045b983f2010490ab4258fca
|
Python
|
Bayonetta/ZSChatSystem
|
/check.py
|
UTF-8
| 661 | 3.140625 | 3 |
[
"Apache-2.0"
] |
permissive
|
from binascii import b2a_hex, a2b_hex
from Crypto.Cipher import DES
import sys
#key = '12345678'
# Interactive viewer: prompt for the DES key and, when it matches the
# hard-coded value, decrypt the saved chat history and print it.
# NOTE(review): DES with a hard-coded key compared in plain text only
# obfuscates the history file — it is not real protection.
while 1:
    key = raw_input('Please input the key(8 bytes): ')  # Python 2 input
    if key == '12345678':
        file = open('history', 'r')
        try:
            text = file.read()
        finally:
            file.close()
        obj = DES.new(key)
        # the history file stores hex text; convert back to raw bytes, then decrypt
        get_cryp = a2b_hex(text)
        after_text = obj.decrypt(get_cryp)
        print '\nChat History: \n' + after_text
        break;
    else:
        result = raw_input("Wrong!Input anything try again!(If you won't try another time, just input 'no') Your Answer is: ")
        if result == 'no':
            break;
| true |
2f69cab1f6c54b5ea04ebab413e73dc88f8759ac
|
Python
|
markhdavid/NYBC
|
/PyYiddish/corpus_builder.py
|
UTF-8
| 2,756 | 3.03125 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
import nltk
from xml.etree import ElementTree as et
import json
import codecs
import argparse
def crappy_tokenize(path):
    """Parse a MediaWiki XML export and tokenize every page.

    Returns two dicts keyed by a sequential integer id:
        structured_data[id] = (tokenized_title, tokenized_body)
        raw_data[id]        = (title, body)
    Pages with an empty title or body are skipped (and consume no id).
    """
    # namespace prefix used by every tag in the export-0.8 schema
    prefix = "{http://www.mediawiki.org/xml/export-0.8/}"
    all_wiki_pages = et.parse(path)
    root = all_wiki_pages.getroot()
    structured_data = {}
    raw_data = {}
    id_ = 0
    for page in root.findall(prefix+"page"):
        title = page.findall(prefix+"title")[0].text
        body = page.findall(prefix+"revision/"+prefix+"text")[0].text
        if title and body:
            tokenized_title = nltk.word_tokenize(title)
            tokenized_body = nltk.word_tokenize(body)
            structured_data[id_] = (tokenized_title, tokenized_body)
            raw_data[id_] = (title, body)
            # progress output (Python 2 print statement)
            print u"{0:d} - {1:s}".format(id_, title)
            id_ += 1
    return structured_data, raw_data
def sort_and_scrub(all_articles, max_articles=50):
    """Interactively filter articles, longest body first.

    Shows each body and asks the user whether to keep it, stopping once
    max_articles have been accepted.  Passing a negative value (e.g. -1)
    effectively disables the cap, since num_good never equals it.
    Returns {id: (title, body)} of the accepted articles.
    (Python 2: iteritems / raw_input / print statements.)
    """
    sorted_articles = sorted(all_articles.iteritems(), key=lambda d: len(d[1][1]),
                             reverse=True)
    good_articles = {}
    num_good = 0
    for id_, (title,body) in sorted_articles:
        if num_good == max_articles:
            break
        print body
        is_good = raw_input("Is this good data? (y/n): ")
        if is_good.startswith("y"):
            good_articles[id_] = (title,body)
            num_good += 1
            print "saving {:s}".format(title)
    return good_articles
def save_useful_by_id(file_path, data):
    """Write *data* to *file_path* as UTF-8 JSON, keeping non-ASCII literal."""
    with codecs.open(file_path, "w", "UTF-8") as yjson:
        serialized = json.dumps(data, ensure_ascii=False)
        yjson.write(serialized)
def main():
    """CLI entry point: tokenize a wiki dump, interactively scrub it, and
    save the raw (and optionally tokenized) accepted articles as JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--scrub", help="file which will be read in")
    parser.add_argument("-o", "--output", help="where to put raw text (in json)")
    parser.add_argument("-a", "--all", help="write all articles to disk (in json)",
                        action="store_true")
    parser.add_argument("-n", "--n_docs", help="number of documents you want to save")
    parser.add_argument("-t", "--tokenize", help="tokenize output location")
    args = parser.parse_args()

    tokenized,raw = crappy_tokenize(args.scrub)
    print "Done reading in and tokenizing"
    if args.all:
        # -1 disables the cap inside sort_and_scrub
        scrubbed = sort_and_scrub(raw, -1)
    elif args.n_docs:
        # NOTE(review): argparse yields a *string* here, while sort_and_scrub
        # compares it with == against an int counter — confirm intended.
        scrubbed = sort_and_scrub(raw, args.n_docs)
    else:
        scrubbed = sort_and_scrub(raw)
    print "saving files...",
    save_useful_by_id(args.output, scrubbed)
    if args.tokenize:
        # keep only the tokenized versions of the accepted articles
        better_tokenized = {}
        for k,_ in scrubbed.iteritems():
            better_tokenized[k] = tokenized[k]
        save_useful_by_id(args.tokenize, better_tokenized)
    print "[DONE]"

if __name__ == "__main__":
    main()
| true |
868171a525e7848d25207f0dab842b9a26674570
|
Python
|
edfan/Project-Euler
|
/38.py
|
UTF-8
| 846 | 3.734375 | 4 |
[] |
no_license
|
def pandigital(x):
    """Return True if x contains each digit 1-9 exactly once.

    Comparing the sorted digit characters replaces the original
    build-a-list-of-ints loop; it also returns False (rather than
    crashing on int('-')) for negative inputs.
    """
    return sorted(str(x)) == list("123456789")
# Project Euler 38: largest 1-9 pandigital number formed by concatenating
# n with its multiples n*2, n*3, ... for some n.
currentmax = 0

# 2-digit n: concatenating n, 2n, 3n, 4n can give 9 digits
for x in range(10, 34):
    candidate = int(str(x) + str(2 * x) + str(3 * x) + str(4 * x))
    if pandigital(candidate) and candidate > currentmax:
        currentmax = candidate
        print(x)

# 3-digit n: concatenating n, 2n, 3n
for x in range(100, 334):
    candidate = int(str(x) + str(2 * x) + str(3 * x))
    if pandigital(candidate) and candidate > currentmax:
        currentmax = candidate
        print(x)

# 4-digit n: concatenating n, 2n
for x in range(5000, 10000):
    candidate = int(str(x) + str(2 * x))
    if pandigital(candidate) and candidate > currentmax:
        currentmax = candidate
        print(x)

print(currentmax)
| true |
9d3ab2640ae5581b35ff61e261a1f7c756954084
|
Python
|
yuolvv/TestSpider
|
/xiaobai/MongoQueue.py
|
UTF-8
| 3,528 | 3.1875 | 3 |
[] |
no_license
|
from datetime import datetime,timedelta
from pymongo import MongoClient,errors
class MongoQueue():
    """MongoDB-backed download queue tracking each URL through three states."""

    # initial state: not yet claimed by any worker
    OUTSTANDING = 1
    # currently being downloaded
    PROCESSING = 2
    # download finished
    COMPLETE = 3

    def __init__(self,db,collection,timeout=300):
        # open the MongoDB connection; timeout (seconds) is how long a
        # PROCESSING item may sit before repair() reclaims it
        self.client = MongoClient()
        self.Client = self.client[db]
        self.db = self.Client[collection]
        self.timeout = timeout

    def __bool__(self):
        """
        Truthiness of the queue: True while any document is not COMPLETE
        ($ne means "not equal" in Mongo query syntax), i.e. work is still
        outstanding or in progress.
        """
        record = self.db.find_one({'status':{'$ne':self.COMPLETE}})
        return True if record else False

    def push(self,url,title):
        # Add a new URL to the queue; the URL is the _id, so the unique
        # index deduplicates repeated pushes.
        try:
            self.db.insert({
                '_id':url,
                "status":self.OUTSTANDING,
                '主题':title
            })
            print(url,'插入队列成功')
        except errors.DuplicateKeyError as e:
            # duplicate key means the URL was already queued
            print(url,'已经存在于队列中')
            pass
    def push_imgurl(self,title,url):
        # Queue an image URL, keyed by its title.
        try:
            self.db.insert({
                '_id':title,
                'status':self.OUTSTANDING,
                'url':url
            })
            print('图片地址插入成功')
        except errors.DuplicateKeyError as e:
            print('地址已经存在了')
            pass

    def pop(self):
        """
        Atomically claim one OUTSTANDING document: mark it PROCESSING,
        stamp it with the current time, and return its _id (the URL).
        If nothing is OUTSTANDING, reset timed-out items via repair() and
        raise KeyError so the caller can retry.
        ($set is Mongo's field-update operator.)
        """
        record = self.db.find_and_modify(
            query={'status':self.OUTSTANDING},
            update={'$set':{
                'status':self.PROCESSING,
                'timestamp':datetime.now()
            }}
        )
        if record:
            return record['_id']
        else:
            self.repair()
            raise KeyError

    def pop_title(self,url):
        # Return the stored title for a queued URL.
        record = self.db.find_one({'_id':url})
        return record['主题']

    def peek(self):
        """Return the _id (URL) of one OUTSTANDING document without claiming it."""
        record = self.db.find_one({'status':self.OUTSTANDING})
        if record:
            return record['_id']

    def complete(self,url):
        """Mark the given URL as COMPLETE."""
        self.db.update({'_id':url},{'$set':{'status':self.COMPLETE}})

    def repair(self):
        """Reset non-COMPLETE items whose timestamp is older than `timeout`
        seconds back to OUTSTANDING ($lt means "less than")."""
        record = self.db.find_and_modify(
            query={
                'timestamp':{'$lt':datetime.now()-timedelta(seconds=self.timeout)},
                'status':{'$ne':self.COMPLETE}
            },
            update={'$set':{'status':self.OUTSTANDING}}
        )
        if record:
            print('重置URL状态',record['_id'])

    def clear(self):
        """Drop the whole collection. Destructive — only call on first setup."""
        self.db.drop()
| true |
513849c55c5105baf5a06a00711938b64464fb87
|
Python
|
n0execution/Cracking-the-code-interview
|
/Stacks_Queues/python/LimitedStack.py
|
UTF-8
| 866 | 3.578125 | 4 |
[] |
no_license
|
from exceptions import StackFullException, StackEmptyException
class LimitedStack(object):
    """A stack with a fixed maximum capacity.

    Attributes:
        max_size: capacity limit.
        size: current number of elements.
        top: most recently pushed element, or None when the stack is empty.
        elements: backing list, bottom ... top.
    """

    def __init__(self, max_size):
        self.max_size = max_size
        self.size = 0
        self.top = None
        self.elements = []

    def __str__(self):
        return str(self.elements)

    def push(self, x):
        """Push x on top; raise StackFullException when at capacity."""
        if self.is_full():
            raise StackFullException()
        self.elements.append(x)
        self.top = x
        self.size += 1

    def pop(self):
        """Remove and return the top element; raise StackEmptyException if empty."""
        if self.is_empty():
            raise StackEmptyException()
        top = self.elements.pop()
        self.size -= 1
        # BUG FIX: the original left a stale value in self.top after the
        # last element was popped; keep it consistent with the contents.
        self.top = self.elements[-1] if self.elements else None
        return top

    def is_full(self):
        """True when the stack holds max_size elements."""
        return self.size == self.max_size

    def is_empty(self):
        """True when the stack holds no elements."""
        return self.size == 0
| true |
ba124f038f63ace2a0106ef750af00a92f0b00dd
|
Python
|
sara/garfield
|
/garfield/dashboard/util.py
|
UTF-8
| 545 | 3.171875 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
import datetime
import pytz
def daterange(start_date, end_date):
    """Yield one datetime per day from start_date (inclusive) to end_date (exclusive)."""
    day_count = int((end_date - start_date).days)
    for offset in range(day_count):
        yield start_date + datetime.timedelta(offset)
def daterange_by_week(year, week):
    """Return seven UTC-localized datetimes covering the given week.

    The week starts on the Monday selected by strptime's '%W %w'
    directives (week-of-year counting from the first Monday).
    """
    monday = datetime.datetime.strptime("{0} {1} 1".format(year, week),
                                        "%Y %W %w")
    monday = pytz.timezone("utc").localize(monday)
    week_end = monday + datetime.timedelta(days=7)
    return list(daterange(monday, week_end))
| true |
61a72a952b76885e2d5c1733e22cb5a4dbcfdeaa
|
Python
|
mosesxie/CS1114
|
/Lab #11/q4.py
|
UTF-8
| 522 | 3.15625 | 3 |
[] |
no_license
|
def html_table_generator(filename):
    """Read a comma-separated file and return its rows as an HTML table.

    Every comma-separated field becomes a <th> cell.
    NOTE(review): lines are not stripped, so the last field of each row
    keeps its trailing newline inside the cell — confirm whether that is
    intended before "fixing" it, since callers may rely on the exact output.
    """
    # BUG FIX: the original called f.close() *after* the return statement,
    # so the file handle was never closed; `with` guarantees closure.
    with open(filename, 'r', encoding="UTF-8") as f:
        html = "<html> \n<table> \n"
        for line in f:
            row = line.split(",")
            html = html + "\t <tr> \n"
            for word in row:
                html = html + "\t\t<th>" + word + "</th>\n"
            html = html + "\t</tr> \n"
        html = html + "</table> \n</html>"
    return html
def main():
    # Render the sample CSV that ships with the lab and print the HTML.
    w = html_table_generator("lab11_q4_samplefile1-mac.csv")
    print (w)

main()
| true |
7038a994503a1106925eee57e712cd2c4fce0d5b
|
Python
|
8devendra/python-openCV-project
|
/07_date_video.py
|
UTF-8
| 795 | 2.671875 | 3 |
[] |
no_license
|
import cv2
import datetime
# Webcam preview that stamps "<width>X<height> <timestamp>" on every frame.
# Press 'q' to quit, or 'y' to save the current frame and quit.
cap=cv2.VideoCapture(0)
w=str(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h=str(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(w)
print(h)
##you can set size of cam win
#set(cv2.CAP_PROP_FRAME_WEIDTH,202)
while(cap.isOpened()):
    ret, frame=cap.read()
    if ret == True:
        font=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
        dat=str(datetime.datetime.now())
        text=w + 'X' + h + ' ' + dat
        frame=cv2.putText(frame,text,(10,50),font,1,(0,255,255),1,cv2.LINE_AA) #write text on frame
        cv2.imshow('frme',frame)
        # NOTE(review): each waitKey(1) call waits separately, so a single
        # keypress can be consumed by the first check and missed by the second.
        if cv2.waitKey(1) & 0XFF==ord('q'):
            break
        elif cv2.waitKey(1) & 0XFF==ord('y'):
            cv2.imwrite('videoCapWtDate.png',frame)
            break
    else:
        # camera read failed: stop the loop
        break
cap.release()
cv2.destroyAllWindows()
| true |
c3294d14dcfcd9340c61bc2ac7e9fbe76f58e860
|
Python
|
miocalla/LCA_Python
|
/LCA_Python/LCA_Test.py
|
UTF-8
| 2,591 | 3.421875 | 3 |
[] |
no_license
|
import unittest
from LCA import *
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.left = Node(6)
root.right.right = Node(7)
'''
1
/ \
2 3
/\ /\
4 5 6 7
'''
class testLowestCommonAncestor(unittest.TestCase):
    """Unit tests for findLowestCommonAncestor() and findPath() from LCA.

    All tests use the module-level seven-node tree unless they rebind
    `root` locally; -1 is the sentinel for "no LCA / value not found".
    """
    def setUp(self):
        pass

    def testNormalCases(self):
        # Same-level
        self.assertEqual(findLowestCommonAncestor(root, 2, 3), 1)
        self.assertEqual(findLowestCommonAncestor(root, 4, 6), 1)
        # Different-Level
        self.assertEqual(findLowestCommonAncestor(root, 2, 7), 1)
        self.assertEqual(findLowestCommonAncestor(root, 4, 3), 1)

    def testLCANotRoot(self):
        # The LCA can lie below the root.
        self.assertEqual(findLowestCommonAncestor(root, 6, 7), 3)
        self.assertEqual(findLowestCommonAncestor(root, 4, 5), 2)

    def testNullTree(self):
        # Locally shadows the module-level `root` with an empty tree.
        root = None
        self.assertEqual(findLowestCommonAncestor(root, 2, 3), -1)
        path = []
        self.assertFalse(findPath(root, path, 8))
        self.assertListEqual(path, [])

    def testRootAsInput(self):
        self.assertEqual(findLowestCommonAncestor(root, 1, 4), 1)

    def testOnlyOneExists(self):
        # One of the two requested values is absent from the tree.
        self.assertEqual(findLowestCommonAncestor(root, 4, 10), -1)
        self.assertEqual(findLowestCommonAncestor(root, 21, 7), -1)

    def testLCASameNode(self):
        # A node is its own lowest common ancestor.
        self.assertEqual(findLowestCommonAncestor(root, 1, 1), 1)
        self.assertEqual(findLowestCommonAncestor(root, 7, 7), 7)

    def testNeitherExist(self):
        self.assertEqual(findLowestCommonAncestor(root, 10, 8), -1)

    def testOnlyOneNode(self):
        root = Node(10)
        self.assertEqual(findLowestCommonAncestor(root, 10, 10), 10)

    def testPathFinder(self):
        # findPath() fills `path` with node values from the start node to
        # the target and returns whether the target was found.
        path = []
        # Root to itself
        self.assertTrue(findPath(root, path, 1))
        self.assertListEqual(path, [1])
        path = []
        # Right Side
        self.assertTrue(findPath(root, path, 7))
        self.assertListEqual(path, [1, 3, 7])
        path = []
        # Left Side
        self.assertTrue(findPath(root, path, 2))
        self.assertListEqual(path, [1, 2])
        path = []
        # No Path
        self.assertFalse(findPath(root, path, 12))
        self.assertListEqual(path, [])
        # Path not from root
        path = []
        self.assertTrue(findPath(root.right, path, 7))
        self.assertListEqual(path, [3, 7])
        print('Success.')
# Run the suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
0637422638e477dfeeec7bbf592a9e489949fd8d
|
Python
|
eraserxp/energy_transfer
|
/changing_field_orientation/center_locations.py
|
UTF-8
| 396 | 2.625 | 3 |
[] |
no_license
|
from numpy import *
from pylab import *

# Columns 1 and 2 of the data file hold the x/y centre coordinates of the
# wavepacket at successive times (column meaning presumed from the plot
# labels -- TODO confirm against the file producer).
x, y = loadtxt("center_locations.dat", usecols=[1,2], unpack=True)
# Disabled linear fit of the trajectory (kept for reference):
#(a, b) = polyfit(x,y,1)
#angle = arctan(a)*180/pi
#angle = angle.round(decimals=1)
#yfit = polyval([a,b],x)
title("The motion of wavepacket in space")
plot(x,y, 'k.')
xlabel('x')
ylabel('y')
#plot(x,yfit,'r')
# File name encodes the run parameters (wavevector/angles) of this dataset.
savefig("wavepacket_motion_kax_0.5pi_kay_0.5pi_theta_0_90_phi_0.pdf")
show()
| true |
d017a70d825bd4f94ab11394bba8c341c5b9dd87
|
Python
|
rajikalk/Scripts
|
/Modules/sfrimann/binned_statistic.py
|
UTF-8
| 1,135 | 2.53125 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# so that python 2.x and 3.x will work the same
from __future__ import print_function, absolute_import, division
from scipy.stats import binned_statistic as bin_stat
import numpy as np
def binned_statistic(xdata,ydata,range=None,xlog=False,ylog=False,nbin=10,sigma=1.):
    """Bin *ydata* by *xdata* and return robust per-bin statistics.

    Returns ``(xc, y, yl, yu, ys, count)``: bin centres, per-bin medians,
    lower/upper bounds at ``sigma`` times the MAD-based scatter, the scatter
    itself, and the number of points per bin.  With ``xlog``/``ylog`` the
    binning/statistics are computed in log10 space; centres, medians and
    bounds are mapped back, while ``ys`` stays in log space.
    """
    # Scaled median absolute deviation: consistent estimator of sigma
    # for normally distributed data (factor 1.4826).
    def scaled_mad(vals):
        return 1.4826 * np.median(np.abs(np.median(vals) - vals))

    # Binning range, expressed in the same (possibly logarithmic) space
    # as the binned coordinate.
    if range is None:
        bounds = [xdata.min(), xdata.max()]
        if xlog:
            bounds = [np.log10(bounds[0]), np.log10(bounds[1])]
    else:
        bounds = [np.log10(range[0]), np.log10(range[1])] if xlog else range

    xval = np.log10(xdata) if xlog else xdata
    yval = np.log10(ydata) if ylog else ydata

    count, edges, _ = bin_stat(xval, yval, statistic='count', bins=nbin, range=bounds)
    med, edges, _ = bin_stat(xval, yval, statistic='median', bins=nbin, range=bounds)
    scatter, edges, _ = bin_stat(xval, yval, statistic=scaled_mad, bins=nbin, range=bounds)

    centres = 0.5 * (edges[1:] + edges[:-1])
    lower = med - sigma * scatter
    upper = med + sigma * scatter

    # Undo the log transforms (the scatter is deliberately left in log space).
    if ylog:
        med, lower, upper = 10**med, 10**lower, 10**upper
    if xlog:
        centres = 10**centres
    return centres, med, lower, upper, scatter, count
| true |
7d4113d59a09fca22a47b35baad1dab3825819b6
|
Python
|
miniyk2012/leetcode
|
/leetcode_projects/leetcode_78/solution.py
|
UTF-8
| 630 | 3.328125 | 3 |
[] |
no_license
|
from typing import List
class Solution:
    """LeetCode 78 -- enumerate all subsets (the power set) of `nums`."""

    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return every subset of *nums* in exclude-branch-first DFS order."""
        if not nums:
            return [[]]
        size = len(nums)
        # Counting masks 0..2**n-1 and taking nums[i] when bit (n-1-i) is
        # set reproduces exactly the order produced by the recursive search
        # below (exclude first, then include).
        return [
            [nums[i] for i in range(size) if mask & (1 << (size - 1 - i))]
            for mask in range(1 << size)
        ]

    def dfs_subsets(self, nums, index, rets, ret):
        """Recursive enumeration: branch without nums[index] first, then with it."""
        if index >= len(nums):
            rets.append(list(ret))
            return
        self.dfs_subsets(nums, index + 1, rets, ret)   # exclude nums[index]
        ret.append(nums[index])                        # include nums[index]
        self.dfs_subsets(nums, index + 1, rets, ret)
        ret.pop()                                      # backtrack
if __name__ == '__main__':
    # Manual smoke test: print the power set of [1, 2, 3].
    solver = Solution()
    print(solver.subsets([1, 2, 3]))
| true |
836759ce41b239cf596a05544059103e1e51de0f
|
Python
|
soyoonjeong/python_crawling
|
/soyoon news/main.py
|
UTF-8
| 706 | 2.796875 | 3 |
[] |
no_license
|
import requests
from flask import Flask, render_template, request

# Hacker News search API (Algolia) root URL.
base_url = "http://hn.algolia.com/api/v1"
# This URL gets the newest stories.
new = f"{base_url}/search_by_date?tags=story"
# This URL gets the most popular stories.
popular = f"{base_url}/search?tags=story"
# This function makes the URL to get the detail of a story by id.
# Here's the documentation: https://hn.algolia.com/api
# def make_detail_url(id):
#     return f"{base_url}/items/{id}"

# In-memory cache for fetched stories (currently unused).
db = {}
app = Flask("DayNine")


@app.route("/")
def home():
    """Fetch the newest stories and render the index page.

    NOTE(review): only the Response object is printed; the JSON payload is
    not yet passed to the template -- presumably work in progress.
    """
    data = requests.get(new)
    print(data)
    return render_template("index.html")


@app.route("/contact")
def potato():
    """Placeholder contact page."""
    return "Contact me!"


app.run(host="127.0.0.1")
| true |
e0b2e962db2ea1fb7fdf06cb5ef53f6988a405d8
|
Python
|
tfredwell/cs4500
|
/cozmo_taste_game/robot/cozmo_robot.py
|
UTF-8
| 2,419 | 2.84375 | 3 |
[] |
no_license
|
import asyncio
from asyncio import sleep
from typing import List
from cozmo.robot import Robot
from cozmo_taste_game.robot import EvtWrongFoodGroup, EvtCorrectFoodGroup, EvtUnknownTag, EvtNewGameStarted
import logging
logger = logging.getLogger('cozmo_taste_game.robot')
class RealTasterBot:
    """Drives a physical Cozmo robot in response to taste-game events.

    Each game event is announced through the robot's text-to-speech; when
    no robot is connected the announcement is logged instead.
    """

    def __init__(self):
        self.cozmo = None   # cozmo.robot.Robot instance once connect() runs
        self.world = None

    async def __start_new_game(self, evt: EvtNewGameStarted, **kw) -> None:
        """Announce which food group the robot wants for the new game."""
        logger.info(f'recv event {evt}')
        await self.__safe_say_text(f'I am hungry for some {evt.food_group.name}')

    async def __unknown_tag(self, evt: EvtUnknownTag, **kw) -> None:
        """React to an unrecognised item tag."""
        logger.info(f'recv event {evt}')
        await self.__safe_say_text('Hmm, I do not know what that is!')

    async def __wrong_food(self, evt: EvtWrongFoodGroup, **kw) -> None:
        """Tell the player the item does not belong to the wanted food group."""
        logger.info(f'recv event {evt}')
        await self.__safe_say_text(f'A {evt.food_item.name} is not a {evt.expected_food_group.name}')

    async def __correct_food(self, evt: EvtCorrectFoodGroup, **kw) -> None:
        """Celebrate a correctly classified food item."""
        logger.info(f'recv event {evt}')
        msg = f'Yum! The {evt.food_item.food_group.name} {evt.food_item.name} is {evt.food_item.taste}'
        await self.__safe_say_text(msg, in_parallel=True)

    async def __safe_say_text(self, text, animations: List[object] = None, **kwargs):
        """Say *text* on the robot if one is connected; otherwise just log.

        BUG FIX: the original passed the ``kwargs`` dict as the second
        *positional* argument of ``say_text``; it must be unpacked so
        options such as ``in_parallel`` reach the SDK as keyword arguments.
        """
        if self.cozmo:
            action = self.cozmo.say_text(text, **kwargs)
            await action.wait_for_completed()
        else:
            logger.info(f'cozmo is not connected, not saying {text}')
            await asyncio.sleep(.1)

    def connect(self, engine, cozmo_instance: Robot):
        """Remember the robot and subscribe the handlers to *engine* events."""
        self.cozmo = cozmo_instance
        self.world = cozmo_instance.world
        # NOTE(review): 'add_event_hander' (sic) matches the engine's
        # spelling -- keep in sync if the engine API is ever renamed.
        engine.add_event_hander(EvtNewGameStarted, self.__start_new_game)
        engine.add_event_hander(EvtUnknownTag, self.__unknown_tag)
        engine.add_event_hander(EvtWrongFoodGroup, self.__wrong_food)
        engine.add_event_hander(EvtCorrectFoodGroup, self.__correct_food)

    def disconnect(self, engine) -> None:
        """
        Disconnects a robot from the game engine
        """
        engine.remove_event_handler(EvtNewGameStarted, self.__start_new_game)
        engine.remove_event_handler(EvtUnknownTag, self.__unknown_tag)
        engine.remove_event_handler(EvtWrongFoodGroup, self.__wrong_food)
        engine.remove_event_handler(EvtCorrectFoodGroup, self.__correct_food)
| true |
2696f6499ed2f39bd3100a0aef7be905b2e7ccb7
|
Python
|
keisuke-isobe/Magic8Ball
|
/cs2a-flask/flask_final_project.py
|
UTF-8
| 6,643 | 3.484375 | 3 |
[] |
no_license
|
"""
This flask app presents a webpage which acts as an interactive Magic 8 Ball. It has a
larger variety of responses (and more ambiguity) than your traditional Magic 8 Ball.
There are two necessary APIs for this project: the indico.io API which was used in class,
and the Google Cloud Natural Language Processing API. To install indico.io, install pillow
(pip install Pillow) and (pip install indicoio). If you experience errors, you may need
to install an older version of Pillow. To install the Google API, follow this link:
To install the Google API, follow this link: https://cloud.google.com/natural-language/docs/reference/libraries
Something interesting: We used the indico.io and Google Cloud Natural Language Processing APIs
to determine the highest importance noun in the user input question if the user input question
was not a simple yes/no question, and used that extracted noun in the Magic 8 Ball's
vague, non-committal response to make it seem a bit more intelligent.
"""
import random
import string
import indicoio
import operator

# SECURITY(review): hard-coded API credential committed to source control;
# it should be rotated and loaded from the environment instead.
indicoio.config.api_key = 'f954e20684d172b9ebcc869bc9fac4b1'

from google.cloud import language
# Requires Google Cloud credentials to be configured in the environment.
language_client = language.Client()

from flask import Flask, render_template, request
app = Flask(__name__)
# Canned Magic 8 Ball answers, grouped by sentiment; `defaults` is indexed
# 0-19 by random_response().
default_positive = ['It is certain', 'It is decidedly so', 'Without a doubt',
    'Yes definitely', 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good',
    'Yes', 'Signs point to yes']
default_neutral = ['Reply hazy, try again', 'Ask again later', 'Better not to tell you now',
    'Cannot predict now', 'Concentrate and ask again']
default_negative = ['Don\'t count on it', 'My reply is no', 'My sources say no',
    'Outlook not so good', 'Very doubtful']
defaults = default_positive + default_neutral + default_negative

# Words that commonly introduce questions.
wh_words = ["who", "what", "when", "where", "which", "who", "whom", "whose", "why", "how"]
yn_words = ["can", "could", "may", "might", "shall", "should", "will", "would", "must", "ought", "be", "do", "have"]
question_words = wh_words + yn_words


def isQuestion(text):
    """Return True if *text* looks like a question.

    A statement is considered a question when it ends with a question mark,
    or, failing that, when it contains any of the words associated with
    questions (see `question_words`).
    """
    text = text.lower().strip()
    # ROBUSTNESS: empty input used to raise IndexError on text[-1].
    if not text:
        return False
    if text[-1] == '?':
        return True
    # BUG FIX 1: str.translate() needs a table built with str.maketrans();
    # passing string.punctuation directly did not strip punctuation.
    # BUG FIX 2: the original compared set(text) -- a set of *characters* --
    # against multi-character question words, so this branch always
    # returned False.
    words = text.translate(str.maketrans('', '', string.punctuation)).split()
    return not set(words).isdisjoint(question_words)
"""
Function which returns whether or not a passed statement is a yes-no question or
not. Yes-no questions are asked with the verbs "be," "do," "have," or a modal/
auxiliary verb. The function here checks whether or not the statement contains
any of these verbs, and if so, returns true. Otherwise it returns false.
"""
def isYesNoQuestion(text):
text = text.translate(string.punctuation)
text = text.lower()
text = text.split(" ")
return not set(text).isdisjoint(yn_words)
"""
Takes in a text string and returns a random response from a magic 8-ball.
"""
def random_response(submission):
random_answer = random.randint(0, 10) + random.randint(0,9)
return defaults[random_answer];
"""
This function returns the keywords found in the user input statement. This
function removes keywords that are longer than one word, because we only want
a dictionary that contains singe-word entries.
"""
def keyword(user_input):
keywords = indicoio.keywords(user_input, top_n = 5)
for k in list(keywords.keys()):
s = k.split(" ")
if len(s) > 1:
del(keywords[k])
return keywords
"""
This function takes the keywords dictionary returned by the indicoio keywords
function and returns it in sorted order from highest to lowest importance, in
string format.
"""
def sortKeywords(keywords):
sorted_keywords = sorted(keywords.items(), key = operator.itemgetter(1), reverse = True)
sorted_keywords_keys = []
for i in range(0,len(sorted_keywords)):
sorted_keywords_keys.append(sorted_keywords[i][0])
sorted_keywords_keys = " ".join(str(s) for s in sorted_keywords_keys)
return sorted_keywords_keys
"""
Function that returns a dictionary that contains a keyword and it's part
of speech.
"""
def wordForm(sorted_keywords):
document = language_client.document_from_text(sorted_keywords)
annotations = document.annotate_text().tokens
words = {}
for token in annotations:
word = token.text_content
pos = token.part_of_speech
words[word] = pos
return words
"""
This function returns the highest scoring noun in wotdForm.
"""
def firstNoun(wordForm):
nouns = [token for token in wordForm if wordForm[token] == "NOUN"]
if len(nouns) == 0:
return -1
else:
return nouns[0]
"""
Main method. This function sets up the main page by returning an HTML template.
"""
@app.route('/', methods = ['GET','POST'])
def main():
if request.method == 'POST':
user_input = request.form['submission']
submission = user_input.split(' ')
submission = request.form['submission']
if isQuestion(submission):
if isYesNoQuestion(submission):
response = random_response(submission);
return render_template("main.html", eightballresponse = response);
else:
keywords = keyword(submission)
sorted_keywords = sortKeywords(keywords)
words = wordForm(sorted_keywords)
noun = firstNoun(words)
response = "That's not a yes or no question, so I'm not sure if I can answer..."
if noun != -1:
if (noun == "lunch" or noun == "breakfast" or noun == "dinner"):
response = response + "I don't know what you should about " + noun + "."
else:
response = response + "I don't know what you should do about the " + noun + "."
return render_template("main.html", eightballresponse = response);
else:
return render_template("main.html", eightballresponse = "That's not a question. I think.");
else:
return render_template("main.html", eightballresponse = "You didn't ask me anything.");
""" This function returns the HTML template for the group members involved in this project."""
@app.route('/group')
def group():
return render_template('group.html')
if __name__ == '__main__':
app.run('0.0.0.0',port=3000)
| true |
ac08b61ccebd169a259706cbf8fc64795e79f0f7
|
Python
|
zhifanzhu/Local-Mid-Propagation
|
/mmdet/models/losses/bootstrapped_sigmoid_classification_loss.py
|
UTF-8
| 2,782 | 2.765625 | 3 |
[
"Apache-2.0"
] |
permissive
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import weight_reduce_loss
from ..registry import LOSSES
@LOSSES.register_module
class BootstrappedSigmoidClassificationLoss(nn.Module):
    """From Google's Object Detction API:
    Bootstrapped sigmoid cross entropy classification loss function.

    This loss uses a convex combination of training labels and the current model's
    predictions as training targets in the classification loss. The idea is that
    as the model improves over time, its predictions can be trusted more and we
    can use these predictions to mitigate the damage of noisy/incorrect labels,
    because incorrect labels are likely to be eventually highly inconsistent with
    other stimuli predicted to have the same label by the model.

    In "soft" bootstrapping, we use all predicted class probabilities, whereas in
    "hard" bootstrapping, we use the single class favored by the model.

    See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by
    Reed et al. (ICLR 2015).
    """
    def __init__(self,
                 alpha,
                 bootstrap_type='soft',
                 reduction='mean',
                 loss_weight=1.0):
        # alpha: weight of the ground-truth one-hot labels in the convex
        # combination; (1 - alpha) is given to the model's own predictions.
        super(BootstrappedSigmoidClassificationLoss, self).__init__()
        assert bootstrap_type in ('soft', 'hard')
        self.alpha = alpha
        self.bootstrap_type = bootstrap_type
        self.reduction = reduction
        # NOTE(review): loss_weight is stored but never applied in forward();
        # confirm whether the caller scales the loss externally.
        self.loss_weight = loss_weight

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the bootstrapped classification loss.

        cls_score: raw logits of shape (N, num_classes); label: integer
        class indices -- assumed shapes, TODO confirm against callers.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        num_classes = cls_score.size(-1)
        # One-hot targets cast to the logits' dtype.
        target_tensor = F.one_hot(label, num_classes=num_classes).type(cls_score.dtype)
        if self.bootstrap_type == 'soft':
            # Soft: blend labels with per-class predicted probabilities.
            bootstrap_target_tensor = self.alpha * target_tensor + (
                1.0 - self.alpha) * torch.sigmoid(cls_score)
        else:
            # Hard: blend labels with thresholded (0/1) predictions.
            bootstrap_target_tensor = self.alpha * target_tensor + (
                1.0 - self.alpha) * (torch.sigmoid(cls_score) > 0.5).type(cls_score.dtype)
        # NOTE(review): cross_entropy is already reduced with `reduction`
        # here, and weight_reduce_loss applies `reduction` again below;
        # other mmdet losses compute elementwise (reduction='none') first.
        # This double reduction looks suspicious when weight/avg_factor is
        # supplied -- confirm intended behaviour.
        per_entry_cross_ent = F.cross_entropy(cls_score,
                                              target=bootstrap_target_tensor.argmax(1),
                                              reduction=reduction)
        if weight is not None:
            weight = weight.float()
        loss = weight_reduce_loss(per_entry_cross_ent, weight,
                                  reduction=reduction, avg_factor=avg_factor)
        return loss
| true |
ab8eaac7282d3a3c45b375544a8ddb290676f8b2
|
Python
|
sympy/sympy
|
/sympy/core/containers.py
|
UTF-8
| 11,315 | 3.4375 | 3 |
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
"""Module for SymPy containers
(SymPy objects that store other SymPy objects)
The containers implemented in this module are subclassed to Basic.
They are supposed to work seamlessly within the SymPy framework.
"""
from collections import OrderedDict
from collections.abc import MutableSet
from typing import Any, Callable
from .basic import Basic
from .sorting import default_sort_key, ordered
from .sympify import _sympify, sympify, _sympy_converter, SympifyError
from sympy.core.kind import Kind
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import as_int
class Tuple(Basic):
    """
    Wrapper around the builtin tuple object.

    Explanation
    ===========

    The Tuple is a subclass of Basic, so that it works well in the
    SymPy framework. The wrapped tuple is available as self.args, but
    you can also access elements or slices with [:] syntax.

    Parameters
    ==========

    sympify : bool
        If ``False``, ``sympify`` is not called on ``args``. This
        can be used for speedups for very large tuples where the
        elements are known to already be SymPy objects.

    Examples
    ========

    >>> from sympy import Tuple, symbols
    >>> a, b, c, d = symbols('a b c d')
    >>> Tuple(a, b, c)[1:]
    (b, c)
    >>> Tuple(a, b, c).subs(a, d)
    (d, b, c)

    """
    def __new__(cls, *args, **kwargs):
        # Sympify each element unless the caller opts out for speed.
        if kwargs.get('sympify', True):
            args = (sympify(arg) for arg in args)
        obj = Basic.__new__(cls, *args)
        return obj

    def __getitem__(self, i):
        """Index or slice access; slicing returns a new Tuple."""
        if isinstance(i, slice):
            indices = i.indices(len(self))
            return Tuple(*(self.args[j] for j in range(*indices)))
        return self.args[i]

    def __len__(self):
        return len(self.args)

    def __contains__(self, item):
        return item in self.args

    def __iter__(self):
        return iter(self.args)

    def __add__(self, other):
        """Concatenate with another Tuple or builtin tuple."""
        if isinstance(other, Tuple):
            return Tuple(*(self.args + other.args))
        elif isinstance(other, tuple):
            return Tuple(*(self.args + other))
        else:
            return NotImplemented

    def __radd__(self, other):
        """Concatenation with a Tuple/tuple on the left-hand side."""
        if isinstance(other, Tuple):
            return Tuple(*(other.args + self.args))
        elif isinstance(other, tuple):
            return Tuple(*(other + self.args))
        else:
            return NotImplemented

    def __mul__(self, other):
        """Repeat the Tuple an integer number of times, like ``tuple * n``."""
        try:
            n = as_int(other)
        except ValueError:
            raise TypeError("Can't multiply sequence by non-integer of type '%s'" % type(other))
        return self.func(*(self.args*n))

    __rmul__ = __mul__

    def __eq__(self, other):
        # Compare as Basic against SymPy objects, as a raw tuple otherwise.
        if isinstance(other, Basic):
            return super().__eq__(other)
        return self.args == other

    def __ne__(self, other):
        if isinstance(other, Basic):
            return super().__ne__(other)
        return self.args != other

    def __hash__(self):
        return hash(self.args)

    def _to_mpmath(self, prec):
        # Convert elementwise to mpmath numbers at the given precision.
        return tuple(a._to_mpmath(prec) for a in self.args)

    def __lt__(self, other):
        return _sympify(self.args < other.args)

    def __le__(self, other):
        return _sympify(self.args <= other.args)

    # XXX: Basic defines count() as something different, so we can't
    # redefine it here. Originally this lead to cse() test failure.
    def tuple_count(self, value) -> int:
        """Return number of occurrences of value."""
        return self.args.count(value)

    def index(self, value, start=None, stop=None):
        """Searches and returns the first index of the value."""
        # XXX: One would expect:
        #
        # return self.args.index(value, start, stop)
        #
        # here. Any trouble with that? Yes:
        #
        # >>> (1,).index(1, None, None)
        # Traceback (most recent call last):
        # File "<stdin>", line 1, in <module>
        # TypeError: slice indices must be integers or None or have an __index__ method
        #
        # See: http://bugs.python.org/issue13340
        if start is None and stop is None:
            return self.args.index(value)
        elif stop is None:
            return self.args.index(value, start)
        else:
            return self.args.index(value, start, stop)

    @property
    def kind(self):
        """
        The kind of a Tuple instance.

        The kind of a Tuple is always of :class:`TupleKind` but
        parametrised by the number of elements and the kind of each element.

        Examples
        ========

        >>> from sympy import Tuple, Matrix
        >>> Tuple(1, 2).kind
        TupleKind(NumberKind, NumberKind)
        >>> Tuple(Matrix([1, 2]), 1).kind
        TupleKind(MatrixKind(NumberKind), NumberKind)
        >>> Tuple(1, 2).kind.element_kind
        (NumberKind, NumberKind)

        See Also
        ========

        sympy.matrices.common.MatrixKind
        sympy.core.kind.NumberKind
        """
        return TupleKind(*(i.kind for i in self.args))
_sympy_converter[tuple] = lambda tup: Tuple(*tup)
def tuple_wrapper(method):
    """
    Decorator that converts any tuple in the function arguments into a Tuple.

    Explanation
    ===========

    The motivation for this is to provide simple user interfaces. The user can
    call a function with regular tuples in the argument, and the wrapper will
    convert them to Tuples before handing them to the function.

    Only positional arguments are converted; keyword arguments are passed
    through unchanged.

    Examples
    ========

    >>> from sympy.core.containers import tuple_wrapper
    >>> def f(*args):
    ...    return args
    >>> g = tuple_wrapper(f)

    The decorated function g sees only the Tuple argument:

    >>> g(0, (1, 2), 3)
    (0, (1, 2), 3)

    """
    # Local import keeps the module's top-level import block unchanged.
    from functools import wraps

    @wraps(method)  # preserve the wrapped function's name and docstring
    def wrap_tuples(*args, **kw_args):
        newargs = []
        for arg in args:
            if isinstance(arg, tuple):
                newargs.append(Tuple(*arg))
            else:
                newargs.append(arg)
        return method(*newargs, **kw_args)
    return wrap_tuples
class Dict(Basic):
    """
    Wrapper around the builtin dict object.

    Explanation
    ===========

    The Dict is a subclass of Basic, so that it works well in the
    SymPy framework. Because it is immutable, it may be included
    in sets, but its values must all be given at instantiation and
    cannot be changed afterwards. Otherwise it behaves identically
    to the Python dict.

    Examples
    ========

    >>> from sympy import Dict, Symbol
    >>> D = Dict({1: 'one', 2: 'two'})
    >>> for key in D:
    ...    if key == 1:
    ...        print('%s %s' % (key, D[key]))
    1 one

    The args are sympified so the 1 and 2 are Integers and the values
    are Symbols. Queries automatically sympify args so the following work:

    >>> 1 in D
    True
    >>> D.has(Symbol('one')) # searches keys and values
    True
    >>> 'one' in D # not in the keys
    False
    >>> D[1]
    one

    """
    def __new__(cls, *args):
        # Accept either a single dict/Dict or a sequence of (key, value) pairs.
        if len(args) == 1 and isinstance(args[0], (dict, Dict)):
            items = [Tuple(k, v) for k, v in args[0].items()]
        elif iterable(args) and all(len(arg) == 2 for arg in args):
            items = [Tuple(k, v) for k, v in args]
        else:
            raise TypeError('Pass Dict args as Dict((k1, v1), ...) or Dict({k1: v1, ...})')
        elements = frozenset(items)
        # Canonically ordered args make structurally equal Dicts identical.
        obj = Basic.__new__(cls, *ordered(items))
        obj.elements = elements
        obj._dict = dict(items)  # In case Tuple decides it wants to sympify
        return obj

    def __getitem__(self, key):
        """x.__getitem__(y) <==> x[y]"""
        try:
            key = _sympify(key)
        except SympifyError:
            raise KeyError(key)
        return self._dict[key]

    def __setitem__(self, key, value):
        # Immutable by design -- see the class docstring.
        raise NotImplementedError("SymPy Dicts are Immutable")

    def items(self):
        '''Returns a set-like object providing a view on dict's items.
        '''
        return self._dict.items()

    def keys(self):
        '''Returns the list of the dict's keys.'''
        return self._dict.keys()

    def values(self):
        '''Returns the list of the dict's values.'''
        return self._dict.values()

    def __iter__(self):
        '''x.__iter__() <==> iter(x)'''
        return iter(self._dict)

    def __len__(self):
        '''x.__len__() <==> len(x)'''
        return self._dict.__len__()

    def get(self, key, default=None):
        '''Returns the value for key if the key is in the dictionary.'''
        try:
            key = _sympify(key)
        except SympifyError:
            # An unsympifiable key cannot be present.
            return default
        return self._dict.get(key, default)

    def __contains__(self, key):
        '''D.__contains__(k) -> True if D has a key k, else False'''
        try:
            key = _sympify(key)
        except SympifyError:
            return False
        return key in self._dict

    def __lt__(self, other):
        return _sympify(self.args < other.args)

    @property
    def _sorted_args(self):
        return tuple(sorted(self.args, key=default_sort_key))

    def __eq__(self, other):
        # Builtin dicts are converted so comparison uses the canonical form.
        if isinstance(other, dict):
            return self == Dict(other)
        return super().__eq__(other)

    __hash__ : Callable[[Basic], Any] = Basic.__hash__
# this handles dict, defaultdict, OrderedDict
_sympy_converter[dict] = lambda d: Dict(*d.items())
class OrderedSet(MutableSet):
    """A mutable set that remembers insertion order (backed by OrderedDict)."""

    def __init__(self, iterable=None):
        if iterable:
            self.map = OrderedDict((item, None) for item in iterable)
        else:
            self.map = OrderedDict()

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        self.map[key] = None

    def discard(self, key):
        # BUG FIX: per the collections.abc.MutableSet contract, discard()
        # must be a no-op when the element is absent; the original raised
        # KeyError.  (MutableSet.remove() still raises, as it checks
        # membership before calling discard().)
        self.map.pop(key, None)

    def pop(self, last=True):
        """Remove and return an element (the most recent one by default)."""
        return self.map.popitem(last=last)[0]

    def __iter__(self):
        yield from self.map.keys()

    def __repr__(self):
        if not self.map:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.map.keys()))

    def intersection(self, other):
        """Ordered intersection: elements of self that are also in *other*."""
        return self.__class__([val for val in self if val in other])

    def difference(self, other):
        """Ordered difference: elements of self that are not in *other*."""
        return self.__class__([val for val in self if val not in other])

    def update(self, iterable):
        for val in iterable:
            self.add(val)
class TupleKind(Kind):
    """
    TupleKind is a subclass of Kind, which is used to define Kind of ``Tuple``.

    Parameters of TupleKind will be kinds of all the arguments in Tuples, for
    example

    Parameters
    ==========

    args : tuple(element_kind)
      element_kind is kind of element.
      args is tuple of kinds of element

    Examples
    ========

    >>> from sympy import Tuple
    >>> Tuple(1, 2).kind
    TupleKind(NumberKind, NumberKind)
    >>> Tuple(1, 2).kind.element_kind
    (NumberKind, NumberKind)

    See Also
    ========

    sympy.core.kind.NumberKind
    MatrixKind
    sympy.sets.sets.SetKind
    """
    def __new__(cls, *args):
        instance = super().__new__(cls, *args)
        # Keep the per-element kinds available for introspection.
        instance.element_kind = args
        return instance

    def __repr__(self):
        return f"TupleKind{self.element_kind}"
| true |
5e3e0d8e5be24172b13d78b40738af7b2c53e418
|
Python
|
nvnnv/Leetcode
|
/1717. Maximum Score From Removing Substrings.py
|
UTF-8
| 699 | 2.875 | 3 |
[] |
no_license
|
class Solution:
    """LeetCode 1717 -- maximum score from removing "ab"/"ba" substrings."""

    def maximumGain(self, s: str, x: int, y: int) -> int:
        """Greedily remove the higher-valued pair first, then the other.

        Removing all occurrences of the more valuable pair before the less
        valuable one is optimal; each pass uses a stack to collapse pairs.
        """
        # Order the two target pairs so the more valuable one goes first.
        if x > y:
            pairs, points = ['ab', 'ba'], [x, y]
        else:
            pairs, points = ['ba', 'ab'], [y, x]
        total = 0
        text = s
        for pair, score in zip(pairs, points):
            stack = []
            for ch in text:
                stack.append(ch)
                # Collapse every newly formed occurrence of the target pair.
                while len(stack) > 1 and stack[-2] + stack[-1] == pair:
                    total += score
                    stack.pop()
                    stack.pop()
            # Whatever survives this pass is the input for the next one.
            text = ''.join(stack)
        return total
| true |
fd184647b32187f497b68c383b444e03578ea89e
|
Python
|
leisquare/study_bigdata
|
/10_Django/ch02_wordcnt/wordcnt/views.py
|
UTF-8
| 781 | 2.6875 | 3 |
[] |
no_license
|
from django.shortcuts import render
# Create your views here.
def wordinput(request):
    """Render the text-submission form."""
    return render(request, "wordinput.html")
def result(request):
    """Show character count, word count and per-word frequencies of the
    text submitted via the `fulltext` GET parameter."""
    full = request.GET['fulltext']      # e.g. "hello hi hello"
    strlength = len(full)
    words = full.split()                # e.g. ['hello', 'hi', 'hello']
    words_dic = {}                      # word -> number of occurrences
    for word in words:
        words_dic[word] = words_dic.get(word, 0) + 1
    context = {'full': full,
               'strlength': strlength,
               'cnt': len(words),
               'dic': words_dic.items()}
    return render(request, 'result.html', context)
def about(request):
    """Render the static about page."""
    return render(request, "about.html")
| true |
d2b877bb7c98b93800d88a573a34992ec57840de
|
Python
|
Dmevo/WebTheater
|
/application/model/entity/categoria.py
|
UTF-8
| 1,142 | 2.671875 | 3 |
[] |
no_license
|
class Categoria:
    """A video category: identifier, name, photo URL, description and the
    list of videos it contains."""

    def __init__(self, id = None, nome = None, url_foto = None, descricao_categoria = None, lista_video = None):
        self.id = id
        self.nome = nome
        self.quantidade_videos = 0  # maintained via set_quantidade_videos()
        self.url_foto = url_foto
        # BUG FIX: the constructor accepted `descricao_categoria` but never
        # stored it, so get_descricao_categoria() raised AttributeError
        # unless the setter had been called first.
        self.descricao_categoria = descricao_categoria
        self.lista_video = lista_video

    def set_nome(self, nome):
        self.nome = nome

    def get_nome(self):
        return self.nome

    def set_quantidade_videos(self, quantidade_videos):
        self.quantidade_videos = quantidade_videos

    def get_quantidade_videos(self):
        return self.quantidade_videos

    def set_url_foto(self, url_foto):
        self.url_foto = url_foto

    def get_url_foto(self):
        return self.url_foto

    def set_descricao_categoria(self, descricao_categoria):
        self.descricao_categoria = descricao_categoria

    def get_descricao_categoria(self):
        return self.descricao_categoria

    def add_lista_video(self, video):
        self.lista_video.append(video)

    def get_lista_video(self):
        return self.lista_video

    def set_id(self, id):
        self.id = id

    def get_id(self):
        return self.id
| true |
73d08c117822ded5988478ca1cec380c15168919
|
Python
|
nuarlyss/python_test_kemampuan_dasar
|
/matauang.py
|
UTF-8
| 232 | 3 | 3 |
[] |
no_license
|
# Currency conversion: US dollars to euros (rates expressed in IDR).
dmatauang2idr = {'IDR': 1, 'USD': 14425, 'EUR': 16225}


def usd2eur(usd):
    """Convert *usd* US dollars to euros via the IDR-based rate table."""
    amount_idr = dmatauang2idr['USD'] * usd
    return amount_idr / dmatauang2idr['EUR']


print(usd2eur(100))
| true |
a5dfd8abba90e103d6cd5da1ff3ddedeae641be0
|
Python
|
gabriellaj45/BoardBooster
|
/matchTemplate.py
|
UTF-8
| 1,697 | 2.828125 | 3 |
[] |
no_license
|
import cv2
import os
from histogramColorClassifier import HistogramColorClassifier
def matchTemplate(image):
    """Classify the image at path *image* against the card templates by
    colour histogram.

    Returns the filename of the best-matching template, or None when there
    are no templates or the image cannot be read.
    """
    histClassifier = HistogramColorClassifier(channels=[0, 1, 2],
                                              hist_size=[128, 128, 128],
                                              hist_range=[0, 256, 0, 256, 0, 256],
                                              hist_type='BGR')
    # Register every template image as a reference histogram.
    index = 0
    for filename in os.listdir('static/cardTemplates/'):
        path = "static/cardTemplates/"
        varName = cv2.imread(path + filename)
        histClassifier.addModelHistogram(varName, filename)
        index = index + 1
    if index == 0:
        # No templates available -- nothing to match against.
        return
    # The parameter is a path string; it is re-bound to the decoded image here.
    image = cv2.imread(image)
    if image is not None:
        return histClassifier.returnBestMatchName(image)
    else:
        print('no image')
'''
import cv2
import os
import numpy as np
class matchTemplate:
def __init__(self, image, game):
self.game = game
self.image = image
self.templateMatch = None
self.findBestMatch()
def findBestMatch(self):
bestTemplateMatch = 100000000000000000
templateMatchName = None
for filename in os.listdir(self.game + '/cardTemplates/'):
path = self.game + "/cardTemplates/"
template = cv2.imread(path + filename)
card = cv2.imread(self.image)
diff_img = cv2.absdiff(card, template)
templateDiff = int(np.sum(diff_img) / 255)
if templateDiff < bestTemplateMatch:
bestTemplateMatch = templateDiff
templateMatchName = filename
self.templateMatch = templateMatchName
return templateMatchName
'''
| true |
0565bcc445d34aa61db09332862ac4598dc94ba5
|
Python
|
vlmikov/Python
|
/Multidimensional lists/03. Primary Diagonal.py
|
UTF-8
| 448 | 3.59375 | 4 |
[] |
no_license
|
def sum_primary_diagonal(matrix):
    """Return the sum of the main-diagonal elements of a square matrix."""
    return sum(row[idx] for idx, row in enumerate(matrix))
def read_matrix(size):
    """Read *size* rows of space-separated integers from standard input."""
    return [list(map(int, input().split(" "))) for _ in range(size)]
# Driver: read the matrix size, then the matrix rows, and print the sum of
# the primary diagonal.
size = int(input())
matrix = read_matrix(size)
print(sum_primary_diagonal(matrix))
| true |
1abc7e4fb415806ad8349bb51dd35f9a08f194ec
|
Python
|
stryker2k2/examprep
|
/CR_Training_Student_VS2015/Python/01. Python Practice/test_cases.py
|
UTF-8
| 3,082 | 2.609375 | 3 |
[] |
no_license
|
import unittest
from test_module import testDir, chngLetters, correctSentences, reverseWords, obfuscatedStrings, combineSentences, key,\
insertSentences, deleteWords, correctDict, ERROR_INVALID, ERROR_NOT_FOUND
import test_code
class TestDictionaryMethod(unittest.TestCase):
    """FileDict() should map each key to a list matching correctDict."""
    def test_dictionary(self):
        output = test_code.FileDict(testDir)
        # Sort each value list so the comparison is order-independent.
        for key in output:
            output[key].sort()
        self.assertEqual(output, correctDict)
class TestChangeLetter(unittest.TestCase):
    """ChangeLetter(): replace a letter, or report not-found/invalid input."""
    def test_ChangeLetter(self):
        output = test_code.ChangeLetter(chngLetters['str1'], chngLetters['bad1'], chngLetters['good1'])
        self.assertEqual(output, correctSentences[0])

    def test_ChangeLetterBad(self):
        # The letter to replace is absent from the string.
        output = test_code.ChangeLetter(chngLetters['str2'], chngLetters['bad2'], chngLetters['good2'])
        self.assertEqual(output, ERROR_NOT_FOUND)

    def test_ChangeLetterNone(self):
        # An empty input string is invalid.
        output = test_code.ChangeLetter('', 'a', 'x')
        self.assertEqual(output, ERROR_INVALID)
class TestReverseWord(unittest.TestCase):
    """ReverseWord(): reverse a word in a string, or report an error."""
    def test_ReverseWord(self):
        output = test_code.ReverseWord(reverseWords['str1'], reverseWords['bad1'])
        self.assertEqual(output, correctSentences[1])

    def test_ReverseWordBad(self):
        # The target word is absent from the string.
        output = test_code.ReverseWord(reverseWords['str2'], reverseWords['bad2'])
        self.assertEqual(output, ERROR_NOT_FOUND)

    def test_ReverseWordNone(self):
        # An empty input string is invalid.
        output = test_code.ReverseWord('', reverseWords['bad2'])
        self.assertEqual(output, ERROR_INVALID)
class TestObfuscateString(unittest.TestCase):
    """ObfuscateString(): de/obfuscate a string with a key."""
    def test_ObfuscateString(self):
        output = test_code.ObfuscateString(obfuscatedStrings[0], key[0])
        self.assertEqual(output, correctSentences[2])

    def test_ObfuscateStringNone(self):
        # key[1] is expected to be an invalid key.
        output = test_code.ObfuscateString(obfuscatedStrings[0], key[1])
        self.assertEqual(output, ERROR_INVALID)
class TestCombineStrings(unittest.TestCase):
    """CombineStrings must join two sentences together."""
    def test_CombineStrings(self):
        result = test_code.CombineStrings(combineSentences[0], combineSentences[1])
        self.assertEqual(result, correctSentences[3])
    def test_CombineStringsNone(self):
        # An empty first sentence is invalid input.
        result = test_code.CombineStrings('', combineSentences[1])
        self.assertEqual(result, ERROR_INVALID)
class TestInsertWord(unittest.TestCase):
    """InsertWord must insert a word at the target position."""
    def test_InsertWord(self):
        result = test_code.InsertWord(insertSentences['str1'], insertSentences['ins1'], insertSentences['tgt1'])
        self.assertEqual(result, correctSentences[4])
    def test_InsertWordNone(self):
        # An empty insertion word is invalid input.
        result = test_code.InsertWord(insertSentences['str1'], '', insertSentences['tgt1'])
        self.assertEqual(result, ERROR_INVALID)
class TestDeleteWord(unittest.TestCase):
    """DeleteWord must remove a word from a sentence."""
    def test_DeleteWord(self):
        result = test_code.DeleteWord(deleteWords['str1'], deleteWords['del1'])
        self.assertEqual(result, correctSentences[5])
    def test_DeleteWordBad(self):
        # A word that is not present must yield ERROR_NOT_FOUND.
        result = test_code.DeleteWord(deleteWords['str2'], deleteWords['del2'])
        self.assertEqual(result, ERROR_NOT_FOUND)
| true |
99f37f9582965de26cb027ffb2d65b5ed9fe0f1f
|
Python
|
pvu1984/cage-challenge-2
|
/CybORG/CybORG/Agents/Wrappers/TrueTableWrapper.py
|
UTF-8
| 3,796 | 2.65625 | 3 |
[
"MIT"
] |
permissive
|
from copy import deepcopy
from pprint import pprint
from prettytable import PrettyTable
from CybORG.Shared.Enums import TrinaryEnum
from CybORG.Agents.Wrappers.BaseWrapper import BaseWrapper
class TrueTableWrapper(BaseWrapper):
    """Wrapper that can replace agent observations with a ground-truth
    PrettyTable of every host's subnet, IP, hostname, scan state and
    Red-access level."""
    def __init__(self,env=None,agent=None, observer_mode=True):
        super().__init__(env,agent)
        # IPs that Red has successfully scanned so far.
        self.scanned_ips = set()
        self.step_counter = -1
        # observer_mode=True passes observations through unchanged;
        # False replaces them with the true-state table.
        self.observer_mode = observer_mode
    def reset(self, agent=None):
        # Forget scan history and restart the step counter on every reset.
        self.scanned_ips = set()
        self.step_counter = -1
        result = self.env.reset(agent)
        result.observation = self.observation_change(result.observation)
        return result
    def observation_change(self,observation):
        self.step_counter +=1
        self._update_scanned()
        return observation if self.observer_mode else self._create_true_table()
    def get_table(self):
        """Return the current true-state table regardless of observer_mode."""
        return self._create_true_table()
    def _update_scanned(self):
        # Before the first real step no action can have been taken yet.
        if self.step_counter <= 0:
            return
        action = self.get_last_action(agent='Red')
        if action.__class__.__name__ == 'DiscoverNetworkServices':
            red_obs = deepcopy(self.get_observation(agent='Red'))
            success = red_obs['success']
            if success:
                # The scanned IP is the most recently added key of
                # Red's observation dict.
                ip = red_obs.popitem()[0]
                self.scanned_ips.add(ip)
    def _create_true_table(self):
        """Build a PrettyTable summarising the true state of every host."""
        true_obs = deepcopy(self.env.get_agent_state('True'))
        success = true_obs.pop('success')
        table = PrettyTable([
            'Subnet',
            'IP Address',
            'Hostname',
            'Known',
            'Scanned',
            'Access',
        ])
        for hostid in true_obs:
            host = true_obs[hostid]
            for interface in host['Interface']:
                ip = interface['IP Address']
                # Skip loopback and interfaces without subnet information.
                if str(ip) == '127.0.0.1':
                    continue
                if 'Subnet' not in interface:
                    continue
                subnet = interface['Subnet']
                hostname = host['System info']['Hostname']
                action_space = self.get_action_space(agent = 'Red')
                known = action_space['ip_address'][ip]
                scanned = True if str(ip) in self.scanned_ips else False
                access = self._determine_red_access(host['Sessions'])
                table.add_row([subnet,str(ip),hostname,known,scanned,access])
        table.sortby = 'Hostname'
        table.success = success
        return table
    def _determine_red_access(self,session_list):
        # Report the privilege level of Red's first session on the host,
        # or 'None' when Red has no session there.
        for session in session_list:
            if session['Agent'] != 'Red':
                continue
            privileged = session['Username'] in {'root','SYSTEM'}
            return 'Privileged' if privileged else 'User'
        return 'None'
    def get_attr(self,attribute:str):
        # Delegate attribute lookup to the wrapped environment.
        return self.env.get_attr(attribute)
    def get_observation(self, agent: str):
        return self.get_attr('get_observation')(agent)
    def get_agent_state(self,agent:str):
        # The special 'True' agent receives the rendered table instead of
        # the raw environment state.
        if agent == 'True':
            output = self.get_table()
        else:
            output = self.get_attr('get_agent_state')(agent)
        return output
    def get_action_space(self,agent):
        return self.get_attr('get_action_space')(agent)
    def get_last_action(self,agent):
        return self.get_attr('get_last_action')(agent)
    def get_ip_map(self):
        return self.get_attr('get_ip_map')()
    def get_rewards(self):
        return self.get_attr('get_rewards')()
def true_obs_to_table(true_obs, env):
    """Render a ground-truth observation dict as a PrettyTable."""
    print('Scanned column likely inaccurate.')
    table_wrapper = TrueTableWrapper(env, observer_mode=False)
    # Pretend one step has already happened so the wrapper's scan-update
    # path runs; without real action history the Scanned column is a guess.
    table_wrapper.step_counter = 1
    return table_wrapper.observation_change(true_obs)
| true |
245735d9adf8a9fb6499490e839447eb6189fd16
|
Python
|
itsolutionscorp/AutoStyle-Clustering
|
/all_data/exercism_data/python/bob/81246fcb208d4654bf83664b46f9825d.py
|
UTF-8
| 641 | 3.46875 | 3 |
[] |
no_license
|
import re
class Bob:
    """A lackadaisical teenager who replies to whatever is said to him
    (exercism "Bob" exercise)."""

    def hey(self, str):
        """Return Bob's reply to the sentence `str`.

        Note: the parameter keeps its original name `str` (shadowing the
        builtin) for backward compatibility with keyword callers; prefer
        renaming it in a future major revision.

        Fix: removed a dead triple-quoted block containing an older,
        commented-out implementation that served no purpose.
        """
        # Silence: empty or whitespace-only input.
        if not str.strip():
            return "Fine. Be that way!"
        # Shouting: the whole sentence is upper-case (checked before the
        # question mark, so shouted questions also get this reply).
        elif str.isupper():
            return "Woah, chill out!"
        # A question.
        elif str.endswith("?"):
            return "Sure."
        # Anything else.
        else:
            return "Whatever."
| true |
0452dd36c744042acb64074f64169ed5a20d915d
|
Python
|
lileijava/lianjia_crawl
|
/transposed_test.py
|
UTF-8
| 95 | 3.203125 | 3 |
[] |
no_license
|
# zip(*array) transposes a list of rows into column tuples; with ragged rows
# every column is truncated to the length of the shortest row (here 2).
array = [[1,2,3,4],[5,6,9],[7,8]]
transposed = zip(*array)
print(array)
# zip returns a lazy iterator in Python 3; list() materialises it.
print(list(transposed))
| true |
5ade3248d1c814857ecc19af8ad95384952effc9
|
Python
|
msbhati08/Airflow
|
/src/unittest/dags/my_dag_tests.py
|
UTF-8
| 1,777 | 2.71875 | 3 |
[
"Unlicense"
] |
permissive
|
import unittest
from airflow.models import DagBag
class TestMySampleDAG(unittest.TestCase):
    """Structural checks for the 'simple-airflow' DAG."""
    def setUp(self):
        # A fresh DagBag re-parses the DAG files before every test.
        self.dagbag = DagBag()
    def test_task_count(self):
        """The DAG must contain exactly two tasks."""
        dag = self.dagbag.get_dag('simple-airflow')
        self.assertEqual(len(dag.tasks), 2)
    def test_contain_tasks(self):
        """The DAG must contain exactly the expected task ids."""
        dag = self.dagbag.get_dag('simple-airflow')
        task_ids = [task.task_id for task in dag.tasks]
        self.assertListEqual(task_ids, ['hello_task1', 'dummy_task1'])
    def test_dependencies_of_dummy_task1(self):
        """dummy_task1 has no upstream tasks and feeds hello_task1."""
        dag = self.dagbag.get_dag('simple-airflow')
        dummy_task = dag.get_task('dummy_task1')
        upstream_ids = [task.task_id for task in dummy_task.upstream_list]
        self.assertListEqual(upstream_ids, [])
        downstream_ids = [task.task_id for task in dummy_task.downstream_list]
        self.assertListEqual(downstream_ids, ['hello_task1'])
    def test_dependencies_of_hello_task1(self):
        """hello_task1 is fed by dummy_task1 and feeds nothing."""
        dag = self.dagbag.get_dag('simple-airflow')
        hello_task = dag.get_task('hello_task1')
        upstream_ids = [task.task_id for task in hello_task.upstream_list]
        self.assertListEqual(upstream_ids, ['dummy_task1'])
        downstream_ids = [task.task_id for task in hello_task.downstream_list]
        self.assertListEqual(downstream_ids, [])
| true |
a8343c53088be203d6b4a36803e909fe805ef665
|
Python
|
dreadlordow/Python-Fundamentals
|
/3.Lists_basics/4.Search.py
|
UTF-8
| 260 | 3.609375 | 4 |
[] |
no_license
|
# Read `n` sentences, then print all of them followed by the subset that
# contains `word` as a substring.
n = int(input())
word = input()
string = [input() for _ in range(n)]
string_with_word = [sentence for sentence in string if word in sentence]
print(string)
print(string_with_word)
| true |
b74fe0b51faed63613cf1d30dca34a6a30a9ad04
|
Python
|
leventarican/cookbook
|
/python/import_modules.py
|
UTF-8
| 597 | 3.828125 | 4 |
[] |
no_license
|
# Demo of the different ways to import modules and names in Python.
# we have build in functions
print('for sure this one is build in')
print( 'python chars [use of str and len functions]: ' + str(len('python')) )
# now lets extend our vocabulary; like random, sys, os and math
import random # import module called random
a_random_number = random.randint(0,9)
print(a_random_number)
# another option: a star import pulls every public math name into scope
# (generally discouraged outside demos -- it obscures where names come from)
from math import *
print(pi)
# import a single function
from math import cos
print(cos(1))
# or just import module path from os and use alias
from os import path as ospath
print(ospath.abspath('.'))
# or import __our__ function from the local `functions` module
import functions
functions.printPI()
| true |
7aa381bebf30f22240036f29dbc4a8fe3766e829
|
Python
|
NikilMunireddy/SmartChain
|
/SmartChain/RestAPI/RestAPI/API/Packages/auth_api/RandomNumberPool/delete_number.py
|
UTF-8
| 793 | 2.890625 | 3 |
[] |
no_license
|
import argparse
from .. import db_connection
# Delets a number from database and returns the count of rows removed
def delete_number(number, conn=None):
    """Delete `number` from the random_number_pool table.

    Args:
        number: the random number to remove (string or int).
        conn: an open DB connection; when None (the default) a new
            connection is obtained from db_connection.

    Returns:
        The count of rows removed (``cursor.rowcount``).

    Fix: the original unconditionally overwrote the caller-supplied
    connection with a fresh one, so the `conn` argument was silently
    ignored; it is now honoured when provided.
    """
    if conn is None:
        conn = db_connection.get_connection()
    cursor = conn.cursor()
    # Parameterized query -- the value is never interpolated into SQL text.
    SQL_QUERY = "DELETE FROM random_number_pool WHERE random_number =%s"
    value = (number,)
    cursor.execute(SQL_QUERY, value)
    conn.commit()
    print(cursor.rowcount, "record(s) deleted")
    return cursor.rowcount
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Insert delete DB")
    parser.add_argument('-n', '--number', type=str, required=True ,help="Enter a number and chech if it is present in number pool if yes then the number will be removed")
    args = parser.parse_args()
    number = args.number
    # Fix: the parsed number was never acted upon -- actually perform the
    # deletion the help text promises.
    delete_number(number, None)
    # 535689400738783
| true |
41d23b98eb1704f1af017139bc1f4111b2ce9adb
|
Python
|
xiaoloinzi/pycharm_02
|
/20190422/make_excel_file.py
|
UTF-8
| 547 | 3.046875 | 3 |
[] |
no_license
|
#encoding = "utf-8"
# Demo: create an .xlsx workbook with openpyxl and write several cell types.
import locale,datetime,time
from openpyxl import Workbook
wb = Workbook()# create the workbook object
ws = wb.active # grab the first (active) worksheet
ws['A1'] = 42 # write a number
ws['B1'] = "光荣之路"+"automation test"# write Chinese text
ws.append([1,2,3])# write several cells as one row
ws['A2'] = datetime.datetime.now()# write the current timestamp
# Write a custom-formatted time string (requires a Chinese locale for strftime).
locale.setlocale(locale.LC_CTYPE,'chinese')
ws['A3'] = time.strftime("%Y年%m月%d日 %H时%M分%S秒",time.localtime())
# Save the workbook to disk (hard-coded Windows path).
wb.save("E:\\sample.xlsx")
| true |
2311ff4cf28f72c20b4f2e012ae41bd970fa46c0
|
Python
|
rogueflynn/NetworkProgramming
|
/pythonCode/xmlParsing.py
|
UTF-8
| 219 | 2.796875 | 3 |
[] |
no_license
|
import xml.etree.ElementTree as Et

# Parse a small in-memory XML document and print its two children in
# reverse document order: the message first, then the email address.
xmlData = "<data><email>vgvgonzalez8@gmail.com</email><message>hello</message></data>"
tree = Et.ElementTree(Et.fromstring(xmlData))
root = tree.getroot()
for child in (root.find("message"), root.find("email")):
    print(child.text)
| true |
638d2840ae1524530ad03071237ebbf59eb80644
|
Python
|
Vagacoder/Python_for_everyone
|
/Ch05/2017-8-21_1.py
|
UTF-8
| 1,228 | 4 | 4 |
[] |
no_license
|
## Ch05 P 5.15
def reverse(string):
    """Return `string` with its characters in reverse order."""
    # Slicing with a negative step is the idiomatic way to reverse a str.
    return string[::-1]
print(reverse('flow'))
## Ch05 P 5.16
def isPalindrome(string):
    """Return True when `string` reads the same forwards and backwards.

    Fix: the original recursed all the way to the centre even after a
    mismatch was found (the `and test` could not short-circuit past the
    recursive call), risking RecursionError on long inputs; this version
    compares end characters with an early exit.
    """
    while len(string) >= 2:
        if string[0] != string[-1]:
            return False
        string = string[1:-1]
    return True
print(isPalindrome('rt'))
## Ch05 P 5.17
def find(string1, string2):
    """Return True when `string2` occurs as a substring of `string1`.

    Fix: the original always recursed to the end of string1 (the trailing
    `or test` could not short-circuit past the recursive call), hitting
    RecursionError on long inputs and recursing forever for an empty
    pattern; the builtin `in` performs the same containment test safely.
    """
    return string2 in string1
print(find('Missipssppi', 'sip'))
## Ch05 P 5.19
def exponential(a, n):
    """Return a**n for a non-negative integer exponent n, recursively.

    Fix: the original had no base case for n == 0, so exponential(a, 0)
    recursed forever (until RecursionError); n <= 0 now returns 1
    (negative exponents are treated as 0, matching integer power intent).
    """
    if n <= 0:
        return 1
    return a * exponential(a, n - 1)
print(exponential(3,3))
## Ch05 P 5.20
def _is_leap_verdict(year):
    """Return the verdict string for `year` (Gregorian leap-year rule:
    divisible by 4, except centuries, except multiples of 400)."""
    if year % 4 == 0:
        if year % 400 == 0:
            return 'is a leap year.'
        elif year % 100 == 0:
            return 'is NOT a leap year.'
        else:
            return 'is a leap year.'
    else:
        return 'is NOT a leap year.'


def leap_year():
    """Prompt for a year and return whether it is a leap year.

    Decomposition: the pure rule lives in _is_leap_verdict so it can be
    tested without stdin.
    """
    year = int(input('please enter the year: '))
    return _is_leap_verdict(year)
print(leap_year())
| true |
83101b05aca3a9f4da1e71baf1d2c72bf579cc24
|
Python
|
dashboardijo/algorithm007-class02
|
/Week_06/G20200343040180/coinChange.py
|
UTF-8
| 784 | 3.59375 | 4 |
[] |
no_license
|
#!/usr/bin/env python
'''
https://leetcode-cn.com/problems/coin-change/description/
322. 零钱兑换
给定不同面额的硬币 coins 和一个总金额 amount。编写一个函数来计算可以凑成总金额所需的最少的硬币个数。如果没有任何一种硬币组合能组成总金额,返回 -1。
示例 1:
输入: coins = [1, 2, 5], amount = 11
输出: 3
解释: 11 = 5 + 5 + 1
示例 2:
输入: coins = [2], amount = 3
输出: -1
说明:
你可以认为每种硬币的数量是无限的。
'''
def coinChange(coins, amount):
    """Return the minimum number of coins summing to `amount`, or -1.

    Classic unbounded-knapsack DP: dp[j] is the fewest coins making j.

    Bug fix: the inner loop updated ``dp[i]`` with an undefined name `i`
    (NameError at runtime); both reads and writes must use ``dp[j]``.
    """
    dp = [float('inf')] * (amount + 1)
    dp[0] = 0
    for coin in coins:
        for j in range(coin, amount + 1):
            dp[j] = min(dp[j - coin] + 1, dp[j])
    return dp[amount] if dp[amount] != float('inf') else -1
| true |
09cecc7597bed65fda07a55f0d6cd82a392c3bb7
|
Python
|
Kenn3Th/Physics_at_UiO
|
/INF1100/Project/SIR_class.py
|
UTF-8
| 3,453 | 3.140625 | 3 |
[] |
no_license
|
class ProblemSIR():
    """Definition of an SIR epidemic model as a right-hand side du/dt = f(u, t).

    `nu` (recovery rate) and `beta` (infection rate) may each be given as
    a constant or as a callable of time t.
    """
    def __init__(self, nu, beta, S0, I0, R0, T):
        # Wrap numeric rates in constant functions so self.nu/self.beta
        # can always be called as functions of time.
        if isinstance(nu, (float, int)):
            self.nu = lambda t: nu
        elif callable(nu):
            self.nu = nu
        if isinstance(beta, (float, int)):
            self.beta = lambda t: beta
        elif callable(beta):
            self.beta = beta
        self.S0, self.I0, self.R0, self.T = S0, I0, R0, T

    def __call__(self, u, t):
        """Right-hand side of the SIR system for state u = (S, I, R)."""
        S, I, R = u
        return [-self.beta(t)*S*I,
                self.beta(t)*S*I - self.nu(t)*I,
                self.nu(t)*I]

    def initial_value(self):
        """Return the initial condition (S0, I0, R0)."""
        return self.S0, self.I0, self.R0

    def time_points(self, dt):
        """Return the time grid from 0 to T with step size dt.

        Fix: np.linspace requires an integer sample count; passing the
        float ``self.T / float(dt)`` raises TypeError on modern NumPy.
        """
        import numpy as np
        self.dt = dt
        return np.linspace(0, self.T, int(round(self.T / float(self.dt))))
class SolverSIR():
    """Numerically integrate a ProblemSIR model and plot the result."""
    # Bound as a class attribute so the default `method` argument below can
    # reference it at class-creation time.
    import ODEsolver as ODE
    def __init__(self, problem, dt):
        # problem: a ProblemSIR instance; dt: solver step size.
        self.problem, self.dt = problem, dt
    def solve(self, method=ODE.RungeKutta4):
        """Integrate the system over [0, T] and store S, I, R and t on self."""
        import numpy as np
        self.solver = method(self.problem)
        ic = [self.problem.S0, self.problem.I0, self.problem.R0]
        self.solver.set_initial_condition(ic)
        # n steps of size dt -> n+1 grid points including both endpoints.
        n = int(round(self.problem.T/float(self.dt)))
        t = np.linspace(0, self.problem.T, n+1)
        u , self.t = self.solver.solve(t)
        # Split the solution matrix into the three compartments.
        self.S, self.I, self.R = u[:,0], u[:,1], u[:,2]
    def plot(self):
        """Plot S, I and R against time (labels are in Norwegian)."""
        import matplotlib.pyplot as plt
        S, I, R, t = self.S, self.I, self.R, self.t
        plt.plot(t,S,t,I,t,R)
        plt.legend(['Motagelig for sykdom', 'Smitta', 'Friske "meldt"'])
        plt.axis([0,60,0,2000])
        plt.xlabel('Dager')
        plt.ylabel('Personer')
        plt.title('SolverSIR')
        plt.show()
if __name__ == '__main__': # guard so the demo does not run when this module is imported elsewhere
    import ODEsolver as ODE, matplotlib.pyplot as plt
    def betta(t): # piecewise-constant infection rate: higher before day 12
        betta = 0
        if t<=12:
            betta = 0.0005
        else:
            betta = 0.0001
        return betta
    dt = 0.5 # step length
    problem = ProblemSIR(nu=0.1, beta=betta, S0=1500, I0=1, R0=0, T=60)
    solver = ODE.RungeKutta4(problem)
    solver.set_initial_condition(problem.initial_value())
    y, x = solver.solve(problem.time_points(dt))
    S = y[:,0]; I = y[:,1]; R = y[:,2]
    # plot for ProblemSIR (legend/labels in Norwegian)
    plt.plot(x,S,x,I,x,R)
    plt.legend(['Motagelig for sykdom', 'Smitta', 'Friske "meldt"'])
    plt.axis([0,60,0,2000])
    plt.xlabel('Dager')
    plt.ylabel('Personer')
    plt.title('ProblemSIR')
    plt.show()
    # plot for SolverSIR
    prob = SolverSIR(problem,dt)
    prob.solve()
    prob.plot()
"""
Naar jeg sammenligner grafene fra ProblemSIR, SolverSIR og SIR.py
er det forskjell paa de smitta.
I SIR.py er max smitta oppe i ca 900 personer paa en gang.
Det er en veldig liten forskjell paa grafene fra ProblemSIR og SolverSIR
disse har max smitta paa ca 750 personer paa engang.
Naar vi ser paa antall motagelig for smitte ser vi at ikke alle har blitt smittet
i ProblemSIR og SolverSIR. Det er igjen litt under 200 som ikke er blitt smittet.
jeg bruker false i terminate funksjonen min pga av dette staar i ODEsolveren:
Compute solution u for t values in the list/array
time_points, as long as terminate(u,t,step_no) is False.
terminate(u,t,step_no) is a user-given function
returning True or False. By default, a terminate
function which always returns False is used.
Terminal> python SIR_class.py
"""
| true |
8bb7a2a80e47813e3320cae492ed6ba341484fe4
|
Python
|
motivatedLeroy/WebIntelligence
|
/Analysis/subscribed_users_by_events.py
|
UTF-8
| 1,413 | 2.75 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# Count the number of events per subscribed user and print the result to
# subscribed_users_by_events.txt
import json
from operator import itemgetter
import os
input_fname = 'one_week/20170101'
output_dir = 'results/'
subscribed_uids_fname = output_dir + 'subscribed_userIds.txt'
output_fname = output_dir + 'subscribed_users_by_events.txt'
# Load the set of subscribed user ids (one id per line).
subscribed_uids = set()
with open(subscribed_uids_fname) as f:
    for line in f:
        subscribed_uids.add(line.strip())
# counts number of events grouped by userId
# (input is one JSON object per line with a 'userId' field)
events_by_uid = {}
with open(input_fname) as f:
    for line in f:
        obj = json.loads(line.strip())
        uid = obj['userId']
        if uid in subscribed_uids:
            if uid not in events_by_uid:
                events_by_uid[uid] = 1
            else:
                events_by_uid[uid] += 1
# count number of userIds grouped by number of events per userId
uids_by_events_count = {}
for uid, events_count in events_by_uid.items():
    if events_count not in uids_by_events_count:
        uids_by_events_count[events_count] = 1
    else:
        uids_by_events_count[events_count] += 1
# Write "<events_count>\t<user_count>" rows, sorted by events_count.
os.makedirs(output_dir, exist_ok=True)
with open(output_fname, 'w') as f:
    for events_count, uids_count in sorted(uids_by_events_count.items(),
                                           key=itemgetter(0)):
        print('\t'.join([str(events_count), str(uids_count)]), file=f)
| true |
e0fb84114f4bc52281d2b36ed2b183f157adbfbd
|
Python
|
amitgajbhiye/box-embeddings
|
/box_embeddings/modules/intersection/tf_hard_intersection.py
|
UTF-8
| 1,467 | 2.890625 | 3 |
[] |
no_license
|
from typing import List, Tuple, Union, Dict, Any, Optional
import tensorflow as tf
from box_embeddings.parameterizations import TFTBoxTensor
from box_embeddings.modules.intersection._tf_intersection import (
_TFIntersection,
)
def tf_hard_intersection(
    left: TFTBoxTensor, right: TFTBoxTensor
) -> TFTBoxTensor:
    """Hard Intersection operation as a function.

    The lower corner of the result is the element-wise maximum of the
    operands' lower corners, and the upper corner the element-wise minimum
    of their upper corners.

    note:
        This function can give fipped boxes, i.e. where z[i] > Z[i]

    todo:
        Add support for broadcasting

    Args:
        left: BoxTensor which is the left operand
        right: BoxTensor which is the right operand

    Returns:
        The resulting BoxTensor obtained by interection.
        It has the same concrete type as the `self` (left operand).
    """
    lower = tf.math.maximum(left.z, right.z)
    upper = tf.math.minimum(left.Z, right.Z)
    return left.from_zZ(lower, upper)
# Registered under the name "hard" so it can be selected by configuration.
@_TFIntersection.register("hard")
class TFHardIntersection(_TFIntersection):
    """Hard intersection operation as a Layer/Module"""
    def __call__(
        self, left: TFTBoxTensor, right: TFTBoxTensor
    ) -> TFTBoxTensor:
        """Gives intersection of self and other.

        Args:
            left: First operand for intersection
            right: Second operand

        Returns:
            Intersection box

        Note:
            This function can give fipped boxes, i.e. where z[i] > Z[i]
        """
        # Delegates to the functional form above.
        return tf_hard_intersection(left, right)
| true |
3c2f1dac50b4c3cdbb67136d69ef33059594708b
|
Python
|
Chen-Yufeng/ComicDownloader
|
/main.py
|
UTF-8
| 2,672 | 2.78125 | 3 |
[] |
no_license
|
#!/bin/python3
from bs4 import BeautifulSoup
import requests
import sys
import time
import os
import re
import img2pdf
def validate_file_name(origi_name):
    """Replace characters that are illegal in file names with underscores."""
    # Forbidden set: / \ : * ? " < > | and the dot.
    illegal = r"[\/\\\:\*\?\"\<\>\|\.]"  # '/ \ : * ? " < > |'
    return re.sub(illegal, "_", origi_name)
def get_html(url):
    """Fetch `url`; return the body text on HTTP 200, otherwise None."""
    try:
        response = requests.get(url)
    except requests.RequestException:
        return None
    if response.status_code == 200:
        return response.text
    return None
def get_title(soup):
    """Return the page's og:title meta content, or a timestamped fallback."""
    meta_tags = soup.head.find_all('meta')
    for tag in meta_tags:
        if tag.get('property') == 'og:title':
            return tag.get('content')
    # No og:title present -- fall back to a unique default name.
    return 'default-' + str(time.time())
def get_image_links(soup: BeautifulSoup):
    """Collect the lazy-load source URL of every comic page image."""
    links = []
    for tag in soup.find_all('img', class_='rich_pages'):
        links.append(tag.get('data-src'))
    return links
def download_images(title, image_links):
    """Download every linked image into ./Downloads/<sanitised title>/NNN.jpg
    and return that directory path."""
    downloads_path = os.path.join('.', 'Downloads', validate_file_name(title))
    if not os.path.exists(downloads_path):
        os.makedirs(downloads_path)
    index = 1
    for link in image_links:
        response = requests.get(link)
        if response.status_code != 200:
            retry = 0
            # NOTE(review): unbounded busy retry -- no retry cap and no delay,
            # so a permanently failing URL spins forever; consider a maximum
            # retry count and backoff.
            while True:
                retry = retry + 1
                print('Retry-%d: Image %03d' % (retry, index))
                response.close()
                response = requests.get(link)
                if (response.status_code == 200):
                    break
        # Images are numbered sequentially so the later PDF sort is stable.
        with open(os.path.join(downloads_path, '%03d.jpg' % index), 'wb') as f:
            f.write(response.content)
        response.close()
        index = index + 1
    return downloads_path
def generate_pdf(title, path):
    """Bundle every .jpg in `path` (sorted by name) into one PDF file."""
    pdf_path = os.path.join(path, validate_file_name(title) + '.pdf')
    image_paths = []
    for entry in os.listdir(path):
        if not entry.endswith('.jpg'):
            continue
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            continue
        image_paths.append(entry_path)
    # Sequentially numbered file names make this a page-order sort.
    image_paths.sort()
    with open(pdf_path, 'wb') as pdf_file:
        pdf_file.write(img2pdf.convert(image_paths))
def main(url):
    """Download one comic page: fetch HTML, parse it, save images, build PDF."""
    html = get_html(url)
    # with open('1.html', 'w') as f:
    #     f.write(html)
    soup = BeautifulSoup(html, 'lxml')
    title = get_title(soup)
    image_links = get_image_links(soup)
    downloads_path = download_images(title, image_links)
    generate_pdf(title, downloads_path)
    return 0
if __name__ == '__main__':
    print('Comic Image Downloader V0.1.2')
    # Simple REPL: download each entered link until the user types "bye".
    while True:
        link = input('Enter link: ')
        if link.lower() == 'bye':
            exit()
        main(link)
| true |
312abd4421fb765070eb2d34947092b919bd0d45
|
Python
|
zengcong1314/python1205
|
/lesson03_data_type_list/homework.py
|
UTF-8
| 2,255 | 4.4375 | 4 |
[] |
no_license
|
# 1. Delete the element "矮穷丑" from the list below; give 2 or more methods:
info = ["yuze", 18, "男", "矮穷丑", ["高", "富", "帅"], True, None, "狼的眼睛是啥样的"]
info.remove("矮穷丑")
print(info)
# info.pop(3)
# print(info)
# del info[3]
# print(info)
#
# 2. Given the list li2=[1,2,3,4,5],
# use list operations to turn it into li2 = [0,1,2,3,66,4,5,11,22,33],
li2=[1,2,3,4,5]
li2.insert(0,0)
li2.insert(4,66)
li2.extend([11,22,33])
print(li2)
# then demonstrate the ways of removing elements, explaining what each does
li2.remove(3)# remove the first element whose value is 3
print(li2)
li2.pop(5) # remove the element at index 5
print(li2)
del li2[0] # remove the element at index 0
print(li2)
li2.clear()# remove every element from the list
print(li2)
# I. State which of the following are mutable and which are immutable types
# 1. (11)
print(type((11))) # (11) is just the int 11 -- immutable
# 2. [11,22]
print(type([11,22])) # a list -- mutable
# 3. ([11,22,33])
print(type(([11,22,33]))) # still a list -- mutable
# II. Given the list li = [11,22,33,22,22,44,55,77,88,99,11],
# remove the three elements 77, 88 and 99
li = [11,22,33,22,22,44,55,77,88,99,11]
li.remove(77)
li.pop(7)
del li[7]
print(li)
# III. Redo the previous homework's dating-show exercise with a list
# A dating show collects your personal information: store your name, gender and age,
info = []
info.extend(["曾聪","femal",18])
# b. Someone is interested in you; the platform asks you to add your height and contact number;
info.extend([160,13967898761])
print("增加身高和联系方式后信息为:{}".format(info))
# c. To protect your privacy, the platform asks you to delete your contact number;
info.pop(4)
print("删除联系方式后的信息为:{}".format(info))
#
# d. To score better, pick a stage name and update your height plus anything else you want changed.
info[0] = "兮洛"
info.append("兮洛")
info[3] = 165
print("修改后的花名为:{},身高为{}".format(info[0],info[3]))
# e. Add at least three of your hobbies.
# info.extend(["看书","芭蕾","瑜伽"])
info.append(["看书","芭蕾","瑜伽"])
info.extend(["看书","芭蕾","瑜伽"])
print(info)
| true |
d007f372f47a8940a12679b6ade7e6cbad0f30e1
|
Python
|
imSamiul/Python_Exercrsises
|
/raf3.py
|
UTF-8
| 512 | 3.625 | 4 |
[] |
no_license
|
def exercise():
    """Prompt for an exercise and return the user's answer."""
    return input("Which exercise have you taken?\n")
def diet():
    """Prompt for a food and return the user's answer."""
    return input("Which food have you taken?\n")
# Give the user up to 5 attempts to pick a valid menu option.
j = 1
while j <= 5:
    x = input("What do you want for lock?\n"
              "1 for 'Diet'\n"
              "2 for 'Exercise\n"
              "You can try 5 times.\n")
    if x == "1":
        v = diet()
        break
    elif x == "2":
        # NOTE(review): unlike the "1" branch this discards the return value
        # and does not break, so the loop keeps prompting -- confirm whether
        # that is intended.
        exercise()
    else:
        print(x)
        print(f' You have {5 - j} term left')
    # NOTE(review): indentation was lost in this dump; the increment is
    # reconstructed at loop level so all branches consume an attempt.
    j = j + 1
| true |
3408af5b253feb459f06e17353a53679b21842b7
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02714/s038637276.py
|
UTF-8
| 427 | 2.90625 | 3 |
[] |
no_license
|
# ABC 162 D style problem: count triples i<j<k with three distinct colours
# among 'R', 'G', 'B', excluding equally spaced triples (k-j == j-i).
N = int(input())
S = input()
# Index lists per colour (only their lengths are used below).
ris = [i for i, s in enumerate(S) if s == "R"]
gis = [i for i, s in enumerate(S) if s == "G"]
bis = [i for i, s in enumerate(S) if s == "B"]
# All ways to pick one R, one G and one B, ignoring the spacing rule.
# NOTE: `all` shadows the builtin of the same name; harmless here.
all = len(ris) * len(gis) * len(bis)
cnt = 0
# Count the forbidden triples: equally spaced positions with 3 distinct colours.
for i in range(N):
    for j in range(i+1, N):
        k = 2*j - i
        if 0 <= k < N:
            if S[i] != S[j] and S[i] != S[k] and S[j] != S[k]:
                cnt += 1
# Inclusion-exclusion: all colour combinations minus the equally spaced ones.
ans = all - cnt
print(ans)
| true |
08ba517915cfe578970138b691811c0a3da6d951
|
Python
|
danielwun/Handwriting-number-classification-1
|
/tools/nulinear.py
|
UTF-8
| 1,360 | 2.609375 | 3 |
[] |
no_license
|
import csv
import glob
import math
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from libsvm.python.svmutil import *
from libsvm.python.svm import *
import os
def readLabel(file):
    """Read a CSV file and return every cell, flattened into one list."""
    with open(file, 'r') as handle:
        return [cell for row in csv.reader(handle) for cell in row]
def Accuracy(t, x):
    """Return the fraction of positions where t and x agree as integers.

    Args:
        t: expected labels (values convertible to int).
        x: predicted labels, indexed in parallel with t.

    Returns:
        Match ratio in [0.0, 1.0]; 0.0 for an empty label list (the
        original raised ZeroDivisionError in that case).
    """
    length = len(t)
    if length == 0:
        return 0.0
    match = 0
    for i in range(length):
        if int(t[i]) == int(x[i]):
            match = match + 1
    return match / length
def main():
    """Sweep the nu parameter of a linear nu-SVC and plot test accuracy."""
    # read as libsvm format
    label, learning = svm_read_problem('learningfile')
    tlabel , tlearning = svm_read_problem('testfile')
    accuracy = []
    n = []
    # Train/predict via the command-line tools for nu = 0.01 .. 0.96.
    # (-s 1: nu-SVC, -t 0: linear kernel)
    for nu in range(1,99,5):
        os.system('svm-train -s 1 -t 0 -n '+str(float(nu/100))+' learningfile learningfile.model')
        os.system('svm-predict testfile learningfile.model result.out')
        label = readLabel('result.out')
        acc = Accuracy(tlabel, label)
        n.append(nu)
        accuracy.append(acc)
        print(nu,' ',acc)
    # Plot accuracy against nu and save the figure to disk.
    plt.plot( n, accuracy, color='red',ls='',marker='o',)
    plt.xlabel('n')
    plt.ylabel('accuracy(%)')
    plt.savefig('nulinear.png')
    plt.show()
if __name__ == "__main__":
    main()
| true |
06ba23c7a287a9783fdf7e11f14a430a8ee0af44
|
Python
|
AlexanderFabisch/distance3d
|
/distance3d/geometry.py
|
UTF-8
| 15,675 | 2.96875 | 3 |
[
"Zlib",
"MIT",
"BSD-3-Clause",
"BSD-3-Clause-Clear",
"BSL-1.0",
"Unlicense"
] |
permissive
|
"""Tools for geometric computations."""
import math
from itertools import product
import numba
import numpy as np
from .utils import (
norm_vector, transform_point, plane_basis_from_normal,
scalar_triple_product)
def convert_rectangle_to_segment(rectangle_center, rectangle_extents, i0, i1):
    """Extract line segment from rectangle.

    Parameters
    ----------
    rectangle_center : array, shape (3,)
        Center point of the rectangle.

    rectangle_extents : array, shape (3, 2)
        Extents along axes of the rectangles:
        0.5 * rectangle_sizes * rectangle_axes.

    i0 : int
        Either 0 or 1, selecting line segment.

    i1 : int
        Either 0 or 1, selecting line segment.

    Returns
    -------
    segment_end : array, shape (3,)
        End point of segment.

    segment_start : array, shape (3,)
        Start point of segment.
    """
    # Documentation fix: the original docstring listed the return values as
    # (start, end), but the function has always returned (end, start); the
    # docstring now matches the return statement (callers are unaffected).
    segment_middle = rectangle_center + (2 * i0 - 1) * rectangle_extents[i1]
    segment_start = segment_middle - rectangle_extents[1 - i1]
    segment_end = segment_middle + rectangle_extents[1 - i1]
    return segment_end, segment_start
# Corner coordinates of a unit rectangle: every combination of +-0.5 per axis.
RECTANGLE_COORDS = np.array([
    [-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]])


def convert_rectangle_to_vertices(
        rectangle_center, rectangle_axes, rectangle_lengths):
    """Convert rectangle to vertices.

    Parameters
    ----------
    rectangle_center : array, shape (3,)
        Center point of the rectangle.

    rectangle_axes : array, shape (2, 3)
        Each row is a vector of unit length, indicating the direction of one
        axis of the rectangle. Both vectors are orthogonal.

    rectangle_lengths : array, shape (2,)
        Lengths of the two sides of the rectangle.

    Returns
    -------
    rectangle_points : array, shape (4, 3)
        Vertices of the rectangle.
    """
    # Scale the unit-rectangle corners, map them through the rectangle's
    # axes into 3D and translate by the center.
    scaled_corners = RECTANGLE_COORDS * rectangle_lengths
    return rectangle_center + scaled_corners.dot(rectangle_axes)
def convert_box_to_face(box2origin, size, i, sign):
    """Convert box to face.

    Parameters
    ----------
    box2origin : array, shape (4, 4)
        Pose of the box.

    size : array, shape (3,)
        Size of the box along its axes.

    i : int
        Index of the axis along which we select the face.

    sign : int
        Indicate the direction along the axis.

    Returns
    -------
    face_center : array, shape (3,)
        Center point of the rectangle.

    face_axes : array, shape (2, 3)
        Each row is a vector of unit length, indicating the direction of one
        axis of the rectangle. Both vectors are orthogonal.

    face_lengths : array, shape (2,)
        Lengths of the two sides of the rectangle.
    """
    # The two remaining axes span the selected face.
    j, k = [axis for axis in (0, 1, 2) if axis != i]
    offset = sign * 0.5 * size[i] * box2origin[:3, i]
    face_center = box2origin[:3, 3] + offset
    face_axes = np.array([box2origin[:3, j], box2origin[:3, k]])
    face_lengths = np.array([size[j], size[k]])
    return face_center, face_axes, face_lengths
@numba.njit(cache=True)
def convert_segment_to_line(segment_start, segment_end):
    """Convert line segment to line.

    Parameters
    ----------
    segment_start : array, shape (3,)
        Start point of segment.

    segment_end : array, shape (3,)
        End point of segment.

    Returns
    -------
    segment_direction : array, shape (3,)
        Line direction with unit length (or 0).

    segment_length : float
        Length of the line segment.
    """
    segment_direction = segment_end - segment_start
    segment_length = np.linalg.norm(segment_direction)
    # Normalize in place; a degenerate (zero-length) segment keeps the
    # zero vector as its direction.
    if segment_length > 0:
        segment_direction /= segment_length
    return segment_direction, segment_length
# All 8 corners of a unit cube: every combination of +-0.5 per axis.
BOX_COORDS = np.array(list(product([-0.5, 0.5], repeat=3)))


@numba.njit(
    numba.float64[:, :](numba.float64[:, ::1], numba.float64[::1]),
    cache=True)
def convert_box_to_vertices(box2origin, size):
    """Convert box to vertices.

    Parameters
    ----------
    box2origin : array, shape (4, 4)
        Pose of the box.

    size : array, shape (3,)
        Size of the box along its axes.

    Returns
    -------
    box_points : array, shape (8, 3)
        Vertices of the box.
    """
    # Scale unit-cube corners by the box size, rotate into the world frame
    # and translate by the box center.
    return box2origin[:3, 3] + (BOX_COORDS * size).dot(box2origin[:3, :3].T)
@numba.njit(
    numba.float64[:](numba.float64[::1], numba.float64[:, ::1], numba.float64,
                     numba.float64),
    cache=True)
def support_function_cylinder(
        search_direction, cylinder2origin, radius, length):
    """Compute extreme point of cylinder along a direction.

    You can find similar implementations here:

    * https://github.com/kevinmoran/GJK/blob/b38d923d268629f30b44c3cf6d4f9974bbcdb0d3/Collider.h#L42
      (Copyright (c) 2017 Kevin Moran, MIT License or Unlicense)
    * https://github.com/bulletphysics/bullet3/blob/e306b274f1885f32b7e9d65062aa942b398805c2/src/BulletCollision/CollisionShapes/btConvexShape.cpp#L167
      (Copyright (c) 2003-2009 Erwin Coumans, zlib license)

    Parameters
    ----------
    search_direction : array, shape (3,)
        Search direction.

    cylinder2origin : array, shape (4, 4)
        Pose of the cylinder.

    radius : float
        Radius of the cylinder.

    length : float
        Length of the cylinder.

    Returns
    -------
    extreme_point : array, shape (3,)
        Extreme point along search direction.
    """
    # Rotate the search direction into the cylinder's local frame
    # (z is the cylinder axis).
    local_dir = np.dot(cylinder2origin[:3, :3].T, search_direction)
    # Magnitude of the direction's radial (xy) component.
    s = math.sqrt(local_dir[0] * local_dir[0] + local_dir[1] * local_dir[1])
    # Pick the top or bottom cap depending on the axial component's sign.
    if local_dir[2] < 0.0:
        z = -0.5 * length
    else:
        z = 0.5 * length
    if s == 0.0:
        # Purely axial direction: any rim point works; pick one on the x axis.
        local_vertex = np.array([radius, 0.0, z])
    else:
        # Project the radial component onto the cap's rim.
        d = radius / s
        local_vertex = np.array([local_dir[0] * d, local_dir[1] * d, z])
    return transform_point(cylinder2origin, local_vertex)
@numba.njit(
    numba.float64[:](numba.float64[::1], numba.float64[:, ::1], numba.float64,
                     numba.float64),
    cache=True)
def support_function_capsule(
        search_direction, capsule2origin, radius, height):
    """Compute extreme point of capsule along a direction.

    You can find similar implementations here:

    * https://github.com/kevinmoran/GJK/blob/b38d923d268629f30b44c3cf6d4f9974bbcdb0d3/Collider.h#L57
      (Copyright (c) 2017 Kevin Moran, MIT License or Unlicense)
    * https://github.com/bulletphysics/bullet3/blob/e306b274f1885f32b7e9d65062aa942b398805c2/src/BulletCollision/CollisionShapes/btConvexShape.cpp#L228
      (Copyright (c) 2003-2009 Erwin Coumans, zlib license)

    Parameters
    ----------
    search_direction : array, shape (3,)
        Search direction.

    capsule2origin : array, shape (4, 4)
        Pose of the capsule.

    radius : float
        Radius of the capsule.

    height : float
        Height of the capsule's cylindrical part.

    Returns
    -------
    extreme_point : array, shape (3,)
        Extreme point along search direction.
    """
    # Rotate the search direction into the capsule's local frame.
    local_dir = np.dot(capsule2origin[:3, :3].T, search_direction)
    s = math.sqrt(local_dir[0] * local_dir[0] + local_dir[1] * local_dir[1]
                  + local_dir[2] * local_dir[2])
    if s == 0.0:
        # Zero direction: pick an arbitrary point on the sphere of one cap.
        local_vertex = np.array([radius, 0, 0])
    else:
        # Point on the hemispherical cap in the search direction.
        local_vertex = local_dir * (radius / s)
    # Shift onto the cap at the matching end of the cylindrical part.
    if local_dir[2] > 0.0:
        local_vertex[2] += 0.5 * height
    else:
        local_vertex[2] -= 0.5 * height
    return transform_point(capsule2origin, local_vertex)
@numba.njit(
    numba.float64[:](numba.float64[::1], numba.float64[:, ::1], numba.float64[::1]),
    cache=True)
def support_function_ellipsoid(
        search_direction, ellipsoid2origin, radii):
    """Compute extreme point of ellipsoid along a direction.

    Parameters
    ----------
    search_direction : array, shape (3,)
        Search direction.

    ellipsoid2origin : array, shape (4, 4)
        Pose of the ellipsoid.

    radii : array, shape (3,)
        Radii of the ellipsoid.

    Returns
    -------
    extreme_point : array, shape (3,)
        Extreme point along search direction.
    """
    # Rotate into the ellipsoid's local frame, scale, normalize and scale
    # back: maps the search direction onto the ellipsoid surface.
    local_dir = np.dot(ellipsoid2origin[:3, :3].T, search_direction)
    local_vertex = norm_vector(local_dir * radii) * radii
    return transform_point(ellipsoid2origin, local_vertex)
@numba.njit(
    numba.float64[:](numba.float64[::1], numba.float64[:, ::1], numba.float64[::1]),
    cache=True)
def support_function_box(search_direction, box2origin, half_lengths):
    """Compute extreme point of box along a direction.

    Parameters
    ----------
    search_direction : array, shape (3,)
        Search direction.

    box2origin : array, shape (4, 4)
        Pose of the box.

    half_lengths : array, shape (3,)
        Half lengths of the box along its axes.

    Returns
    -------
    extreme_point : array, shape (3,)
        Extreme point along search direction.
    """
    # In the box frame the extreme corner is simply the sign pattern of
    # the search direction scaled by the half extents.
    local_dir = np.dot(box2origin[:3, :3].T, search_direction)
    local_vertex = np.sign(local_dir) * half_lengths
    return transform_point(box2origin, local_vertex)
@numba.njit(
    numba.float64[:](numba.float64[::1], numba.float64[::1], numba.float64),
    cache=True)
def support_function_sphere(search_direction, center, radius):
    """Compute extreme point of sphere along a direction.

    You can find similar implementations here:

    * https://github.com/kevinmoran/GJK/blob/b38d923d268629f30b44c3cf6d4f9974bbcdb0d3/Collider.h#L33
      (Copyright (c) 2017 Kevin Moran, MIT License or Unlicense)

    Parameters
    ----------
    search_direction : array, shape (3,)
        Search direction.

    center : array, shape (3,)
        Center of the sphere.

    radius : float
        Radius of the sphere.

    Returns
    -------
    extreme_point : array, shape (3,)
        Extreme point along search direction.
    """
    s_norm = np.linalg.norm(search_direction)
    if s_norm == 0.0:
        # Degenerate zero-length direction: fall back to an arbitrary fixed
        # point on the surface (top of the sphere along +z).
        vertex = center + np.array([0, 0, radius])
    else:
        # Step from the center along the normalized direction.
        vertex = center + search_direction / s_norm * radius
    return vertex
@numba.njit(
    numba.float64[:](numba.float64[::1], numba.float64[::1], numba.float64,
                     numba.float64[::1]),
    cache=True)
def support_function_disk(search_direction, center, radius, normal):
    """Compute extreme point of disk along a direction.

    Parameters
    ----------
    search_direction : array, shape (3,)
        Search direction.

    center : array, shape (3,)
        Center of the disk.

    radius : float
        Radius of the disk.

    normal : array, shape (3,)
        Normal to the plane in which the disk lies.

    Returns
    -------
    extreme_point : array, shape (3,)
        Extreme point along search direction.
    """
    # Build an orthonormal frame (x, y, normal) for the disk's plane.
    x, y = plane_basis_from_normal(normal)
    R = np.column_stack((x, y, normal))
    # Express the search direction in that frame and drop the component
    # along the normal: the support point must lie in the disk's plane.
    point = np.dot(R.T, search_direction)
    point[2] = 0.0
    norm = np.linalg.norm(point)
    if norm == 0.0:
        # Direction is (anti)parallel to the normal: every disk point is
        # equally extreme, so return the center.
        return np.copy(center)
    # Scale the in-plane direction to the rim and map back to world frame.
    point *= radius / norm
    return center + np.dot(R, point)
@numba.njit(
    numba.float64[:](numba.float64[::1], numba.float64[::1],
                     numba.float64[:, ::1], numba.float64[::1]),
    cache=True)
def support_function_ellipse(search_direction, center, axes, radii):
    """Compute extreme point of ellipse along a direction.

    Parameters
    ----------
    search_direction : array, shape (3,)
        Search direction.

    center : array, shape (3,)
        Center of ellipse.

    axes : array, shape (2, 3)
        Axes of ellipse.

    radii : array, shape (2,)
        Radii of ellipse.

    Returns
    -------
    extreme_point : array, shape (3,)
        Extreme point along search direction.
    """
    # Project the search direction onto the two ellipse axes (2D local frame).
    local_dir = axes.dot(search_direction)
    # Scaling by the radii maps the ellipse to a unit circle, where the
    # support point is the normalized direction; scale back to the ellipse.
    local_vertex = norm_vector(radii * local_dir) * radii
    # Combine the 2D local support point with the axes to get a world point.
    return center + np.dot(local_vertex, axes)
@numba.njit(
    numba.float64[:](numba.float64[::1], numba.float64[:, ::1], numba.float64,
                     numba.float64),
    cache=True)
def support_function_cone(search_direction, cone2origin, radius, height):
    """Compute extreme point of cone along a direction.

    Parameters
    ----------
    search_direction : array, shape (3,)
        Search direction.

    cone2origin : array, shape (4, 4)
        Pose of the cone.

    radius : float
        Radius of the cone.

    height : float
        Length of the cone.

    Returns
    -------
    extreme_point : array, shape (3,)
        Extreme point along search direction.
    """
    # Rotate the search direction into the cone's local frame. The cone's
    # base disk lies in the local z=0 plane, the apex is at (0, 0, height).
    local_dir = np.dot(cone2origin[:3, :3].T, search_direction)
    # Candidate 1: the point on the base rim closest to the direction.
    disk_point = np.array([local_dir[0], local_dir[1], 0.0])
    norm = np.linalg.norm(disk_point)
    if norm == 0.0:
        # Direction is along the cone axis; rim candidate degenerates to
        # the base center.
        disk_point = np.array([0.0, 0.0, 0.0])
    else:
        disk_point *= radius / norm
    # Pick whichever candidate (rim point vs. apex) maximizes the support
    # value, i.e., the dot product with the search direction.
    if np.dot(local_dir, disk_point) >= local_dir[2] * height:
        point_in_cone = disk_point
    else:
        point_in_cone = np.array([0.0, 0.0, height])
    return transform_point(cone2origin, point_in_cone)
@numba.njit(cache=True)
def hesse_normal_form(plane_point, plane_normal):
    """Computes Hesse normal form of a plane.

    In the Hesse normal form (x * n - d = 0), x is any point on the plane,
    n is the plane's normal, and d ist the distance from the origin to the
    plane along its normal.

    Parameters
    ----------
    plane_point : array, shape (3,)
        Point on the plane.

    plane_normal : array, shape (3,)
        Normal of the plane. We assume unit length.

    Returns
    -------
    plane_normal : array, shape (3,)
        Normal of the plane. We assume unit length.

    d : float
        Distance of the plane to the origin in Hesse normal form.
    """
    # With a unit normal, d is the projection of any plane point onto n.
    return plane_normal, np.dot(plane_point, plane_normal)
def line_from_pluecker(line_direction, line_moment):
    """Computes line from Plücker coordinates.

    Parameters
    ----------
    line_direction : array, shape (3,)
        Direction of the line. Not necessarily of unit length. The input
        array is not modified.

    line_moment : array, shape (3,)
        Moment of the line.

    Returns
    -------
    line_point : array, shape (3,)
        Point on line (closest point of the line to the origin).

    line_direction : array, shape (3,)
        Direction of the line, normalized to unit length (returned
        unchanged if the input direction has zero length).
    """
    line_point = np.cross(line_direction, line_moment)
    # Work on a float copy so the in-place operators below do not mutate
    # the caller's array (the previous no-op self-assignment did not copy).
    line_direction = np.array(line_direction, dtype=float)
    line_dir_norm_squared = np.dot(line_direction, line_direction)
    if line_dir_norm_squared > 0.0:
        # Closest point to the origin is (d x m) / |d|^2.
        line_point /= line_dir_norm_squared
        line_direction /= math.sqrt(line_dir_norm_squared)
    return line_point, line_direction
@numba.njit(cache=True)
def barycentric_coordinates_tetrahedron(p, tetrahedron_points):
    """Barycentric coordinates of a point for tetrahedron.

    Source: https://stackoverflow.com/a/38546111/915743

    Parameters
    ----------
    p : array, shape (3,)
        Point of which we want to determine barycentric coordinates.

    tetrahedron_points : array, shape (4, 3)
        Each row represents a point of the tetrahedron.

    Returns
    -------
    array, shape (4,)
        Barycentric coordinates of p in the tetrahedron.
    """
    result = np.empty(4, dtype=np.dtype("float"))
    # Each scalar triple product below is proportional to the signed volume
    # of the sub-tetrahedron spanned by p and one face; dividing by the
    # triple product of the full tetrahedron at the end yields the weights.
    b_to_cd = tetrahedron_points[2:] - tetrahedron_points[1]
    bp = p - tetrahedron_points[1]
    # Weight of vertex A uses the face (B, C, D).
    result[0] = scalar_triple_product(bp, b_to_cd[1], b_to_cd[0])
    a_to_bcd = tetrahedron_points[1:] - tetrahedron_points[0]
    ap = p - tetrahedron_points[0]
    # Weights of vertices B, C, D use the faces that contain A.
    result[1] = scalar_triple_product(ap, a_to_bcd[1], a_to_bcd[2])
    result[2] = scalar_triple_product(ap, a_to_bcd[2], a_to_bcd[0])
    result[3] = scalar_triple_product(ap, a_to_bcd[0], a_to_bcd[1])
    return result / scalar_triple_product(a_to_bcd[0], a_to_bcd[1], a_to_bcd[2])
| true |
38c41976f0c899ed0656aaf5c5b7f4776c0242e5
|
Python
|
peyton/761project
|
/feature_lian/render.py
|
UTF-8
| 1,284 | 2.703125 | 3 |
[] |
no_license
|
import numpy
import matplotlib.pyplot as plt
import time
#plot the result
def render(x, y, fileName, setFlag = True):
    """Scatter-plot y over x and save the figure to fileName.

    When setFlag is True, both axes use a logarithmic scale and the y-axis
    is limited to [0.1, 10 * max(y)].
    """
    x = numpy.array(x)
    y = numpy.array(y)
    fig, ax = plt.subplots(1,1)
    #ax.set_xticks(numpy.arange(0,1,0.1))
    if setFlag:
        ax.set_xscale("log")
        ax.set_yscale("log")
        tma = max(y)
        # Fixed lower bound of 0.1 assumes positive data on a log axis.
        plt.ylim(ymin=0.1, ymax=tma*10)
    ax.scatter(x,y)
    ax.grid()
    fig.show()
    fig.savefig(fileName)
#    raw_input()
def renderCompare(x, y1, y2, title, xlabel, ylabel, outName):
    """Plot two series (y1 as blue squares, y2 as green triangles) over the
    same x values, label the axes, and save the figure to outName.

    NOTE(review): uses Python 2 print statements; this module will not run
    unmodified under Python 3.
    """
    print len(x)
    print len(y1)
    print len(y2)
    fig, ax = plt.subplots(1,1)
    #ax.set_xscale("log")
    ax.plot(x, y1, 'bs-', x, y2, 'g^-')
    ax.grid()
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    fig.show()
    fig.savefig(outName)
def renderCompareHist(arrT, arrF, title, xlabel, ylabel, outName):
    """Overlay two normalized step histograms (arrT in blue labeled 'True',
    arrF in red labeled 'False') and save the figure to outName.

    NOTE(review): `normed=True` was removed in matplotlib 3.x; modern
    versions require `density=True` instead -- confirm the pinned
    matplotlib version before running.
    """
    print "beginToRender"
    plt.hist(arrT, bins=20, histtype='step', normed=True, color='b', label='True')
    print "render1 complete"
    plt.hist(arrF, bins=20, histtype='step', normed=True, color='r', label='False')
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    #plt.show()
    print "save image to file"
    plt.savefig(outName)
# No demo/CLI behavior: this module is import-only.
if __name__ == "__main__":
    pass
| true |
3dc1bf6a8692b413209db89c00e045dee603bd15
|
Python
|
lefft/network_demos
|
/py/networkx_demo.py
|
UTF-8
| 1,681 | 3.375 | 3 |
[] |
no_license
|
# coding: utf-8

# ### scratchpad for `networkx` graph demos
# ###### timothy leffel, feb06/2018 <hr style='height:2px; background-color:gray'>

# NOTE(review): this is a Jupyter-notebook export; `get_ipython()` only
# exists when run inside IPython/Jupyter.

# In[1]:

import networkx as nx
import matplotlib.pyplot as plt
from numpy import random
get_ipython().magic('matplotlib inline')

# Seed numpy's RNG -- presumably to make the (random) graph layouts
# reproducible across runs; verify this is the RNG networkx actually uses.
random.seed(6933)

# In[2]:

# simple network path graph
graph = nx.path_graph(6)

# add edges from a list of 2-tupes, as args to `add_edge`, or from file (TODO)
graph.add_edges_from([(1,2),(1,3)])
graph.add_edge(0, 4)

# draw the graph with `nx.draw_networkx()`
nx.draw_networkx(graph)

# In[3]:

# function to quickly plot various kinds of graphs
def make_net(graph_method, edge_list, **kwargs):
    # Build a graph of the requested type from the edge list and draw it,
    # forwarding any style keyword arguments to `draw_networkx`.
    return nx.draw_networkx(graph_method(edge_list), **kwargs)

# make some edges, in a list
edges = [('edges','can'), ('can','be stuff'), ('be stuff', 'like'),
         ('like','bigrams'), ('edgy','edges'), ('orphan','component </3'),
         ('me','like'), ('like','edges')]

# pass in whatever style components you want to via `**kwargs` param
kwargs = {'font_size': 14, 'node_color': 'lightgray'}

# bind the arguments that won't vary in the subplots (edges and style)
plot_edges_with = lambda nx_method: make_net(nx_method, edges, **kwargs)

# In[4]:

# Draw the same edge list as four different graph types for comparison.
# subplot syntax: `xyz` `x`=nrow, `y`=ncol, `z`=position
plt.subplot(221)
plot_edges_with(nx.path_graph)
plt.subplot(222)
plot_edges_with(nx.Graph)
plt.subplot(223)
plot_edges_with(nx.DiGraph)
plt.subplot(224)
plot_edges_with(nx.MultiGraph)

# format is: ([l, t, r, b], with l < r, t < b)
# default is: `rect=[0, 0, 1, 1]`
plt.tight_layout(rect=[.5, .5, 3, 2])

# try this but with setting seed inside of `make_net()`!
plt.show()
| true |
d129d0bfb88f86e840dbca770aed57fda45162c3
|
Python
|
Kartikeya97/Automated-attendance
|
/DatabaseInitializer.py
|
UTF-8
| 4,765 | 3.171875 | 3 |
[] |
no_license
|
import pymysql.cursors
import csv
class DatabaseInitializer:
    """Creates and seeds the MySQL attendance database from a CSV roll list.

    The connection attribute holds a pymysql connection once one of the
    EC_* methods succeeds; 0 means "not connected".
    """

    def __init__(self):
        # Sentinel 0 (not None) marks the disconnected state; see
        # DisconnectDatabase.
        self.connection = 0

    def ReadCSVRollList(self, file_name):
        """Read the CSV file and return a list of [roll_no, name] rows."""
        roll_list = []
        # Opening and reading CSV File
        with open(file_name, newline='') as csvfile:
            file_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            # Creating a Roll List(Roll Number and Name)
            for row in file_reader:
                roll_list.append(row)
        return roll_list

    def EC_MySQLServer(self, host, user, password, port):
        """Connect to the MySQL server (no database selected); return a status message."""
        msg = ""
        # Connecting to MYSQL Server
        # NOTE(review): the broad `except Exception` hides the root cause of
        # connection failures; consider logging the exception.
        try:
            self.connection = pymysql.connect(host = host, user = user, password = password, port = port, cursorclass = pymysql.cursors.DictCursor)
            msg = "Connection Successfully Established with MySQL Server!"
        except Exception:
            msg = "Unnable to Connect with MySQL Server!"
        return msg

    def EC_Database(self, host, user, password, port, db_name):
        """Connect to a specific database on the MySQL server; return a status message."""
        msg = ""
        try:
            self.connection = pymysql.connect(host = host, user = user, password = password, port = port, database = db_name, cursorclass = pymysql.cursors.DictCursor)
            msg = "Connection Successfully Established with the Database!"
        except Exception:
            msg = "Unnable to Connect with the Databse!"
        return msg

    def CreateDatabase(self, db_name):
        """Create the database db_name on the connected server; return a status message."""
        msg = ""
        # Query for Creating Database
        # NOTE(review): db_name is interpolated directly into the SQL text;
        # only pass trusted, programmer-controlled names here.
        query = "CREATE DATABASE " + db_name
        # Creating Database
        try:
            with self.connection.cursor() as cursor:
                cursor.execute(query)
            msg = "DataBase successfully created!"
        except Exception:
            msg = "Unnable to create the database!"
        return msg

    def CreateTable(self, table_name):
        """Create the attendance table: one row per student, one CHAR(1) column per day (Day_1..Day_31)."""
        msg = ""
        # Query for Creating Table
        query = "CREATE TABLE " + str(table_name) + " (S_No INT(1) NOT NULL AUTO_INCREMENT, Roll_No INT(1) NOT NULL, Name VARCHAR(35) NOT NULL, Day_1 CHAR(1) NULL, Day_2 CHAR(1) NULL, Day_3 CHAR(1) NULL, Day_4 CHAR(1) NULL, Day_5 CHAR(1) NULL, Day_6 CHAR(1) NULL, Day_7 CHAR(1) NULL, Day_8 CHAR(1) NULL, Day_9 CHAR(1) NULL, Day_10 CHAR(1) NULL, Day_11 CHAR(1) NULL, Day_12 CHAR(1) NULL, Day_13 CHAR(1) NULL, Day_14 CHAR(1) NULL, Day_15 CHAR(1) NULL, Day_16 CHAR(1) NULL, Day_17 CHAR(1) NULL, Day_18 CHAR(1) NULL, Day_19 CHAR(1) NULL, Day_20 CHAR(1) NULL, Day_21 CHAR(1) NULL, Day_22 CHAR(1) NULL, Day_23 CHAR(1) NULL, Day_24 CHAR(1) NULL, Day_25 CHAR(1) NULL, Day_26 CHAR(1) NULL, Day_27 CHAR(1) NULL, Day_28 CHAR(1) NULL, Day_29 CHAR(1) NULL, Day_30 CHAR(1) NULL, Day_31 CHAR(1) NULL, PRIMARY KEY(S_No));"
        # Creating Table
        try:
            with self.connection.cursor() as cursor:
                cursor.execute(query)
            msg = "Table Created Successfully!"
        except Exception:
            msg = "Unable to create the table!"
        return msg

    def InitializeTable(self, table_name, roll_list):
        """Insert one (Roll_No, Name) row per roll_list entry; return a status message."""
        flag = 0
        # Looping through the Roll List
        for roll_no, name in roll_list:
            # Query to initialize table with roll_list
            # NOTE(review): values are concatenated into the SQL text; use
            # parameterized queries if roll_list can contain untrusted data.
            query = "INSERT INTO " + str(table_name) + " (Roll_No, Name) VALUES('" + str(roll_no) + "', '" + (name) + "');"
            # Executing query to initialize table with roll_list
            try:
                with self.connection.cursor() as cursor:
                    cursor.execute(query)
                # Commit each insert immediately.
                self.connection.autocommit(True)
            except Exception:
                flag = 1
        if (flag == 0):
            msg = "Table Initialized Successfully!"
        else:
            msg = "Unnable to initialize the table!"
        return msg

    def DisconnectDatabase(self):
        """Close the connection if one is open and reset the sentinel."""
        # Disconnecting Database
        if(self.connection != 0):
            self.connection.close()
            self.connection = 0
            print("Database Disconnected!")
# Module-level demo: read the roll list and (optionally) build the database.
DI = DatabaseInitializer()
roll_list = DI.ReadCSVRollList("DatabaseEntities/RollList.csv")

# Connection parameters for the local MySQL server.
host = 'localhost'
user = 'root'
password = ''
port = 3306
db_name = 'attendance'
table_name = 'subject'

# The actual setup sequence is commented out; only the CSV read above and
# the (no-op while disconnected) DisconnectDatabase call below execute.
'''
print(DI.EC_MySQLServer(host, user, password, port))
print(DI.CreateDatabase(db_name))
print(DI.EC_Database(host, user, password, port, db_name))
print(DI.CreateTable(table_name))
print(DI.InitializeTable(table_name, roll_list))
'''
DI.DisconnectDatabase()
| true |
d16f827380682937f457531c80a499456a74f285
|
Python
|
vamsikrishna6668/python
|
/constructor1.py
|
UTF-8
| 878 | 3.34375 | 3 |
[] |
no_license
|
class schl_student:
    """Interactive school-student record built from console prompts.

    NOTE(review): std_idno and std_name are CLASS attributes whose input()
    prompts run exactly once, at class-definition (import) time, and are
    shared by all instances -- almost certainly unintended; they should
    probably be instance attributes set in __init__.
    """
    std_idno=int(input("Enter a student Idno:"))
    std_name=input("Enter a Student Name:")

    def __init__(self):
        # Per-instance fields prompted on every construction.
        self.std_class=int(input("Enter a student which class he/she belong to:"))
        self.std_lang=input("Enter a student Mother Tongue Language:")
        print("Iam a Constructor")

    def assign(self):
        # Repeatedly prompt for address/location until the user declines.
        # NOTE(review): each iteration OVERWRITES std_add/std_locat, so only
        # the last entered pair is kept.
        while True:
            self.std_add=input("Enter a Student Address:")
            self.std_locat=input("Enter a student Location:")
            ans=input("Enter a continute press Y/y:")
            if ans=='y' or ans=='Y':
                continue
            else:
                break

    def display(self):
        # Print instance fields, then the shared class-level id and name.
        print(self.std_locat)
        print(self.std_add)
        print(schl_student.std_idno)
        print(schl_student.std_name)
#call
# Demo: construct a student (prompts), collect address data, then print it.
s1=schl_student()
s1.assign()
s1.display()
| true |
a5991b6f618b85e1395740a30b1fecb218988e0a
|
Python
|
mauryquijada/scrapr
|
/scraper.py
|
UTF-8
| 1,776 | 3 | 3 |
[] |
no_license
|
import csv
import urllib
import time
from bs4 import BeautifulSoup
from flask import Flask, render_template, request, url_for, redirect
app = Flask(__name__)
@app.route("/")
def home():
    """Serve the landing page with the URL-submission form."""
    template_name = 'index.html'
    return render_template(template_name)
@app.route("/scrapr", methods=['POST'])
def parse():
    """Fetch the submitted URL, extract every HTML table into rows, write
    them to a timestamped CSV under static/, and redirect to that file.

    NOTE(review): Python 2 code (`urllib.urlopen`, `unicode`); will not run
    under Python 3 unmodified. BeautifulSoup is also constructed without an
    explicit parser, which emits a warning on modern bs4.
    """
    # Get the URL submitted, open the connection, get results, and start
    # parsing the object.
    url_to_fetch = request.form['url']
    connection = urllib.urlopen(url_to_fetch)
    text = connection.read()
    page_soup = BeautifulSoup(text)

    # Open the CSV file for writing. We will end up returning this.
    # The unix timestamp makes each scrape's output file unique.
    csvFilename = "output_" + str(int(time.time())) + ".csv"
    csvFile = open("static/" + csvFilename, "w")
    csvWriter = csv.writer(csvFile, delimiter=',')

    # For every table, create a soup to process all of its individual parts.
    for table in page_soup.find_all("table"):
        table_soup = BeautifulSoup(str(table))
        for body in table_soup:
            body = BeautifulSoup(str(body))
            rows = body.find_all("tr")
            for tr in rows:
                # Header and data cells are treated uniformly.
                cols = tr.find_all(["td", "th"])
                colsArr = []
                for td in cols:
                    data_set = unicode(td.string).strip()

                    # Expand any headers that might span more than 1 column.
                    if "colspan" in td.attrs:
                        times_to_repeat = int(td["colspan"])
                    else:
                        times_to_repeat = 1

                    # Append to accumulated array as appropriate.
                    # Purely numeric cells are written as integers.
                    if data_set.isdigit():
                        data_set = int(data_set)
                    for i in range(times_to_repeat):
                        colsArr.append(data_set)
                csvWriter.writerow(colsArr)

        # Write an empty row just to give some space.
        csvWriter.writerow([])

    csvFile.close()
    return redirect(url_for("static", filename=csvFilename))
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug = True, port=8080)
| true |
b54b62d8c3db48a169d0e0a9dfeb25e8e51a0d82
|
Python
|
Uchiha-Itachi0/The-Complete-FAANG-Preparation
|
/1]. DSA/3]. 450 DSA by ( Love Babbar Bhaiya )/Python/01]. Array/03_)_Kth_Max_Min_Element.py
|
UTF-8
| 289 | 3.796875 | 4 |
[
"MIT"
] |
permissive
|
# Question link : https://practice.geeksforgeeks.org/problems/kth-smallest-element5635/1#
def kth_min_element(arr, k):
    """Return the k-th smallest element of ``arr`` (1-based ``k``).

    Uses ``sorted`` instead of ``list.sort`` so the caller's list is NOT
    reordered as a side effect (the previous version sorted in place).

    Time complexity : O(n log n)
    Space complexity : O(n) for the sorted copy

    Raises
    ------
    IndexError
        If ``k`` is outside ``1..len(arr)`` (for ``k >= 1``).
    """
    return sorted(arr)[k - 1]


print(kth_min_element([1, 2, 3, 4, 5], 3))  # -> 3
| true |
b4376f8a8b07f263ebe9a8264afbf097133733bd
|
Python
|
jamesob/uncertainties
|
/uncertainties.py
|
UTF-8
| 4,146 | 3.765625 | 4 |
[] |
no_license
|
#!/usr/bin/env python
# by jamesob
# because I love physics lab
import math
class UncertainVariable(object):
    """A class defining a variable with associated uncertainty stored as a
    physical quantity and a percentage. Uncertainties are automatically
    calculated and maintained as the variable is operated on.

    Values must be set before associated uncertainties are.

    >>> x = UncertainVariable(44.8, 0.2)
    >>> t = UncertainVariable(3.21, 0.02)
    >>> res = x / t**2
    >>> res.val
    4.3477838918488754
    >>> res.uncert
    0.057549919541000298
    >>> res.uncertPerc
    1.3236609954071901
    >>> mu = UncertainVariable(0.004, 0.2 / 1000)
    >>> L = UncertainVariable(0.6, 0.5 / 100)
    >>> res = 4 * mu
    >>> res2 = L ** 2
    >>> yores = res * res2
    >>> foores = 4 * mu * L**2
    >>> foores.val == yores.val
    True
    >>> foores.uncert == yores.uncert
    True
    >>> foores.uncertPerc == yores.uncertPerc
    True
    """

    def __init__(self, val=1., uncert=1.):
        # Absolute uncertainty and its percentage form are kept in sync via
        # the `uncert`/`uncertPerc` properties below. val must be non-zero.
        self.val = val
        self._uncert = uncert
        self._uncertPerc = 100.0 * (uncert / val)

    def getUnc(self):
        return self._uncert

    def setUnc(self, unc):
        # Setting the absolute uncertainty recomputes the percentage.
        self._uncert = unc
        self._uncertPerc = 100.0 * (unc / self.val)

    uncert = property(getUnc, setUnc, doc="Uncertainty in units.")

    def getPerc(self):
        return self._uncertPerc

    def setPerc(self, uncPerc):
        # Setting the percentage recomputes the absolute uncertainty.
        self._uncertPerc = uncPerc
        self._uncert = (uncPerc * self.val) / 100.0

    uncertPerc = property(getPerc, setPerc, doc="Uncertainty percentage.")

    def _addAndSub(a, b, op):
        """Not to be used outside of class definition. Encapsulates similarities
        between addition and subtraction: absolute uncertainties add in
        quadrature. `a` plays the role of self."""
        newVar = UncertainVariable()
        if isinstance(b, UncertainVariable):
            newVar.val = op(a.val, b.val)
            newVar.setUnc(math.sqrt(a.uncert**2 + b.uncert**2))
        else:
            # Adding/subtracting an exact number leaves the uncertainty alone.
            newVar.val = op(a.val, b)
            newVar.setUnc(a.uncert)
        return newVar

    def _multAndDiv(a, b, op):
        """Not to be used outside of class definition. Encapsulates similarities
        between mult. and div.: percentage uncertainties add in quadrature.
        `a` plays the role of self."""
        newVar = UncertainVariable()
        if isinstance(b, UncertainVariable):
            newVar.val = op(a.val, b.val)
            newVar.setPerc(math.sqrt(a.uncertPerc**2 + b.uncertPerc**2))
        else:
            # Scaling by an exact number scales the absolute uncertainty.
            newVar.val = op(a.val, b)
            newVar.setUnc(op(a.uncert, b))
        return newVar

    def __add__(self, other):
        """Overloads the + operator for this object."""
        return self._addAndSub(other, lambda x, y: x + y)

    def __sub__(self, other):
        """Overloads the - operator for this object."""
        return self._addAndSub(other, lambda x, y: x - y)

    def __mul__(self, other):
        """Overloads multiplication operator."""
        return self._multAndDiv(other, lambda x, y: x * y)

    def __div__(self, other):
        """Overloads division operator."""
        return self._multAndDiv(other, lambda x, y: x / y)

    def __pow__(self, other):
        """Overloads the exponent operator (exact numeric exponent):
        the percentage uncertainty scales with the exponent."""
        newVar = UncertainVariable()
        newVar.val = self.val**other
        newVar.setPerc(self.uncertPerc * other)
        return newVar

    def __radd__(self, other):
        # Addition is commutative, so delegate directly.
        return self.__add__(other)

    def __rsub__(self, other):
        """other - self for an exact left operand: same absolute
        uncertainty, value with the correct operand order.
        (Previously this returned self - other, which has the wrong sign.)"""
        newVar = UncertainVariable()
        newVar.val = other - self.val
        newVar.setUnc(self.uncert)
        return newVar

    def __rdiv__(self, other):
        """other / self for an exact left operand: the percentage
        uncertainty is preserved. (Previously this returned self / other.)"""
        newVar = UncertainVariable()
        newVar.val = other / self.val
        newVar.setPerc(self.uncertPerc)
        return newVar

    def __rmul__(self, other):
        # Multiplication is commutative, so delegate directly.
        return self.__mul__(other)

    def __rpow__(self, other):
        """other ** self for an exact base > 0: by error propagation,
        d(c**x) = c**x * ln(c) * dx.
        (Previously this returned self ** other, the wrong operand order.)"""
        newVar = UncertainVariable()
        newVar.val = other ** self.val
        newVar.setUnc(abs(newVar.val * math.log(other)) * self.uncert)
        return newVar

    # Python 3 uses __truediv__/__rtruediv__ for the / operator; alias them
    # so the class works on both Python 2 and 3.
    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    def specialFunction(self, fnc):
        """For special functions, e.g. sin, log, exp: evaluate fnc at
        val +/- uncert and average the magnitudes of the two one-sided
        deviations. (The previous version referenced the nonexistent
        attribute `self.unc`, averaged the *signed* deviations -- which
        cancel for monotonic functions -- and returned self instead of
        the new variable.)"""
        newVar = UncertainVariable()
        plusUnc = self.val + self.uncert
        minUnc = self.val - self.uncert
        upperVal = fnc(plusUnc) - fnc(self.val)
        lowerVal = fnc(minUnc) - fnc(self.val)
        newVar.val = fnc(self.val)
        newVar.setUnc((abs(upperVal) + abs(lowerVal)) / 2.0)
        return newVar
# Run the doctests embedded in the class docstring when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| true |
6769aadbdda8ad311acb14fa0e8af2e156f9df20
|
Python
|
adityasarvaiya/coding
|
/Crio/LinkedList/5_RemoveNthNodeFromEndOfList/RemoveNthNodeFromEndOfList.py
|
UTF-8
| 720 | 2.984375 | 3 |
[] |
no_license
|
from Solution import *
# CRIO_SOLUTION_START_MODULE_L1_PROBLEMS
# CRIO_SOLUTION_END_MODULE_L1_PROBLEMS
def createList(numbers):
    """Build a singly linked list of ListNode from `numbers`.

    Returns the head node, or None for an empty input.
    """
    if not numbers:
        return None
    head = ListNode(numbers[0])
    tail = head
    for value in numbers[1:]:
        tail.next = ListNode(value)
        tail = tail.next
    return head
def extractList(head):
    """Collect the values of a linked list (head..tail) into a Python list."""
    values = []
    node = head
    while node is not None:
        values.append(node.val)
        node = node.next
    return values
# Driver: first line is "n k" (list length and node index from the end),
# second line is the n list values; prints the list after removal.
if __name__ == '__main__':
    row = input().split()
    n = int(row[0])
    k = int(row[1])
    nums = input().split()
    nums = [int(i) for i in nums]
    List = createList(nums)
    # removeNthNodeFromEndOflist is imported from Solution.
    result = removeNthNodeFromEndOflist(List, k)
    res = extractList(result)
    for i in res:
        print(i,end=' ')
| true |
7099d1e628b13bd5d501a473904fcaff449e683d
|
Python
|
Tanuruha-Majumdar/The-Invisible-Cloak-with-adjustable-colour-of-cloak
|
/hsv_masking.py
|
UTF-8
| 2,243 | 2.515625 | 3 |
[] |
no_license
|
import cv2
import numpy as np
def noise_removal(mask):
    """Clean up a binary mask: morphological opening (removes small specks)
    followed by a dilation (fills small gaps); also shows the raw mask.
    """
    cv2.imshow("mask", mask)
    # kernel=np.ones((10, 10), np.uint8)
    # mask = cv2.erode(mask, kernel, iterations=5)   # noise removal
    # mask = cv2.dilate(mask, kernel, iterations=8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8), iterations=2)  # noise removal
    mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8), iterations=1)
    return mask
def empty(x):
    """No-op callback for cv2.createTrackbar; the new value is ignored."""
    return None
def hsv_masking():
    """Interactively tune an HSV color mask on live webcam frames.

    Shows trackbars for the six HSV bounds, displays the resulting binary
    mask and the masked color image each frame, and on exit (Esc key or
    camera failure) returns the last (lower, upper) HSV bounds as lists.

    NOTE(review): if the camera never yields a frame, `lower`/`upper` are
    never assigned and the final return raises NameError -- confirm the
    capture device before relying on the return value.
    """
    cv2.namedWindow("TRACKBARS")
    # cv2.resizeWindow("TRACKBARS", 650, 250)
    # Hue in OpenCV is 0..179; saturation/value are 0..255.
    cv2.createTrackbar("Hue Min", "TRACKBARS", 0, 179, empty)
    cv2.createTrackbar("Hue Max", "TRACKBARS", 179, 179, empty)
    cv2.createTrackbar("Sat Min", "TRACKBARS", 0, 255, empty)
    cv2.createTrackbar("Sat Max", "TRACKBARS", 255, 255, empty)
    cv2.createTrackbar("Val Min", "TRACKBARS", 0, 255, empty)
    cv2.createTrackbar("Val Max", "TRACKBARS", 255, 255, empty)

    vid = cv2.VideoCapture(0)
    while vid.isOpened():
        success, img = vid.read()
        if not success:
            break
        img = cv2.resize(img, (900, 520))
        # Mirror the frame horizontally for a natural "selfie" view.
        img = np.flip(img, 1)
        hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # Read the current slider positions every frame.
        h_min = cv2.getTrackbarPos("Hue Min", "TRACKBARS")
        h_max = cv2.getTrackbarPos("Hue Max", "TRACKBARS")
        s_min = cv2.getTrackbarPos("Sat Min", "TRACKBARS")
        s_max = cv2.getTrackbarPos("Sat Max", "TRACKBARS")
        v_min = cv2.getTrackbarPos("Val Min", "TRACKBARS")
        v_max = cv2.getTrackbarPos("Val Max", "TRACKBARS")
        # print(h_min,h_max,s_min,s_max,v_min,v_max)
        lower = [h_min, s_min, v_min]
        upper = [h_max, s_max, v_max]
        mask = cv2.inRange(hsv_img, np.array(lower), np.array(upper))
        mask = noise_removal(mask)
        # Keep only the pixels selected by the mask.
        color_mask = cv2.bitwise_and(img, img, mask=mask)
        # cv2.imshow("Original",img)
        # cv2.imshow("HSV",hsv_img)
        cv2.imshow("HSV_MASK", mask)
        cv2.imshow("colormask", color_mask)
        k = cv2.waitKey(1)
        if k == 27:  # esc ascii
            break
    vid.release()
    cv2.destroyAllWindows()
    return lower, upper
| true |
a9cb44f53d8005916cdcaf269c8977e772515f2d
|
Python
|
JosephLevinthal/CoronaMail
|
/coronavirusEmails.py
|
UTF-8
| 8,804 | 3 | 3 |
[] |
no_license
|
from selenium import webdriver
class CoronaVirus:
    """Scrapes COVID-19 headlines and statistics and e-mails a daily digest.

    Workflow: manchetes() collects headlines from CNN/NBC/CNBC into the
    module-global `manchetesPrint`, then get_data() scrapes worldometers
    via Selenium and sends the combined report by e-mail.
    """
    # NOTE(review): class-body imports -- these run once at class-definition
    # time and bind `schedule`/`time` as CLASS attributes, not module names.
    import schedule
    import time

    # Starts the Google Chrome driver used for web scraping.
    def __init__(self):
        # Requires chromedriver.exe next to the script.
        self.driver = webdriver.Chrome("chromedriver.exe")

    # Scrapes headline data from CNN, NBC and CNBC and organizes it into a
    # dataframe; the formatted digest is stored in the global manchetesPrint.
    def manchetes(self):
        print("oi")
        from datetime import date

        today = date.today()
        d = today.strftime("%m-%d-%y")
        print("date=", d)
        # CNN's live-blog URL embeds today's date.
        cnn_url = "https://edition.cnn.com/world/live-news/coronavirus-pandemic-{}-intl/index.html".format(
            d
        )
        from bs4 import BeautifulSoup
        import requests

        html = requests.get(cnn_url).text
        soup = BeautifulSoup(html)

        nbc_url = "https://www.nbcnews.com/health/coronavirus"
        cnbc_rss_url = "https://www.cnbc.com/id/10000108/device/rss/rss/html"

        # Per-site parser format and headline tag.
        urls = [cnn_url, nbc_url, cnbc_rss_url]
        formats = ["html.parser", "html.parser", "xml"]
        tags = ["h2", "h2", "title"]
        website = ["CNN", "NBC", "CNBC"]

        crawl_len = 0

        for url in urls:
            print("Crawling web page.........{}".format(url))
            response = requests.get(url)
            soup = BeautifulSoup(response.content, formats[crawl_len])
            # for link in soup.find_all(tags[crawl_len]):
            #    if len(link.text.split(" ")) > 4:
            #        print("Headline : {}".format(link.text))
            # crawl_len = crawl_len + 1

        crawl_len = 0
        news_dict = []
        for url in urls:
            response = requests.get(url)
            soup = BeautifulSoup(response.content, formats[crawl_len])
            for link in soup.find_all(tags[crawl_len]):
                # Headlines shorter than 5 words are assumed to be menu noise.
                if len(link.text.split(" ")) > 4:
                    news_dict.append(
                        {
                            "website": website[crawl_len],
                            "url": url,
                            "headline": link.text,
                        }
                    )
            crawl_len = crawl_len + 1

        import pandas as pd

        news_df = pd.DataFrame(news_dict)
        pd.set_option("max_colwidth", 800)
        print(news_df.count())
        news_df["website"] = news_df["website"].astype(str)
        news_df["url"] = news_df["url"].astype(str)
        news_df["headline"] = news_df["headline"].astype(str)
        print(news_df.website.unique())
        print(news_df.dtypes)

        noticia_CNN = news_df["url"][1]
        noticia_NBC = "https://www.nbcnews.com/health/coronavirus"
        noticia_CNBC = "https://www.cnbc.com/coronavirus/"

        # Keep the first 4 headlines of each site.
        df_headlines = news_df.groupby("website").head(4).reset_index(drop=True)
        # NOTE(review): the hard-coded row indices below assume a fixed
        # ordering/count of headlines per site -- fragile if a site changes.
        global manchetesPrint
        manchetesPrint = str(
            "Principais manchetes do dia: \n\n"
            + "CNN - "
            + noticia_CNN
            + "\n"
            + "1- "
            + df_headlines["headline"][1]
            + "\n"
            + "2- "
            + df_headlines["headline"][2]
            + "\n"
            + "3- "
            + df_headlines["headline"][3]
            + "\n\n"
            + "NBC - "
            + noticia_NBC
            + "\n"
            + "1- "
            + df_headlines["headline"][4]
            + "\n"
            + "2- "
            + df_headlines["headline"][6]
            + "\n"
            + "3- "
            + df_headlines["headline"][7]
            + "\n\n"
            + "CNBC - "
            + noticia_CNBC
            + "\n"
            + "1- "
            + df_headlines["headline"][8]
            + "\n"
            + "2- "
            + df_headlines["headline"][9]
            + "\n"
            + "3- "
            + df_headlines["headline"][10]
        )

    # Scrapes the coronavirus statistics and sends an e-mail with the
    # report to the mailing list.
    def get_data(self):
        country = "Brazil"
        try:
            # XPath predicate matching the table row that contains `country`.
            country_contains = "contains(., '%s')" % country
            self.driver.get("https://www.worldometers.info/coronavirus/")

            table = self.driver.find_element_by_xpath(
                '//*[@id="main_table_countries_today"]/tbody[1]'
            )
            country_element = table.find_element_by_xpath("//td[%s]" % country_contains)
            row = country_element.find_element_by_xpath("./..")
            # Fixed column positions in the worldometers table.
            total_cases = row.find_element_by_xpath("//tr[%s]/td[3]" % country_contains)
            new_cases = row.find_element_by_xpath("//tr[%s]/td[4]" % country_contains)
            total_deaths = row.find_element_by_xpath(
                "//tr[%s]/td[5 ]" % country_contains
            )
            new_deaths = row.find_element_by_xpath("//tr[%s]/td[6]" % country_contains)
            active_cases = row.find_element_by_xpath(
                "//tr[%s]/td[8]" % country_contains
            )
            total_recovered = row.find_element_by_xpath(
                "//tr[%s]/td[7]" % country_contains
            )
            serious_critical = row.find_element_by_xpath(
                "//tr[%s]/td[9]" % country_contains
            )
            # NOTE(review): same td[9] as serious_critical -- likely meant a
            # different column for "cases per 1M population".
            tot_1m = row.find_element_by_xpath("//tr[%s]/td[9]" % country_contains)
            print("Country: " + str(country_element.text))
            print("Total cases: " + str(total_cases.text))
            print("New cases: " + str(new_cases.text))
            print("Total deaths: " + str(total_deaths.text))
            print("New deaths: " + str(new_deaths.text))
            print("Active cases: " + str(active_cases.text))
            print("Total recovered: " + str(total_recovered.text))
            print("Serious, critical cases: " + str(serious_critical.text))
            print("Total cases/1M population: " + str(tot_1m.text))

            import smtplib
            import email.utils
            import datetime
            import schedule
            from email.mime.multipart import MIMEMultipart
            from email.mime.text import MIMEText
            from email.mime.image import MIMEImage

            dt = datetime.datetime.today()
            # Gmail SMTP with STARTTLS; requires an app password.
            server = smtplib.SMTP("smtp.gmail.com", 587)
            server.ehlo()
            server.starttls()
            server.ehlo()
            server.login("insert your email here", "insert your app password here")
            subject = (
                "Estatisticas do COVID-19 em: "
                + "Brasil"  # str(country_element.text)
                + ", no dia "
                + dt.strftime("%d-%m-%Y")
            )
            body = (
                (
                    "--------------------------------\n"
                    + "Dados de: "
                    + "Brasil"  # str(country_element.text)
                    + "\
                \n\nAtualizacao dos casos de COVID-19:\
                \n\nNumero total: "
                    + str(total_cases.text)
                    + "\
                \nCasos ativos: "
                    + str(active_cases.text)
                    + "\
                \nCasos criticos: "
                    + str(serious_critical.text)
                    + "\
                \n\nTotal de mortes: "
                    + str(total_deaths.text)
                    + "\
                \nTotal de recuperados: "
                    + str(total_recovered.text)
                    + "\
                \n\nNovos casos (em relacao ao dia anterior): "
                    + str(new_cases.text)
                    + "\
                \nNovas mortes (em relacao ao dia anterior): "
                    + str(new_deaths.text)
                    + "\n--------------------------------\
                \n\nOs dados foram obtidos de https://www.worldometers.info/coronavirus/ e sao atualizados em: 00:00(GMT+0)."
                    + "\nNao deixe de seguir as recomendacoes medicas!"
                    + "\n\n--------------------------------\n"
                    + manchetesPrint
                    + "\n\n\nEste e-mail foi enviado automaticamente. Caso nao queira mais receber, mande uma mensagem :D"
                    + "\nhttps://www.facebook.com/joseph.viana"
                    + "\nhttps://www.linkedin.com/in/josephlevinthal/"
                    + "\n\n"
                )
                # Strip non-ASCII characters so the plain SMTP payload is safe.
                .encode("ascii", "ignore")
                .decode("ascii")
            )
            listaEmails = [
                "insertemailshere"
            ]
            msg = f"Subject: {subject}\n\n{body}"
            server.sendmail("Coronavirus", listaEmails, msg)
            print("SENT!")
            server.quit()
            self.driver.close()
        except Exception as e:
            # Broad catch so the browser is always shut down on failure.
            print(e)
            self.driver.quit()
# Entry point: collect headlines first (get_data embeds manchetesPrint in
# the e-mail body), then scrape statistics and send the report.
if __name__ == "__main__":
    bot = CoronaVirus()
    bot.manchetes()
    bot.get_data()
| true |
2273a7102422ce8546fed4396e01180b1965f5bb
|
Python
|
omongo/prime_factors
|
/prime_factors.py
|
UTF-8
| 440 | 3.65625 | 4 |
[] |
no_license
|
class PrimeFactors:
    """Computes the prime factorization of a positive integer.

    ``PrimeFactors(12).get_data()`` returns ``[2, 2, 3]`` -- prime factors
    in ascending order, with multiplicity.
    """

    def __init__(self, number):
        self.number = number
        # Populated by get_data(); kept as an attribute for callers that
        # read it after the call.
        self.data = []

    def get_data(self):
        """Return the prime factors of ``self.number`` (with multiplicity).

        The result is rebuilt on every call, so repeated calls return the
        same factorization (the previous version appended to ``self.data``
        each time, accumulating duplicates).
        """
        self.data = []
        number = self.number
        for candidate in range(2, self.number + 1):
            if number == 1:
                # Fully factored -- no larger candidate can divide 1.
                break
            number = self._add_prime(candidate, number)
        return self.data

    def _add_prime(self, prime, number):
        # Divide out `prime` as often as it divides `number`, recording one
        # entry per occurrence. Only primes ever divide here because all of
        # their smaller factors were already removed by earlier candidates.
        while number % prime == 0:
            self.data.append(prime)
            # Floor division keeps `number` an int (the previous `/=`
            # silently turned it into a float).
            number //= prime
        return number
| true |
5b7893035cea27df3fe6a7056c195a6e629883ed
|
Python
|
wookkl/backjoon-problemsolving
|
/[7568]덩치.py
|
UTF-8
| 177 | 3.296875 | 3 |
[] |
no_license
|
# BOJ 7568 ("덩치"): first line is N, then N lines of "weight height".
p = [list(map(int, input().split())) for _ in range(int(input()))]
for i in p:
    # Rank = 1 + number of people strictly bigger in BOTH weight and height.
    c = 1
    for j in p:
        if i[0] < j[0] and i[1] < j[1]:
            c += 1
    print(c)
| true |
7956c6b87b88dd9d91cda88d882fa2cade349686
|
Python
|
HerculesGit/python3
|
/introducao_A_programacao/televisao.py
|
UTF-8
| 576 | 3.09375 | 3 |
[] |
no_license
|
class Televisao:
    """A television that starts switched off on channel 2.

    Channels are clamped to the inclusive range [cmin, cmax]; stepping past
    either limit leaves the channel unchanged. (The original author's
    commented-out notes suggest wrap-around was considered but it is not
    implemented.)
    """

    def __init__(self, cmin, cmax):
        self.ligada = False
        self.canal = 2
        self.cmin = cmin
        self.cmax = cmax

    def muda_canal_para_cima(self):
        """Step one channel up, stopping at cmax."""
        if self.canal < self.cmax:
            self.canal += 1

    def muda_canal_para_baixo(self):
        """Step one channel down, stopping at cmin."""
        if self.canal > self.cmin:
            self.canal -= 1

    def __str__(self):
        return 'ligada:{0} ; canal:{1}'.format(self.ligada, self.canal)
| true |
1c04aae7d2e1abc3c1277d181287d5c5dd9e800b
|
Python
|
MatheMatrix/SensorDataDownload
|
/ObliData.py
|
UTF-8
| 2,740 | 2.765625 | 3 |
[] |
no_license
|
from DataKernel import *
class ObliData(DataKernel):
"""load Obliquitous data from remote data server"""
def __init__(self, server, db, uid, pwd, path, sensType):
'''Init some argvs
'''
DataKernel.__init__(self, server, db, uid, pwd, path)
self.sensType = sensType
def GetChdata(self, start, end, table):
'''Get data from data and classify into channels
start and end should be channel ID
table is a table's name
return a dict like
{ 'ch1': [12, 23, 34, 45, ... ... ],
'ch2': [90. 89. 78, 67, ... ... ],
...
}
'''
print 'Getting data in table: {0}'.format(table)
conn = self.Connect()
conn.timeout = 100
cursor = conn.cursor()
data = {}
dtTableStart, dtTableEnd = self.TableTime(table)
data = {}.fromkeys(['ch' + str(i) for i in range(start, end + 1)])
for i in data:
data[i] = []
cmd = "select [Data] from [{0}}].[dbo].[{1}]".format(self.db, table) + \
" where [DateTime] between " + \
"'{0}' and '{1}' order by [ID] asc".format(self.dtStartSQL, self.dtEndSQL)
try:
cursor.execute(cmd)
except pyodbc.OperationalError:
print 'SQL Server quary timeout, the sql server may have some problems'
row = cursor.fetchone()
if row:
row = self.Format( row[0] )
while row:
for i in data:
data[i].extend( self.Divide(row)[i] )
row = cursor.fetchone()
if row:
row = self.Format( row[0] )
conn.close()
return data
def Format(self, row):
'''To Format and calculate datas (from bytes to short)
ch should be a string like: ch1
row should be a bytearray
return row likes: [6401, 4353, 3841, ... ]
'''
row = struct.unpack('<' + 'h' * 340, buffer(row))
row = list(row)
for i in range(len(row)):
row[i]= float(row[i]) * 10 / int('7fff', 16)
return row
def Divide(self, row):
'''Divide data of Obliquitous's different channels
Input data should be a list of numbers like:
[1510.452, 1505.248, 1508.025, 1510.203, 1512.271, ... ]
the return will like:
{ 'ch1': [21, 35, 74, ... ]
'ch2': [21, 32, 43, ... ]
...
}
'''
data = {}.fromkeys(['ch' + str(i) for i in range(1, 17 + 1)])
for i in data:
data[i] = []
for i in range(len(row)):
data['ch' + str(i % 17 + 1)].append(row[i])
return data
| true |
b9e093f1479cf48f5d028d8ec7e97c3815fd61eb
|
Python
|
sliri/nextpy
|
/nextpy 1.1.4.py
|
UTF-8
| 594 | 4.15625 | 4 |
[] |
no_license
|
# 1.1.4
################
import functools
def list2str(num):
    """Split an integer into the list of its digit characters."""
    return [digit for digit in str(num)]
def str2int(num):
    """Convert a sequence of digit strings into a list of integers."""
    return [int(token) for token in num]
def add(x, y):
    """Return the sum of two values (reduce-friendly binary adder)."""
    total = x + y
    return total
def sum_of_digits(number):
    """Return the sum of the decimal digits of *number*.

    Works for negative inputs by summing the digits of the absolute
    value; the original implementation fed the '-' sign into int()
    and raised ValueError.
    """
    return sum(int(digit) for digit in str(abs(number)))
if __name__ == "__main__":
    # Smoke test: digits of 1858 sum to 1 + 8 + 5 + 8 = 22.
    test_input = 1858
    print(sum_of_digits(test_input))
| true |
d6dec035e76ab76cf8e63ba19ac6549038108769
|
Python
|
njsmith/async_generator
|
/async_generator/util.py
|
UTF-8
| 209 | 2.578125 | 3 |
[
"MIT"
] |
permissive
|
class aclosing:
    """Async context manager that guarantees ``aclose()`` on exit.

    The asynchronous analogue of :func:`contextlib.closing`: entering the
    ``async with`` block yields the wrapped async iterator, and leaving it
    (normally or via an exception) always awaits its ``aclose()``.
    """

    def __init__(self, aiter):
        self._aiter = aiter

    async def __aenter__(self):
        return self._aiter

    async def __aexit__(self, *exc_info):
        await self._aiter.aclose()
| true |
0fc571632c2ae7c820f415b45b15f46290ecae39
|
Python
|
Nishinomiya0foa/Old-Boys
|
/homework/ftp_homework/core/upload.py
|
UTF-8
| 818 | 2.515625 | 3 |
[] |
no_license
|
import os
import struct
import json
from homework.ftp_homework.core import message_head
def upload():
    """Interactively choose a file to upload and build its transfer header.

    Returns a tuple (head, filesize_pack):
      * head -- the dict from message_head.head(), extended with the chosen
        'filename' and its 'filesize' in bytes.
      * filesize_pack -- the file size packed as a native 4-byte int
        (struct format 'i').
    """
    head = message_head.head()
    filelist = os.listdir(head['path'])
    # print(filelist)
    dic = {}
    print("文件列表如下:请选择想要上传的文件。")
    for i in range(len(filelist)):
        dic[i] = filelist[i]
        print((i, dic[i]))
    # BUG FIX: the original converted the input inside try/except but then
    # indexed `dic` unconditionally, so a non-numeric or out-of-range choice
    # printed the error message and immediately crashed with KeyError.
    # Re-prompt until the user supplies a valid index instead.
    while True:
        choose_file = input('>')
        try:
            choose_file = int(choose_file)
        except ValueError:
            print("找不到你选中的文件")
            continue
        if choose_file in dic:
            break
        print("找不到你选中的文件")
    filename = dic[choose_file]
    full_filename = os.path.join(head['path'], filename)
    filesize = os.path.getsize(full_filename)
    head['filename'] = filename
    head['filesize'] = filesize
    filesize_pack = struct.pack('i', filesize)
    return head, filesize_pack
if __name__ == '__main__':
    # Manual smoke test: run the interactive file picker directly.
    upload()
| true |
2c7c58c3e10a732f7af76160d42ab69af123075f
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2176/60621/249520.py
|
UTF-8
| 197 | 2.90625 | 3 |
[] |
no_license
|
# For each distinct suffix of the input string, taken in lexicographic
# order, print the 1-based position of that suffix's last occurrence,
# separated by single spaces.
a = input()
distinct_suffixes = sorted({a[start:] for start in range(len(a))})
positions = [str(a.rfind(suffix) + 1) for suffix in distinct_suffixes]
print(" ".join(positions))
| true |
06007abcfa69b83a4c1bd1685b522df75239331a
|
Python
|
hsiehpinghan/trident
|
/trident/data/transform.py
|
UTF-8
| 12,970 | 2.65625 | 3 |
[
"MIT"
] |
permissive
|
import numbers
from abc import ABC, abstractmethod
from collections import Iterable
from typing import Sequence, Tuple, Dict, Union, Optional, Callable, Any
import collections
import numpy as np
import cv2
from trident.backend.common import OrderedDict
from trident.backend.common import *
from trident.backend.tensorspec import TensorSpec, object_type_inference, ObjectType
if get_backend() == 'pytorch':
from trident.backend.pytorch_ops import *
elif get_backend() == 'tensorflow':
from trident.backend.tensorflow_ops import *
__all__ = ['Transform', 'VisionTransform', 'TextTransform']
class Transform(ABC):
    """Abstract base for all data transforms.

    Subclasses override :meth:`apply`; :meth:`apply_batch` fans a batch of
    samples out to :meth:`apply`, inferring a ``TensorSpec`` from the first
    sample when the transform is spatial and no spec was supplied.
    """

    def __init__(self, name=None):
        self.name = name
        # Spatial transforms need a TensorSpec to interpret their input.
        self.is_spatial = False

    def apply_batch(self, inputs: Sequence[Tuple], spec: Optional[TensorSpec] = None):
        if spec is None and self.is_spatial == True:
            # Infer the spec from the first sample of the batch.
            spec = TensorSpec(shape=tensor_to_shape(inputs[0]), object_type=object_type_inference(inputs[0]))
        transformed = (self.apply(sample, spec) for sample in inputs)
        return tuple(transformed)

    @abstractmethod
    def apply(self, input: Tuple, spec: TensorSpec):
        pass

    def __call__(self, inputs: Union[Dict[TensorSpec, np.ndarray], np.ndarray], **kwargs):
        # Intentionally a no-op at this level; concrete subclasses route
        # __call__ to apply_batch.
        pass

    def __repr__(self):
        return type(self).__name__
class VisionTransform(Transform):
    r"""
    Base class of all transforms used in computer vision.
    Calling logic: apply_batch() -> apply() -> _apply_image() and other _apply_*()
    method. If you want to implement a self-defined transform method for image,
    rewrite _apply_image method in subclass.
    :param order: input type order. Input is a tuple containing different structures,
        order is used to specify the order of structures. For example, if your input
        is (image, boxes) type, then the ``order`` should be ("image", "boxes").
        Current available strings and data type are describe below:
        * "image": input image, with shape of `(H, W, C)`.
        * "coords": coordinates, with shape of `(N, 2)`.
        * "boxes": bounding boxes, with shape of `(N, 4)`, "xyxy" format,
            the 1st "xy" represents top left point of a box,
            the 2nd "xy" represents right bottom point.
        * "mask": map used for segmentation, with shape of `(H, W, 1)`.
        * "keypoints": keypoints with shape of `(N, K, 3)`, N for number of instances,
            and K for number of keypoints in one instance. The first two dimensions
            of last axis is coordinate of keypoints and the the 3rd dimension is
            the label of keypoints.
        * "polygons": a sequence containing numpy arrays, its length is the number of instances.
            Each numpy array represents polygon coordinate of one instance.
        * "category": categories for some data type. For example, "image_category"
            means category of the input image and "boxes_category" means categories of
            bounding boxes.
        * "info": information for images such as image shapes and image path.
    You can also customize your data types only if you implement the corresponding
    _apply_*() methods, otherwise ``NotImplementedError`` will be raised.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        # Target spatial size used by _apply_boxes for clipping; spatial
        # subclasses are expected to fill this in.
        self.output_size=None
        # Per-sample cache shared across the _apply_* calls of one sample
        # (e.g. a random crop offset), reset before each new sample so that
        # image/mask/boxes of the same sample get identical geometry.
        self._shape_info = None
    def apply_batch(self, inputs: Sequence[Tuple],spec:Optional[TensorSpec]=None):
        r"""Apply transform on batch input data.

        ``inputs`` is either a bare array/tensor or an ``OrderedDict``
        mapping ``TensorSpec`` keys to data; in the dict case every entry
        of one sample is transformed with the same cached geometry.
        """
        if not isinstance(inputs,OrderedDict) :
            if self.is_spatial==True:
                # New sample: drop any cached geometry.
                self._shape_info =None
            if spec is None :
                # No spec supplied: assume a single RGB image.
                spec = TensorSpec(shape=tensor_to_shape(inputs,need_exclude_batch_axis=True,is_singleton=True), object_type=ObjectType.rgb)
            return self.apply(inputs, spec)
        else:
            results=OrderedDict()
            sampledata= list(inputs.values())[0]
            spec=inputs.key_list[0]
            # Heuristic batch detection: a non-array iterable, or a tensor
            # whose rank still includes the batch axis.
            if (isinstance(sampledata, Iterable) and not isinstance(sampledata, np.ndarray)) or (is_tensor_like(sampledata) and spec.ndim == sampledata.ndim):
                for i in range(len(sampledata)):
                    # Reset per sample, then transform the i-th element of
                    # every entry with the same cached parameters.
                    self._shape_info = None
                    for spec, data in inputs.items():
                        if spec not in results:
                            results[spec] = []
                        results[spec].append(self.apply(data[i], spec))
            else:
                # Single sample: transform each entry once.
                self._shape_info = None
                for spec, data in inputs.items():
                    results[spec]=self.apply(data, spec)
            return results
    def apply(self, input: Tuple,spec:TensorSpec):
        r"""Apply transform on single input data.

        Dispatches on the spec's object type; unknown types pass through
        unchanged. Image outputs are additionally range-checked.
        """
        if spec is None:
            return self._apply_image(input,None)
        apply_func = self._get_apply(spec.object_type.value)
        if apply_func is None:
            return input
        else:
            img_data=apply_func(input,spec)
            if apply_func.__qualname__ == '_apply_image':
                img_data=self.check_pixel_range(img_data)
            return img_data
    def _get_apply(self, key):
        # Map an object-type string to the matching _apply_* bound method;
        # returns None (-> pass-through) for unrecognized keys.
        if ('image' in key or 'rgb' in key or 'gray' in key) and key!='image_path' :
            return getattr(self, "_apply_{}".format('image'), None)
        elif 'bbox' in key:
            return getattr(self, "_apply_{}".format('boxes'), None)
        elif 'mask' in key:
            return getattr(self, "_apply_{}".format('mask'), None)
        elif 'keypoint' in key or 'landmark' in key:
            return getattr(self, "_apply_{}".format('keypoints'), None)
        elif 'polygon' in key :
            return getattr(self, "_apply_{}".format('polygons'), None)
        elif 'label' in key:
            return getattr(self, "_apply_{}".format('labels'), None)
        return None
    def check_pixel_range(self,image):
        # Sanity checks on a transformed image: values must stay in [0, 255],
        # keep at least 1 unit of dynamic range, and not be almost uniformly
        # white or black (>95% of pixels on one side of 127.5).
        # NOTE(review): the messages below contain typos ("bundary") kept
        # as-is; they are runtime strings.
        max_value=image.copy().max()
        min_value=image.copy().min()
        if max_value>255 or min_value<0:
            raise ValueError('{0} over bundary max:{1} :{2}'.format(self.__class__.__name__,max_value,min_value))
        elif max_value-min_value<1:
            raise ValueError('{0} almost monotone max:{1} :{2}'.format(self.__class__.__name__,max_value,min_value))
        elif np.greater(image.copy(),127.5).astype(np.float32).mean()>0.95:
            raise ValueError('{0} almost white max:{1} :{2}'.format(self.__class__.__name__,max_value,min_value))
        elif np.less(image.copy(),127.5).astype(np.float32).mean()>0.95:
            raise ValueError('{0} almost black max:{1} :{2}'.format(self.__class__.__name__,max_value,min_value))
        return image
    def _apply_image(self, image,spec:TensorSpec):
        # Subclass hook: transform the image itself.
        raise NotImplementedError
    def _apply_coords(self, coords,spec:TensorSpec):
        # Subclass hook: transform an (N, 2) array of xy coordinates; used
        # by the boxes/keypoints/polygons helpers below.
        raise NotImplementedError
    def _apply_boxes(self, boxes,spec:TensorSpec):
        # Transform xyxy boxes by pushing their 4 corners through
        # _apply_coords, then re-axis-aligning and clipping to output_size.
        # Extra columns are treated as [x1,y1,x2,y2, class, keypoints...].
        if isinstance( self.output_size,numbers.Number):
            self.output_size=( self.output_size, self.output_size)
        eh, ew = self.output_size
        if ndim(boxes)==0:
            return boxes
        else:
            if ndim(boxes) == 1:
                boxes=np.expand_dims(boxes,0)
            B=boxes.shape[0]
            location= boxes[:, :4]
            class_info = boxes[:, 4:5] if boxes.shape[-1]>4 else None
            keypoints = boxes[:, 5:] if boxes.shape[-1]>5 else None
            # Expand each xyxy box into its four corner points.
            idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()
            location = np.asarray(location).reshape(-1, 4)[:, idxs].reshape(-1, 2)
            location = self._apply_coords(location,spec).reshape((-1, 4, 2))
            # Re-axis-align: new box spans the min/max of the moved corners.
            minxy = location.min(axis=1)
            maxxy = location.max(axis=1)
            if keypoints is not None:
                coords_keypoints = np.asarray(keypoints).reshape(-1, 2)
                keypoints = self._apply_keypoints(coords_keypoints, spec).reshape((-1, keypoints.shape[-1]))
            trans_boxes = np.concatenate((minxy, maxxy), axis=1)
            trans_boxes[:, 0::2] =clip(trans_boxes[:, 0::2] , 0, ew)
            trans_boxes[:, 1::2] = clip(trans_boxes[:, 1::2],0, eh)
            # NOTE(review): if boxes carry a class column but no keypoint
            # columns (shape[-1] == 5), keypoints is None here and this
            # concatenate would raise -- confirm callers always pass either
            # 4 or more-than-5 columns.
            if class_info is not None and class_info.shape[-1]>0:
                trans_boxes = np.concatenate((trans_boxes, class_info,keypoints), axis=1)
            return trans_boxes
    def _apply_mask(self, mask,spec:TensorSpec):
        # Subclass hook: transform a segmentation map.
        raise NotImplementedError
    def _apply_keypoints(self, keypoints,spec:TensorSpec):
        # Split coordinates from visibility flags, transform only the
        # coordinates, and stitch the flags back on unchanged.
        coords, visibility = keypoints[..., :2], keypoints[..., 2:]
        #trans_coords = [self._apply_coords(p,spec) for p in coords]
        trans_coords = self._apply_coords(coords, spec)
        return np.concatenate((trans_coords, visibility), axis=-1)
    def _apply_polygons(self, polygons,spec:TensorSpec):
        # Transform every point array of every instance's polygon list.
        return [[self._apply_coords(p,spec) for p in instance] for instance in polygons]
    def _apply_labels(self, labels,spec:TensorSpec):
        # Subclass hook: transform label data.
        raise NotImplementedError
    def __call__(self, inputs: Union[Dict[TensorSpec,np.ndarray],np.ndarray],**kwargs):
        # Entry point: forward to apply_batch with an optional spec kwarg.
        spec=kwargs.get('spec')
        return self.apply_batch(inputs,spec)
class TextTransform(Transform):
    r"""
    Base class of all transforms used in text/NLP pipelines.
    Calling logic: apply_batch() -> apply() -> _apply_corpus() and other _apply_*()
    method. If you want to implement a self-defined transform method for text,
    rewrite _apply_corpus method in subclass.
    Current available strings and data type are describe below:
        * "corpus": the raw text data.
        * "sequence_label" / "sequence_mask": per-token labels and masks.
    You can also customize your data types only if you implement the corresponding
    _apply_*() methods, otherwise ``NotImplementedError`` will be raised.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        # Per-corpus cache filled by _precalculate() and shared by the
        # _apply_* methods during one apply_batch() call.
        self._text_info = None
    def _precalculate(self, textdata, **kwargs):
        # Hook: subclasses may precompute corpus-level statistics here
        # before the per-entry _apply_* methods run. Default is a no-op.
        pass
    def apply_batch(self, inputs: Sequence[Tuple],spec:Optional[TensorSpec]=None):
        r"""Apply transform on batch input data.

        For dict input, _precalculate runs once on the corpus entry (or,
        failing that, on the first value) before each entry is transformed.
        """
        if not isinstance(inputs,OrderedDict) :
            self._text_info = None
            if spec is None and self.is_spatial==True:
                spec = TensorSpec(shape=tensor_to_shape(inputs,need_exclude_batch_axis=True,is_singleton=True), object_type=ObjectType.corpus)
            self._precalculate(inputs)
            return self.apply(inputs, spec)
        else:
            results=OrderedDict()
            self._text_info = None
            is_precalculate=False
            for k,v in inputs.items():
                if k.object_type is None:
                    # Fill in missing object types by inspecting the data.
                    k.object_type=object_type_inference(v)
                if isinstance(k,TensorSpec) and k.object_type==ObjectType.corpus:
                    self._precalculate(v)
                    is_precalculate=True
            if not is_precalculate:
                # No corpus entry found: fall back to the first value.
                self._precalculate(inputs.value_list[0])
            for spec, data in inputs.items():
                results[spec]=self.apply(data, spec)
            return results
    def apply(self, input: Tuple,spec:TensorSpec):
        r"""Apply transform on single input data.

        Dispatches on the spec's object type; unknown types pass through.
        """
        if spec is None:
            return self._apply_corpus(input,None)
        apply_func = self._get_apply(spec.object_type.value)
        if apply_func is None:
            return input
        else:
            return apply_func(input,spec)
    def _get_apply(self, key):
        # Map an object-type string to the matching _apply_* bound method.
        if key is None or 'corpus' in key :
            return getattr(self, "_apply_{}".format('corpus'), None)
        elif 'sequence_label' in key:
            # NOTE(review): this resolves to "_apply_sequence_label"
            # (singular), but the method defined below is
            # _apply_sequence_labels, so getattr falls back to None and
            # sequence labels pass through untransformed -- confirm whether
            # subclasses define the singular name intentionally.
            return getattr(self, "_apply_{}".format('sequence_label'), None)
        elif 'sequence_mask' in key:
            return getattr(self, "_apply_{}".format('sequence_mask'), None)
        return None
    def _apply_corpus(self, corpus,spec:TensorSpec):
        # Subclass hook: transform the raw text corpus.
        raise NotImplementedError
    def _apply_sequence(self, sequence,spec:TensorSpec):
        # Subclass hook: transform a token sequence.
        raise NotImplementedError
    def _apply_sequence_labels(self, labels,spec:TensorSpec):
        # Subclass hook: transform per-token labels.
        raise NotImplementedError
    def _apply_sequence_mask(self, mask,spec:TensorSpec):
        # Subclass hook: transform a per-token mask.
        raise NotImplementedError
    def __call__(self, inputs: Union[Dict[TensorSpec,np.ndarray],np.ndarray],**kwargs):
        # Entry point: forward to apply_batch with an optional spec kwarg.
        spec=kwargs.get('spec')
        return self.apply_batch(inputs,spec)
| true |
45b41779c982ac9a14386029fb1e39391f0cf79d
|
Python
|
hilltran/Python2020
|
/RemoveSpaceBetweenLines.py
|
UTF-8
| 635 | 3.09375 | 3 |
[] |
no_license
|
# Copy every non-blank line of removeBlankLine.txt into
# deleteBlankLinesOutputFile3-20.txt, dropping whitespace-only lines.
# (Absolute-path variants from the original kept for reference:)
# output = open('/Users/hieu/Desktop/1_Excel/deleteBlankLinesOutputFile3-20.txt', 'w')
# with open('/Users/hieu/Desktop/1_Excel/removeBlankLine.txt','r') as file:
#
# Context managers close BOTH files even if the copy fails partway; the
# original only closed the output handle, and only on the success path.
with open('deleteBlankLinesOutputFile3-20.txt', 'w') as output, \
        open('removeBlankLine.txt', 'r') as file:
    for line in file:
        if not line.isspace():
            # keep only lines with visible content
            output.write(line)
| true |
59026612a61c66280d7b3f4e6a23d0347da9e97a
|
Python
|
CleitonSilvaT/URI_Python
|
/1-Iniciante/1013.py
|
UTF-8
| 135 | 3.5 | 4 |
[] |
no_license
|
# -*- coding: utf-8 -*-
# URI 1013: read three space-separated integers, print the largest one.
tokens = input().split(' ')
first = int(tokens[0])
second = int(tokens[1])
third = int(tokens[2])
print(max(first, second, third), "eh o maior")
| true |
e17b8bd38e2c96b849ba77f8487a9e389f3af47e
|
Python
|
tahertaher0511/pythonProject14
|
/main.py
|
UTF-8
| 4,879 | 3.40625 | 3 |
[] |
no_license
|
import random
class Game:
    """Console tic-tac-toe: a human ("X") against a random-move computer ("O").

    Instantiating Game immediately starts the interactive match loop and
    blocks on stdin for the human player's turns.
    """
    def __init__(self):
        # Board keyed by (column, row); built with y descending so the dict
        # iterates the top row first, matching draw_board's print order.
        self.board = {(x, y): " " for y in range(3, 0, -1)
                      for x in range(1, 4)}
        self.available_co = list(self.board.keys())
        # Per-mark move counters; the " " key is a sink that stays unused.
        self.moves = {"X": 0, "O": 0, " ": 0}
        self.user = User("user")
        self.comp = Computer("comp")
        self.game_on = True
        self.whos_turn = self.user  # human opens the game; can change in future
        self.winner = ""
        self.game()
    def game(self):
        """Run the main loop: alternate turns until declare_winner reports."""
        while self.game_on:
            Game.draw_board(self.board)
            if self.whos_turn.p_name == "comp":
                move, m_char = self.comp.make_move(self.available_co)
                self.available_co.remove(move)
                self.board[move] = m_char
                self.moves[m_char] += 1
                self.whos_turn = self.user
            else:
                move, m_char = self.user.check_commit(self.available_co)
                self.available_co.remove(move)
                self.board[move] = m_char
                self.moves[m_char] += 1
                self.whos_turn = self.comp
            # A non-empty result ("<mark> wins" or "Draw") ends the game.
            self.winner = self.declare_winner()
            if self.winner != "":
                self.game_on = False
                self.draw_board(self.board)
                print(self.winner)
    @staticmethod
    def draw_board(the_board):
        """Print the 3x3 board between horizontal rules (len == 9 dashes)."""
        print("-" * len(the_board))
        for y in range(3, 0, -1):
            print("| ", end="")
            for x in range(1, 4):
                print(f"{the_board[(x, y)]} ", end="")
            print("|")
        print("-" * len(the_board))
    @staticmethod
    def checker(the_list):
        """Return (True, mark) for the first 3-cell line in the_list whose
        cells all hold the same non-blank mark; otherwise (False, "")."""
        for item in the_list:
            if item[0] != item[1] or item[0] == " ":
                continue
            else:
                if item[1] == item[2]:
                    return True, item[2]
                else:
                    continue
        return False, ""
    def declare_winner(self):
        """Check rows, columns and diagonals; return "<mark> wins", "Draw"
        after nine moves, or "" while the game should continue."""
        all_rows, self.winner = self.checker([[self.board[x, y]
                                               for x in range(1, 4)]
                                              for y in range(1, 4)])
        if all_rows:
            return f"{self.winner} wins"
        all_columns, self.winner = self.checker([[self.board[x, y]
                                                  for y in range(1, 4)]
                                                 for x in range(1, 4)])
        if all_columns:
            return f"{self.winner} wins"
        # zip(1..3, 3..1) yields (1,3),(2,2),(3,1) and board[x, x] the other
        # diagonal -- the names are swapped relative to convention, but both
        # diagonals are covered.
        main_diagonal = [self.board[x, y] for x, y in zip(range(1, 4),
                                                          range(3, 0, -1))]
        side_diagonal = [self.board[x, x] for x in range(1, 4)]
        diagonals, self.winner = self.checker([main_diagonal, side_diagonal])
        if diagonals:
            return f"{self.winner} wins"
        # Nine real moves with no winning line means the board is full.
        moves_counter = sum([value_ for move, value_ in self.moves.items()
                             if move != " "])
        if moves_counter == 9:
            return "Draw"
        return ""
class Player:
    """Base player: holds a display name and the board mark it plays with."""

    def __init__(self, player_name, which_char):
        self.p_name, self.play_char = player_name, which_char
class User(Player):
    """Human player; reads board coordinates from stdin and validates them."""

    def __init__(self, name, p_char="X"):
        super().__init__(name, p_char)
        self.move = ""

    def check_commit(self, av_co):
        """Prompt until a legal, free coordinate is entered.

        Returns ((x, y), mark) where (x, y) is the validated move and
        mark is this player's board character.
        """
        self.move = input("Enter the coordinates: ").split()
        while True:
            # Guard 1: every token must be a decimal number.
            if not all(map(self.elementary, self.move)):
                print("You should enter numbers!")
                self.move = input("Enter the coordinates: ").split()
                continue
            # Guard 2: coordinates must lie on the 3x3 board.
            if not self.is_in_range(self.move):
                print("Coordinates should be from 1 to 3!")
                self.move = input("Enter the coordinates: ").split()
                continue
            # Guard 3: the chosen cell must still be free.
            if self.is_occupied(self.move, av_co):
                print("This cell is occupied! Choose another one!")
                self.move = input("Enter the coordinates: ").split()
                continue
            break
        return tuple(int(token) for token in self.move), self.play_char

    @staticmethod
    def elementary(the_str):
        # True when the token consists solely of decimal digits.
        return the_str.isdigit()

    @staticmethod
    def is_occupied(coordinate, av_co):
        # A move is occupied when it is absent from the free-cell list.
        point = tuple(int(part) for part in coordinate)
        return point not in av_co

    @staticmethod
    def is_in_range(coordinate):
        # Every coordinate component must be between 1 and 3 inclusive.
        return all(1 <= int(part) <= 3 for part in coordinate)
class Computer(Player):
    """Computer player; currently plays uniformly random moves ("easy")."""

    def __init__(self, name, level="easy", p_char="O"):
        super().__init__(name, p_char)
        self.level = level

    def make_move(self, av_co):
        """Announce the difficulty level and return (coordinate, mark)."""
        print(f'Making move level "{self.level}"')
        return random.choice(av_co), self.play_char
if __name__ == "__main__":
    # Launch an interactive game (blocks on stdin for the human player).
    Game()
| true |
c6ca4bf00dfc8504f4725f0f2ffb190e0ab37e9b
|
Python
|
vkWeb/vikivedia
|
/encyclopedia/util.py
|
UTF-8
| 1,584 | 3.046875 | 3 |
[] |
no_license
|
import re
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
# Enter directory name relative to BASE_DIR
ENTRIES_DIR_NAME = "entries"
def list_entries():
    """
    Returns a list of all names of encyclopedia entries.
    """
    _, filenames = default_storage.listdir(ENTRIES_DIR_NAME)
    # Strip the ".md" suffix from every Markdown file name.
    titles = (re.sub(r"\.md$", "", name)
              for name in filenames if name.endswith(".md"))
    return sorted(titles)
def save_entry(title, content):
    """
    Saves an encyclopedia entry, given its title and Markdown
    content. If an existing entry with the same title already exists,
    it is replaced.
    """
    target = f"{ENTRIES_DIR_NAME}/{title}.md"
    # Remove any previous version first, otherwise the storage backend
    # would save under an altered name to avoid the collision.
    if default_storage.exists(target):
        default_storage.delete(target)
    default_storage.save(target, ContentFile(content))
def get_entry(title):
    """
    Retrieves an encyclopedia entry (utf-8 decoded) by its title. If no such
    entry exists, the function returns None.
    """
    try:
        # Close the storage file handle deterministically; the original
        # left it open after reading (resource leak).
        with default_storage.open(f"{ENTRIES_DIR_NAME}/{title}.md") as f:
            return f.read().decode("utf-8")
    except FileNotFoundError:
        return None
def delete_all_entries():
    """
    Deletes all the entries. Returns True if all the entries are deleted.
    If no entries are found the function returns None.
    """
    titles = list_entries()
    if not titles:
        return None
    for title in titles:
        default_storage.delete(f"{ENTRIES_DIR_NAME}/{title}.md")
    return True
| true |
0a26109b14c1c9a6b6658d77d9aaf137d73cd32f
|
Python
|
shivaj15/klc
|
/sum1.py
|
UTF-8
| 146 | 2.65625 | 3 |
[] |
no_license
|
#!/usr/bin/python
# Python 2 script. sum((11, ..., 17)) == 98, so this prints:
#   98  (the full value)
#   9   (first character of "98")
#   8   (second character of "98")
print str(sum((11,12,13,14,15,16,17)))
print str(sum((11,12,13,14,15,16,17)))[0:1]
print str(sum((11,12,13,14,15,16,17)))[1:2]
| true |