| blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 40..40 | stringclasses 1 value | stringlengths 5..133 | stringlengths 2..333 | stringclasses 30 values | int64 18..5.47M | float64 2.52..5.81 | int64 3..5 | listlengths 0..67 | stringclasses 2 values | stringlengths 12..5.47M | bool 1 class |
849a2fdf44397c065660f4aade3876157bc151b9 | Python | PraderioM/GamePlatform | /backend/games/common/models/game.py | UTF-8 | 4,849 | 2.703125 | 3 | [] | no_license |
import abc
import asyncpg
import json
from random import shuffle
from typing import Dict, List, Optional
from .game_component import GameComponent
from .play import Play
from .player import Player
class Game(GameComponent):
def __init__(self, current_player_index: int,
play_list: List[Play], player_list: List[Player],
id_: Optional[str],
n_actions: int):
self.current_player_index = current_player_index
self.play_list = play_list[:]
self.player_list = player_list[:]
self.id = id_
self._n_actions = n_actions
def to_database(self) -> Dict:
return {
'current_player_index': self.current_player_index,
'plays': json.dumps([play.to_database() for play in self.play_list]),
'players': json.dumps([player.to_database() for player in self.player_list]),
'id': self.id,
'n_actions': self._n_actions,
}
@abc.abstractmethod
def to_display(self) -> Dict:
raise NotImplementedError('Sub-classes must implement to display method.')
@abc.abstractmethod
def to_frontend(self, db: Optional[asyncpg.Connection] = None, *args, **kwargs) -> Dict:
raise NotImplementedError('Sub-classes must implement to frontend method.')
@abc.abstractmethod
def get_player_score(self, player: Player) -> int:
raise NotImplementedError('Sub-classes must implement get player score method.')
def to_game_resolution(self, player: Optional[Player]) -> Dict[str, bool]:
if player is None:
return {'isObserver': True}
scores_dict = self.get_all_player_scores()
sorted_scores = sorted([score for _, score in scores_dict.items()], reverse=True)
player_score = scores_dict[player.name]
if player_score == sorted_scores[0]:
if player_score == sorted_scores[1]:
return {'isTie': True}
else:
return {'isVictorious': True}
else:
return {'isLoser': True}
def get_all_player_scores(self) -> Dict[str, int]:
return {player.name: self.get_player_score(player) for player in self.player_list}
def resolution_points(self, player: Player):
scores_dict = self.get_all_player_scores()
sorted_scores = sorted([score for _, score in scores_dict.items()], reverse=True)
player_score = scores_dict[player.name]
above_players = len([score for score in sorted_scores if score > player_score])
below_players = len([score for score in sorted_scores if score < player_score])
return below_players - above_players
def add_play(self, play: Optional[Play]):
if play is None:
return
if play.player in self.player_list:
if self.player_list.index(play.player) == self.current_player_index:
self.play_list.append(play)
self.update_player_index()
def update_player_index(self):
self.current_player_index = (self.current_player_index + 1) % len(self.player_list)
def update_n_actions(self):
self._n_actions += 1
def get_player_by_name(self, name: str) -> Optional[Player]:
for player in self.player_list:
if player.name == name:
return player
return None
def add_new_player_name(self, name: str):
# Cannot add twice the same player.
for player in self.player_list:
if player.name == name:
return
if self.n_missing > 0:
player_list = self.player_list[:]
# Make player position random.
shuffle(player_list)
for player in player_list:
if player.name is None and not player.is_bot:
player.name = name
break
def is_winner_points(self, resolution_points: int) -> bool:
return resolution_points == len(self.player_list) - 1
@property
def n_bots(self) -> int:
return len([player for player in self.player_list if player.is_bot])
@property
def n_players(self) -> int:
return len([player for player in self.player_list if not player.is_bot])
@property
def n_current(self) -> int:
return len([player for player in self.player_list if player.name is not None and not player.is_bot])
@property
def current_player(self) -> Player:
return self.player_list[self.current_player_index]
@property
def n_missing(self) -> int:
return self.n_players - self.n_current
@property
def n_actions(self):
return self._n_actions
@property
@abc.abstractmethod
def has_ended(self) -> bool:
raise NotImplementedError('Sub-classes must implement `has_ended` property')
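# --- Hedged illustration (not part of the original file): a minimal concrete
# subclass sketch showing how the abstract hooks fit together. The class name
# and the one-point-per-play scoring rule are invented for this example.
class ExampleGame(Game):
    def to_display(self) -> Dict:
        return {'plays': [play.to_database() for play in self.play_list]}

    def to_frontend(self, db: Optional[asyncpg.Connection] = None, *args, **kwargs) -> Dict:
        return {'currentPlayer': self.current_player.name, **self.to_display()}

    def get_player_score(self, player: Player) -> int:
        # Invented rule: one point per play made by this player.
        return len([play for play in self.play_list if play.player == player])

    @property
    def has_ended(self) -> bool:
        return self._n_actions >= 9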
| true |
093ea03d92d9d774665cd2979e23c66da9f05968 | Python | kongzhidea/leetcode | /Number Complement.py | UTF-8 | 307 | 2.640625 | 3 | [] | no_license |
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
bx = bin(num)
r = ["0","b"]
for i in range(2, len(bx)):  # range (xrange is Python 2 only)
r.append( "0" if bx[i] == "1" else "1")
bx = "".join(r)
return int(bx,2)
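# --- Hedged aside (not part of the original solution): the same result via an
# XOR mask that flips every bit within num's bit length.
def find_complement_bitwise(num):
    return num ^ ((1 << num.bit_length()) - 1)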
| true |
7eaa163d63b43cf819083a1b513400dbe10b6c76 | Python | dalong2018/PythonDemo | /biaoqingbao/kk.py | UTF-8 | 615 | 3.203125 | 3 | [] | no_license |
import matplotlib.pyplot as plt
import numpy as np
# Generate random data; you can substitute your own data values here
data = np.random.rand(4,2)
rows = list('1234') #rows categories
columns = list('MF') #column categories
fig,ax=plt.subplots()
# Advanced color controls
ax.pcolor(data,cmap=plt.cm.Reds,edgecolors='k')
ax.set_xticks(np.arange(0,2)+0.5)
ax.set_yticks(np.arange(0,4)+0.5)
# Here we position the tick labels for x and y axis
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
#Values against each labels
ax.set_xticklabels(columns,minor=False,fontsize=20)
ax.set_yticklabels(rows,minor=False,fontsize=20)
plt.show()
| true |
8cd83465a977d5e097b01c80d61d907234555b1c | Python | hairuo55/TTS-frontend | /src/text_normalizer/car_number.py | UTF-8 | 1,490 | 3.515625 | 4 | [] | no_license |
# -*- coding: utf-8 -*-
"""
@Description:
@Author: Markus
@Date: 2020-04-16 14:07:51
@LastEditors: Markus
@LastEditTime: 2020-04-16 15:44:10
"""
import re
from src.text_normalizer.digit import digit_normalize
SHORT_PAUSE_SYMBOL = " "  # marks a short pause in the text
LONG_PAUSE_SYMBOL = " "  # marks a long pause in the text
class CarNumber:
def __init__(self):
""" 车牌号码正则: 前缀+后端序列
前缀: 前缀由各地省份缩写+大写字母构成
后端序列: 序列为4-6位数字+大写字母构成
"""
car_prefix_re = \
r"([京津沪渝蒙新藏宁桂港澳黑吉辽晋冀青鲁豫苏皖浙闽赣湘鄂粤琼甘贵云陕台][A-Z])"
car_sequence_pattern = r"([\dA-Z]{4,6})"
car_num_re = "({0}\s?{1})".format(car_prefix_re, car_sequence_pattern)
self.car_number_re = re.compile(car_num_re)
def normalize(self, text):
""" 在车牌首字和车牌尾序列之间添加停顿 """
matchers = self.car_number_re.findall(text)
if matchers:
for matcher in matchers:
target = matcher[0]
target = target.replace(" ", "")  # remove the space in e.g. "粤A D74821"
prefix = target[:2]
remain = digit_normalize(target[2:])
target = prefix + SHORT_PAUSE_SYMBOL + remain
text = text.replace(matcher[0], target)
return text
if __name__ == '__main__':
# quick test
print(CarNumber().normalize('我的车牌是粤AD74821。'))
print(CarNumber().normalize('我的车牌是粤A D74821。'))
| true |
71cb93c4b91b4b2cceae396e559fd844d856f3d6 | Python | Openwarfare/PyXbee | /pyxbee_src/pyxbee/datatypes.py | UTF-8 | 3,331 | 2.9375 | 3 | [] | no_license |
#TODO:These types need to be reworked so they make more sense and are easier to use.
#the current implementation is clumsy and needs to be refined. Think about moving
#the validators into the type and only storing the value in the type. Right now the
#value is stored in the type and or the InputField value which is weird
#TODO: Get rid of these types altogether perhaps. The only two types are numbers
#and strings.
class Type:
validator=None
class Raw:
value=None
def __init__(self,value=None):
self.value=value
def __str__(self):
return '%s'%self.value
def serial(self):
return '%s'%self.value
class String(Raw):
def __setattr__(self,name,value):
if name == 'value':
if value is None:
pass
else:
value=str(value)
self.__dict__[name]=value
def __len__(self):
return len(self.value)
class HexString(String):
def __setattr__(self,name,value):
if name == 'value':
if value is None:
pass
else:
value=str(value)
value=value[2:]
self.__dict__[name]=value
class Hex(Raw):
def __setattr__(self,name,value):
if name == 'value':
if value is None:
pass
elif type(value) is str:
value=int(value.strip(),16)
else:
value=int(value)
self.__dict__[name]=value
def __str__(self):
if self.value:
return hex(self.value)
else:
return '%s'%self.value
def serial(self):
if self.value:
return hex(self.value)[2:]
return ''
class Int(Hex):
def __setattr__(self,name,value):
if name == 'value':
if value is None:
pass
elif type(value) is str:
value=int(value.strip())
else:
value=int(value)
self.__dict__[name]=value
def __str__(self):
return '%s'%self.value
class Binary(Hex):
def __setattr__(self,name,value):
if name == 'value':
if value is None:
self.__dict__['value']=None
elif type(value) is str:
self.__dict__['value']=int(value,2)
else:
self.__dict__['value']=int(value)
else:
self.__dict__[name]=value
def __str__(self):
if self.value:
return bin(self.value)
else:
return '%s'%self.value
class Choice(Int):
choice_type=int
choices=None
value=None
def __init__(self,value=None,choices=None):
self.choices=dict()
if choices:
self.choices=choices
if value:
self.value=self.choice_type(value)
def add(self,name,value):
self.choices[name]=self.choice_type(value)
def name(self, choice):
    # reverse lookup; list() is needed because dict views are not indexable in Python 3
    return list(self.choices.keys())[list(self.choices.values()).index(choice)]
def __setattr__(self,name,value):
self.__dict__[name]=value
def __str__(self):
return "<Choice:%s(%s)>"%(self.value,self.name(self.value))
| true |
3fbc16c21fbb0aa892d070f9c52cef481610349e | Python | reconstruir/bes | /lib/bes/text/line_continuation_merger.py | UTF-8 | 4,182 | 2.578125 | 3 | ["Apache-2.0"] | permissive |
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.system.log import log
from ..system.check import check
from .text_line import text_line
class _state(object):
def __init__(self, parser):
self.name = self.__class__.__name__[1:]
log.add_logging(self, tag = '%s.%s' % (parser.__class__.__name__, self.name))
self.parser = parser
def handle_token(self, token):
raise RuntimeError('unhandled handle_token(%s) in state %s' % (token, self.name))
def change_state(self, new_state, token):
self.parser._change_state(new_state, 'token="%s"' % (str(token)))
def unexpected_token(self, token):
raise RuntimeError('unexpected token in state %s: %s' % (self.name, token))
class _state_expecting_line(_state):
def __init__(self, parser):
super(_state_expecting_line, self).__init__(parser)
def handle_token(self, token):
self.log_d('handle_token(%s)' % (str(token)))
new_state = None
if token is None:
new_state = self.parser.STATE_DONE
elif token.ends_with_continuation:
new_state = self.parser.STATE_CONTINUATION
assert not self.parser._buffer
self.parser._buffer = [ token ]
assert not self.parser._blank_buffer
self.parser._blank_buffer = [ text_line(token.line_number + 1, '') ]
else:
new_state = self.parser.STATE_EXPECTING_LINE
yield [ token ]
self.change_state(new_state, token)
class _state_continuation(_state):
def __init__(self, parser):
super(_state_continuation, self).__init__(parser)
def handle_token(self, token):
self.log_d('handle_token(%s)' % (str(token)))
new_state = None
assert self.parser._buffer
self.parser._buffer.append(token)
if token is None:
raise RuntimeError('Unexpected end of tokens expecting a continuation in state %s' % (self.name))
elif token.ends_with_continuation:
new_state = self.parser.STATE_CONTINUATION
self.parser._blank_buffer.append(text_line(token.line_number + 1, ''))
else:
new_state = self.parser.STATE_EXPECTING_LINE
yield self.parser._buffer
for blank_line in self.parser._blank_buffer:
yield [ blank_line ]
self.parser._buffer = None
self.parser._blank_buffer = None
self.change_state(new_state, token)
class _state_done(_state):
def __init__(self, parser):
super(_state_done, self).__init__(parser)
def handle_token(self, token):
self.log_d('handle_token(%s)' % (str(token)))
raise RuntimeError('Unexpected token(%s) in state %s' % (token, self.name))
class line_continuation_merger(object):
def __init__(self):
log.add_logging(self, tag = 'line_continuation_merger')
self.STATE_CONTINUATION = _state_continuation(self)
self.STATE_DONE = _state_done(self)
self.STATE_EXPECTING_LINE = _state_expecting_line(self)
self.state = self.STATE_EXPECTING_LINE
self._buffer = None
self._blank_buffer = None
def _run(self, tokens):
self.log_d('_run(%s)' % (tokens))
for token in [ t for t in tokens ] + [ None ]:
for result in self.state.handle_token(token):
if len(result) == 1:
line = result[0]
self.log_i('parse: new untouched line: \"%s\"' % (str(line)))
yield line
else:
merged_line = text_line.merge(result)
self.log_i('parse: new merged line: \"%s\"' % (str(merged_line)))
yield merged_line
assert self.state == self.STATE_DONE
def _change_state(self, new_state, msg):
assert new_state
if new_state != self.state:
self.log_d('transition: %20s -> %-20s; %s' % (self.state.__class__.__name__, new_state.__class__.__name__, msg))
self.state = new_state
@classmethod
def merge(clazz, lines):
'''Merge a sequence of text_line objects. Yield a sequence with line
continuations merged into one. The resulting empty lines are kept with
empty text.'''
check.check_text_line_seq(lines)
return clazz()._run(lines)
@classmethod
def merge_to_list(clazz, lines):
check.check_text_line_seq(lines)
return [ x for x in clazz.merge(lines) ]
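# --- Hedged usage sketch (not part of the original module); assumes
# text_line(line_number, text) as constructed above and trailing-backslash
# continuations:
#
#   lines = [ text_line(1, 'foo \\'), text_line(2, 'bar') ]
#   merged = line_continuation_merger.merge_to_list(lines)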
| true |
018ef0025a77f0f251ebf0c5e1006d45dfad79ca | Python | SMS-NED16/crash-course-python | /python_work/chapter_10/exercises/10_5_programming_poll.py | UTF-8 | 564 | 3.6875 | 4 | [] | no_license |
"""
Asks users to enter their reasons for liking programming
Stores responses in a file called 'responses.txt'
"""
filename = "text_files/responses.txt"
entry_flag = True
while entry_flag:
print("Welcome to our programming survey. Enter 'q' to quit at any time.")
user_response = input("Why do you like programming? ")
if user_response.lower() == 'q':
print("Thank you for taking our survey!\n")
entry_flag = False
else:
print("Thank you for taking our survey!\n")
with open(filename, 'a') as file_object:
file_object.write(user_response + "\n")
| true |
af581ee8c424cd64be7252a647376ccd980ecd64 | Python | hglad/easygan | /easygan/nets/CGen.py | UTF-8 | 2,547 | 2.75 | 3 | [] | no_license |
import torch.nn as nn
import torch
class CGen(nn.Module):
def __init__(self, n_classes=26, c=64):
super(CGen, self).__init__()
"""
Generates 90 x 160 x 3 image using labels (h, w, c)
"""
self.embed = nn.Sequential(
nn.Embedding(n_classes, 50),
nn.Linear(50, 16))
self.conv0 = nn.Sequential(
nn.ConvTranspose2d(116, 8*c, kernel_size=(6,9), stride=1, padding=0, bias=False),
nn.BatchNorm2d(8*c),
nn.ReLU(False),
nn.ConvTranspose2d(8*c, 4*c, kernel_size=(4,6), stride=2, padding=1, bias=False),
nn.BatchNorm2d(4*c),
nn.ReLU(False),
nn.ConvTranspose2d(4*c, 2*c, kernel_size=(2,4), stride=2, padding=1, bias=False),
nn.BatchNorm2d(2*c),
nn.ReLU(False),
nn.ConvTranspose2d(2*c, c, kernel_size=(5,4), stride=2, padding=1, bias=False),
nn.BatchNorm2d(c),
nn.ReLU(False),
nn.ConvTranspose2d(c, 3, kernel_size=4, stride=2, padding=1, bias=False),
nn.Tanh()
)
def forward(self, latent_vector, labels): # supply z and y
y_layer = self.embed(labels).squeeze(1)
x = torch.cat((latent_vector, y_layer), dim=1)
# print (x.shape)
x = x.unsqueeze(2).unsqueeze(2) # dimensions [batch_size, 116, 1, 1]
# print (x.shape)
x = self.conv0(x) # dimensions [batch_size, 3, 90, 160]
# print (x.shape)
return x
"""
# OLD
self.conv0 = nn.Sequential(
nn.ConvTranspose2d(116, 8*c, kernel_size=(6,9), stride=1, padding=0, bias=False),
nn.BatchNorm2d(8*c),
nn.ReLU(False),
nn.ConvTranspose2d(8*c, 4*c, kernel_size=(3,5), stride=(2,2), padding=1, bias=False),
nn.BatchNorm2d(4*c),
nn.ReLU(False),
nn.ConvTranspose2d(4*c, 2*c, kernel_size=(4,5), stride=(2,2), padding=1, bias=False),
nn.BatchNorm2d(2*c),
nn.ReLU(False),
# nn.ConvTranspose2d(2*c, 2*c, kernel_size=(3,5), stride=2, padding=1, bias=False),
# nn.BatchNorm2d(2*c),
# nn.LeakyReLU(0.2, inplace=False),
# nn.ReLU(False),
nn.ConvTranspose2d(2*c, c, kernel_size=(4,5), stride=2, padding=1, bias=False),
nn.BatchNorm2d(c),
nn.ReLU(False),
nn.ConvTranspose2d(c, 3, kernel_size=(4,4), stride=2, padding=0, bias=False),
nn.Tanh()
)
"""
| true |
9769ef32fe53a03e7e3b855079f25c2f7d7944ff | Python | mitmedialab/sherlock-project | /tests/test_helpers.py | UTF-8 | 1,308 | 3.03125 | 3 | ["MIT"] | permissive |
from unittest import TestCase
from sherlock.features.helpers import literal_eval_as_str, keys_to_csv
class Test(TestCase):
def test_literal_eval_as_str(self):
s1 = "['Krista Construction Ltd.', None, None, None, \"L'Expert de Parcs J. Aubin Inc.\", 'Lari, Construction', 0.89]"
result = literal_eval_as_str(s1)
assert result == ['Krista Construction Ltd.', None, None, None, "L'Expert de Parcs J. Aubin Inc.",
'Lari, Construction', '0.89']
def test_literal_eval_as_str_blank(self):
s1 = ""
result = literal_eval_as_str(s1)
assert result == []
def test_literal_eval_as_str_empty_array(self):
s1 = "[]"
result = literal_eval_as_str(s1)
assert result == []
def test_literal_eval_as_str_multiple_commas_in_string(self):
s1 = "['I have, multiple commas, in string which should, be preserved, ', ', another']"
result = literal_eval_as_str(s1)
assert result == ['I have, multiple commas, in string which should, be preserved, ',
', another']
def test_keys_to_csv(self):
result = keys_to_csv(['n_[0]-agg-any', 'n_[,]-agg-any', 'n_["]-agg-any'])
assert result == '"n_[0]-agg-any","n_[,]-agg-any","n_[""]-agg-any"\r\n'
| true |
a562facf57fc473cd928d40891c093c567f7ec2f | Python | TheBFR/AutomateBoring | /listLoop.py | UTF-8 | 1,045 | 3.8125 | 4 | [] | no_license |
myList = [1,2,3,4,5]
for i in range(len(myList)):
print (myList[i])
print ("1 is in list myList in index slot",myList.index(1))
# if you do this and there are repeats then it will just return index of first occurrence
print(myList)
myList.append(6)
print(myList)
myList.insert(2,15)
print(myList)
myList.remove(15)
# if you do this and there are repeats then it will just remove first occurrence
print(myList)
del myList[0]
print(myList)
myOrderedList = [10,6,8,12,14,45,1,19]
print (myOrderedList)
myOrderedList.sort()
print (myOrderedList)
myNameList = ['Troy', 'Tobias', 'Ruby', 'Kathy']
print (myNameList)
myNameList.sort()
print (myNameList)
myNameList.sort(reverse=True)
print (myNameList)
# If the list has both strings and numbers (integers or floats) it can't sort it
myMixList = ['A','z','b','x','T','F','Z','a','b','n','M']
print (myMixList)
myMixList.sort()
print (myMixList)
# Also, case sorting is based on ASCII, so capital letters come first
# for a true alphabetical sort you can pass key=str.lower
myMixList.sort(key=str.lower)
print (myMixList)
| true |
53b1e4d417128d0c7282c23278af38658ce84259 | Python | Jordi-Ab/Dissertation | /1D/FFT/1dFFT_main.py | UTF-8 | 1,907 | 3.28125 | 3 | [] | no_license |
# coding: utf-8
from math import *
import numpy as np
from scipy.integrate import ode
from scipy.fftpack import fft
import FiringRate as fr
import ConnectivityKernel as ck
import NeuralFieldFFT as nf
import matplotlib.pyplot as pt
import warnings
warnings.simplefilter("ignore")
# Spatial Grid
nx = (2**10)-1 # Number of points in the space grid
Lx = 30 # Space grid limits
hx = 2*Lx/nx # Space width between points in the grid
x = np.linspace(-Lx, Lx-hx, nx) # nx equally spaced points between -Lx and Lx-hx
# Initialize Connectivity Kernel
b=1
kernel = ck.ConnectivityKernel1(b)
# Plot the Chosen Connectivity Kernel:
plot_kernel = True
if(plot_kernel):
fig = pt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x, kernel(x))
ax1.set_title('Connectivity Kernel')
ax1.set_xlabel('x')
ax1.set_ylabel('w(x)')
# Heterogeneous Vector
_a = .30
_epsilon = 1
A_vec = (1 + _a*np.cos(x/_epsilon))
# FFT of Synaptic Kernel
W = kernel(x)
W_hat = np.real(fft(W))
# Initialize Firing Rate
mu = 50
theta = 0.5
f_rate = fr.FiringRate1(mu, theta)
# Initialize Neural Field
neural_field = nf.NeuralFieldFFT(f_rate, W_hat, Lx, nx, A_vec)
# Use Standard ODE Solvers
#Initial Conditions
A0 = 10; alpha = 0.1 # Parameters of Initial Conditions Function
initCond = lambda x_vec: A0/(np.cosh(alpha*x_vec)**2)
u0 = initCond(x)
# Time step using RungeKutta45
method = ode(neural_field).set_integrator("dopri5")
method.set_initial_value(u0)
final_t = 100
dt = 0.1
sol = []
while method.t < final_t:
next_u = method.integrate(method.t+dt)
sol.append(next_u)
#Plot
fig = pt.figure(figsize=(15,4))
ax1 = fig.add_subplot(121)
ax1.plot(x, u0)
ax1.set_title('Synaptic Activity at Initial Time.')
ax1.set_xlabel('x')
ax1.set_ylabel('u(x)')
ax1 = fig.add_subplot(122)
ax1.plot(x, sol[-1])
ax1.set_title('Synaptic Activity at Final Time.')
ax1.set_xlabel('x')
ax1.set_ylabel('u(x)')
pt.show()
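# --- Hedged aside (not part of the original script): the FFT is used here for
# fast circular convolution of the kernel with the field; a quick sanity check
# of that identity on toy data.
from scipy.fftpack import ifft
_a, _b = np.random.rand(8), np.random.rand(8)
_direct = np.array([sum(_a[j] * _b[(k - j) % 8] for j in range(8)) for k in range(8)])
assert np.allclose(_direct, np.real(ifft(fft(_a) * fft(_b))))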
| true |
8630abed2511888ac398537c308f36a2ae6a8a61 | Python | TaRyu/fx | /exam/data_features.py | UTF-8 | 3,527 | 2.78125 | 3 | [] | no_license |
"""此程序提取了特征值和目标值。时间尺度为24个交易日。"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import numpy as np
FX_LIST = ['EURUSD', 'USDJPY', 'GBPUSD']
FILE_PREX = '../../../data/fx'
NUM_PIX = 24 * 24
SCALE = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def get_fs_t(file_in, file_out, shift):
    data = pd.read_pickle(file_in)['close']
    data = data.reshape(-1, 24)
    data = np.float32([data[i:i + 24]
                       for i in range(data.shape[0] - 24 + 1)])
    data = data.reshape(-1, NUM_PIX)
    data_t = {
        'open_price': np.float32([data[i][0]
                                  for i in range(data.shape[0] - shift)]),
        'close_price': np.float32([data[i][-1]
                                   for i in range(data.shape[0] - shift)]),
        'max_price': np.float32([data[i].max()
                                 for i in range(data.shape[0] - shift)]),
        'min_price': np.float32([data[i].min()
                                 for i in range(data.shape[0] - shift)]),
        'mean_price': np.float32([data[i].mean()
                                  for i in range(data.shape[0] - shift)]),
        'median_price': np.float32([np.median(data[i])
                                    for i in range(data.shape[0] - shift)]),
        'buy_or_sell': np.int_(
            [int(data[i + shift][-1] > data[i + shift][0])
             for i in range(data.shape[0] - shift)]),
        'change': np.float32(
            [(data[i + shift][-1] - data[i + shift][0]) /
             data[i + shift][0] * 100
             for i in range(data.shape[0] - shift)]),
        'target_open': np.float32([data[i + shift][0]
                                   for i in range(data.shape[0] - shift)]),
        'real_target': np.float32([data[i + shift][-1]
                                   for i in range(data.shape[0] - shift)])
    }
    data_t = pd.DataFrame(data_t)
    np.save(file_out[0], data[:len(data) - shift])
    data_t.to_pickle(file_out[1])
def get_24(i):
for fx in FX_LIST:
file_in = '%s/H/%s.pkl' % (FILE_PREX, fx)
file_out = ['%s/Fs/%s_%i.npy' %
(FILE_PREX, fx, i),
'%s/T/%s_%i.pkl' %
(FILE_PREX, fx, i)]
get_fs_t(file_in, file_out, i)
def get_fs_t_5(file_in, file_out, scale):
    data = pd.read_pickle(file_in)['close']
    data = data.reshape(-1, 24)
    data = np.float32([[data[i + x][-1] for
                        x in range(5 * scale) if x % scale == 0]
                       for i in range(len(data) - 5 * scale + 1)])
    data = data.reshape(-1, 5)
    data_t = {
        'change': np.float32(
            [(data[i + scale][-1] - data[i + scale][0]) /
             data[i + scale][0] * 100
             for i in range(data.shape[0] - scale)]),
        'target_open': np.float32([data[i + scale][0]
                                   for i in range(data.shape[0] - scale)]),
        'real_target': np.float32([data[i + scale][-1]
                                   for i in range(data.shape[0] - scale)])
    }
    data_t = pd.DataFrame(data_t)
    np.save(file_out[0], data[:len(data) - scale])
    data_t.to_pickle(file_out[1])
def get_5(i):
for fx in FX_LIST:
file_in = '%s/H/%s.pkl' % (FILE_PREX, fx)
file_out = ['%s/Fs/%s_5_%i.npy' %
(FILE_PREX, fx, i),
'%s/T/%s_5_%i.pkl' %
(FILE_PREX, fx, i)]
get_fs_t_5(file_in, file_out, i)
if __name__ == '__main__':
for i in SCALE:
get_24(i)
get_5(i)
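# --- Hedged illustration (not from the original): how the rolling window above
# turns a flat series into overlapping blocks, on toy data.
# >>> s = np.arange(6).reshape(-1, 2)   # 3 periods of 2 "hours"
# >>> np.float32([s[i:i + 2] for i in range(s.shape[0] - 2 + 1)]).reshape(-1, 4)
# array([[0., 1., 2., 3.],
#        [2., 3., 4., 5.]], dtype=float32)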
| true |
43bd989f78c86ad99ceec5beaa575bbcfaeae45c | Python | hyunseok4384/Data | /tmp/Placeholder.py | UTF-8 | 242 | 2.65625 | 3 | [] | no_license |
import tensorflow as tf
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b
sess = tf.Session()
print(sess.run(adder_node, feed_dict = {a:3, b:4.5}))
print(sess.run(adder_node, feed_dict = {a:[1,3], b:[2,4]}))
| true |
c94dde3d0cb307853554114b8ee9f9c3706e9013 | Python | sudhanshu-jha/Scrapers | /Product-Info-Crawler/product_info_crawler/spiders/olx.py | UTF-8 | 2,324 | 2.625 | 3 | ["MIT"] | permissive |
# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.http import Request
import re
def cleanhtml(raw_html):
cleanr = re.compile('<.*?>')
raw_html = raw_html.encode('ascii', 'ignore').decode('ascii')
cleantext = re.sub(cleanr, '', raw_html)
cleantext=cleantext.strip()
cleantext=re.sub('\s+',' ',cleantext)
return cleantext
class OlxSpider(CrawlSpider):
name = 'olx'
def __init__(self, product='apple', domain=None, *args, **kwargs):
super(OlxSpider, self).__init__(*args, **kwargs)
self.product_name=product.lower()
self.product_name=re.sub("[^ a-zA-Z0-9\-]+", "", self.product_name)
self.product_name = self.product_name.replace(' ', '-')
self.search_url='https://www.olx.in/all-results/q-'+self.product_name
self.allowed_domains = ['www.olx.in']
self.start_urls = [self.search_url]
rules = (
Rule(LinkExtractor(allow=(), tags=('a'),attrs=('href'),restrict_css=('.pageNextPrev',)),
callback="parse_items",
follow=True),)
def parse_start_url(self,response):
request=Request("https://www.olx.in/all-results/q", callback=self.parse_items)
return request
def parse_items(self, response):
print('Processing...', response.url)
title=[]
image=[]
price=[]
url=[]
for item in response.css('table#offers_table tr td'):
item_title=item.css('h3 span::text').extract_first()
item_image=item.css('a.thumb img::attr(src)').extract_first()
item_price=item.css('p.price strong::text').extract_first()
item_url=item.css('a.thumb::attr(href)').extract_first()
if(item_title and item_image and item_price and item_url):
title.append(cleanhtml(item_title))
image.append(cleanhtml(item_image))
price.append('Rs. '+ cleanhtml(item_price))
url.append(cleanhtml(item_url))
print('Result Counts:', len(title))
for item in zip(title,price,image,url):
scraped_info = {
'product_name' : item[0],
'price' : item[1],
'image_url' : item[2],
'product_url': item[3],
'source': 'olx.in'
}
yield scraped_info
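# --- Hedged usage note (not part of the original file): from inside a Scrapy
# project this spider would typically be run as
#   scrapy crawl olx -a product="apple iphone"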
| true |
66e29ac4046f865c82d790732fe674884af92dc4 | Python | heonmono/JustDoIT | /Programming/Algorithm/FastCampus/DP_DC.py | UTF-8 | 1,227 | 4.09375 | 4 | [] | no_license |
# 동적 계획법 & 분할 정복(Divide and Conquer)
'''
동적 계획법(Dynamic Programming
작은 문제 해결 후 , 큰 부분을 해결하여 알고리즘만드는 것
최하위 해답 구한 후 저장하여 상위 문제 해결
Memoizaition 기법 - 이전 계산 값 저장하여 다시 계산하지 않아 빠르게됨
분할 정복(Divide and Conquer)
문제를 나누어 합병하여 문제의 답을 얻는 방식
하양식 접근법으로, 아래로 내려가면서 하위 해답 구현 - 재귀함수 사용
문제를 잘게 쪼갤때 부분 문제는 서로 중복되지 않음 ex) 병합정렬 퀵정렬
공통점 - 작은 문제로 쪼갬
동적 계획법 - 메모이제이션, 반복
분할 정복 - 중복x, 메모이제이션x
'''
# Understanding the dynamic programming algorithm
# Fibonacci sequence
# recursive call
def Fibonacci(n) :
if n == 0 :
return 0
if n == 1 :
return 1
return Fibonacci(n-1) + Fibonacci(n-2)
# dynamic programming
def Fibonacci(n) :
    if n < 2 :
        return n
    cache = [0 for i in range(n+1)]
    cache[1] = 1
    for i in range(2, n+1) :
        cache[i] = cache[i-1] + cache[i-2]
    return cache[n]
Fibonacci(10)
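# --- Hedged addition (not in the original file): the memoized top-down variant
# described in the notes above.
from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci_memo(n):
    if n < 2:
        return n
    return fibonacci_memo(n - 1) + fibonacci_memo(n - 2)

fibonacci_memo(10)  # 55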
| true |
83a66e83f8dbd8fba0e2cd3af33d297e5b96013e | Python | leejjoon/mpl_toolkits.agg_filter | /mpl_toolkits/agg_filter.py | UTF-8 | 5,004 | 2.640625 | 3 | [] | no_license |
import numpy as np
def smooth1d(x, window_len):
# copied from http://www.scipy.org/Cookbook/SignalSmooth
s=np.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
w = np.hanning(window_len)
y=np.convolve(w/w.sum(),s,mode='same')
return y[window_len-1:-window_len+1]
def smooth2d(A, sigma=3):
window_len = max(int(sigma), 3)*2+1
A1 = np.array([smooth1d(x, window_len) for x in np.asarray(A)])
A2 = np.transpose(A1)
A3 = np.array([smooth1d(x, window_len) for x in A2])
A4 = np.transpose(A3)
return A4
class BaseFilter(object):
def prepare_image(self, src_image, dpi, pad):
ny, nx, depth = src_image.shape
#tgt_image = np.zeros([pad*2+ny, pad*2+nx, depth], dtype="d")
padded_src = np.zeros([pad*2+ny, pad*2+nx, depth], dtype="d")
padded_src[pad:-pad, pad:-pad,:] = src_image[:,:,:]
return padded_src#, tgt_image
def get_pad(self, dpi):
return 0
def __call__(self, im, dpi):
pad = self.get_pad(dpi)
padded_src = self.prepare_image(im, dpi, pad)
tgt_image = self.process_image(padded_src, dpi)
return tgt_image, -pad, -pad
class OffsetFilter(BaseFilter):
def __init__(self, offsets=None):
if offsets is None:
self.offsets = (0, 0)
else:
self.offsets = offsets
def get_pad(self, dpi):
return int(max(*self.offsets)/72.*dpi)
def process_image(self, padded_src, dpi):
ox, oy = self.offsets
a1 = np.roll(padded_src, int(ox/72.*dpi), axis=1)
a2 = np.roll(a1, -int(oy/72.*dpi), axis=0)
return a2
class GaussianFilter(BaseFilter):
"simple gauss filter"
def __init__(self, sigma, alpha=0.5, color=None):
self.sigma = sigma
self.alpha = alpha
if color is None:
self.color=(0, 0, 0)
else:
self.color=color
def get_pad(self, dpi):
return int(self.sigma*3/72.*dpi)
def process_image(self, padded_src, dpi):
#offsetx, offsety = int(self.offsets[0]), int(self.offsets[1])
tgt_image = np.zeros_like(padded_src)
aa = smooth2d(padded_src[:,:,-1]*self.alpha,
self.sigma/72.*dpi)
tgt_image[:,:,-1] = aa
tgt_image[:,:,:-1] = self.color
return tgt_image
class DropShadowFilter(BaseFilter):
def __init__(self, sigma, alpha=0.3, color=None, offsets=None):
self.gauss_filter = GaussianFilter(sigma, alpha, color)
self.offset_filter = OffsetFilter(offsets)
def get_pad(self, dpi):
return max(self.gauss_filter.get_pad(dpi),
self.offset_filter.get_pad(dpi))
def process_image(self, padded_src, dpi):
t1 = self.gauss_filter.process_image(padded_src, dpi)
t2 = self.offset_filter.process_image(t1, dpi)
return t2
from matplotlib.colors import LightSource
class LightFilter(BaseFilter):
"simple gauss filter"
def __init__(self, sigma, fraction=0.5):
self.gauss_filter = GaussianFilter(sigma, alpha=1)
self.light_source = LightSource()
self.fraction = fraction
#hsv_min_val=0.5,hsv_max_val=0.9,
# hsv_min_sat=0.1,hsv_max_sat=0.1)
def get_pad(self, dpi):
return self.gauss_filter.get_pad(dpi)
def process_image(self, padded_src, dpi):
t1 = self.gauss_filter.process_image(padded_src, dpi)
elevation = t1[:,:,3]
rgb = padded_src[:,:,:3]
rgb2 = self.light_source.shade_rgb(rgb, elevation,
fraction=self.fraction)
tgt = np.empty_like(padded_src)
tgt[:,:,:3] = rgb2
tgt[:,:,3] = padded_src[:,:,3]
return tgt
class GrowFilter(BaseFilter):
"enlarge the area"
def __init__(self, pixels, color=None):
self.pixels = pixels
if color is None:
self.color=(1, 1, 1)
else:
self.color=color
def __call__(self, im, dpi):
pad = self.pixels
ny, nx, depth = im.shape
new_im = np.empty([pad*2+ny, pad*2+nx, depth], dtype="d")
alpha = new_im[:,:,3]
alpha.fill(0)
alpha[pad:-pad, pad:-pad] = im[:,:,-1]
alpha2 = np.clip(smooth2d(alpha, self.pixels/72.*dpi) * 5, 0, 1)
new_im[:,:,-1] = alpha2
new_im[:,:,:-1] = self.color
offsetx, offsety = -pad, -pad
return new_im, offsetx, offsety
from matplotlib.artist import Artist
class FilteredArtistList(Artist):
"""
A simple container to draw filtered artist.
"""
def __init__(self, artist_list, filter):
self._artist_list = artist_list
self._filter = filter
Artist.__init__(self)
def draw(self, renderer):
renderer.start_rasterizing()
renderer.start_filter()
for a in self._artist_list:
a.draw(renderer)
renderer.stop_filter(self._filter)
renderer.stop_rasterizing()
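# --- Hedged usage sketch (not part of the original module), mirroring
# matplotlib's agg-filter demo: attach a drop shadow to a line via
# FilteredArtistList; needs an Agg-based backend.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    line, = ax.plot([0, 1, 2], [0, 1, 0], lw=4)
    shadow = FilteredArtistList([line], DropShadowFilter(4))
    shadow.set_zorder(line.get_zorder() - 0.5)
    ax.add_artist(shadow)
    plt.show()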
| true |
5fe8fb90d314190b7b5042218ac50b84e9e5c4e1 | Python | CyberPunkRavi/semi-supervised-bayesian-classifier | /classifier.py | UTF-8 | 9,062 | 3.21875 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 14:04:38 2018
@author: Alexandre Boyker
"""
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from helper import plot_confusion_matrix
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
class NaiveBayesSemiSupervised(object):
"""
This class implements a modification of the Naive Bayes classifier
in order to deal with unlabelled data. We use an Expectation-maximization
algorithm (EM).
This work is based on the paper
'Semi-Supervised Text Classification Using EM' by
Kamal Nigam Andrew McCallum Tom Mitchell
available here:
https://www.cs.cmu.edu/~tom/pubs/NigamEtAl-bookChapter.pdf
"""
def __init__(self, max_features=None, max_rounds=30, tolerance=1e-6):
"""
constructor for NaiveBayesSemiSupervised object
keyword arguments:
-- max_features: maximum number of features for documents vectorization
-- max_rounds: maximum number of iterations for EM algorithm
-- tolerance: threshold (in percentage) for total log-likelihood improvement during EM
"""
self.max_features = max_features
self.n_labels = 0
self.max_rounds = max_rounds
self.tolerance = tolerance
def train(self, X_supervised, X_unsupervised, y_supervised):
"""
train the modified Naive bayes classifier using both labelled and
unlabelled data. We use the CountVectorizer vectorization method from scikit-learn
positional arguments:
-- X_supervised: list of documents (string objects). these documents have labels
example: ["all parrots are interesting", "some parrots are green", "some parrots can talk"]
-- X_unsupervised: list of documents (string objects) as X_supervised, but without labels
-- y_supervised: labels of the X_supervised documents. list or numpy array of integers.
example: [2, 0, 1, 0, 1, ..., 0, 2]
"""
count_vec = CountVectorizer(max_features=self.max_features)
count_vec.fit(X_supervised + X_unsupervised)
self.n_labels = len(set(y_supervised))
if self.max_features is None:
self.max_features = len(count_vec.vocabulary_ )
X_supervised = np.asarray(count_vec.transform(X_supervised).todense())
X_unsupervised = np.asarray(count_vec.transform(X_unsupervised).todense())
self.train_naive_bayes(X_supervised, y_supervised)
predi = self.predict(X_supervised)
old_likelihood = 1
while self.max_rounds > 0:
self.max_rounds -= 1
predi = self.predict(X_unsupervised)
self.train_naive_bayes(X_unsupervised, predi)
predi = self.predict(X_supervised)
total_likelihood = self.get_log_likelihood( X_supervised, X_unsupervised, y_supervised)
print("total likelihood: {}".format(total_likelihood))
if self._stopping_time(old_likelihood, total_likelihood):
break
old_likelihood = total_likelihood.copy()
def _stopping_time(self, old_likelihood, new_likelihood):
"""
returns True if there is no significant improvement in log-likelihood and false else
positional arguments:
-- old_likelihood: log-likelihood for previous iteration
-- new_likelihood: new log-likelihood
"""
relative_change = np.absolute((new_likelihood-old_likelihood)/old_likelihood)
if (relative_change < self.tolerance):
print("stopping time")
return True
else:
return False
def get_log_likelihood(self, X_supervised, X_unsupervised, y_supervised):
"""
returns the total log-likelihood of the model, taking into account unsupervised data
positional arguments:
-- X_supervised: list of documents (string objects). these documents have labels
example: ["all parrots are interesting", "some parrots are green", "some parrots can talk"]
-- X_unsupervised: list of documents (string objects) as X_supervised, but without labels
-- y_supervised: labels of the X_supervised documents. list or numpy array of integers.
example: [2, 0, 1, 0, 1, ..., 0, 2]
"""
unsupervised_term = np.sum(self._predict_proba_unormalized(X_unsupervised), axis=1)
unsupervised_term = np.sum(np.log(unsupervised_term))
supervised_term = self._predict_proba_unormalized(X_supervised)
supervised_term = supervised_term[np.arange(supervised_term.shape[0]), y_supervised]
supervised_term = np.sum(np.log(supervised_term))
total_likelihood = supervised_term + unsupervised_term
return total_likelihood
def word_proba(self, X, y, c):
"""
returns a numpy array of size max_features containing the conditional probability
of each word given the label c and the model parameters
positional arguments:
-- X: data matrix, 2-dimensional numpy ndarray
-- y: numpy array of labels, example: np.array([2, 0, 1, 0, 1, ..., 0, 2])
-- c: integer, the class upon which we condition
"""
numerator = 1 + np.sum( X[np.equal( y, c )], axis=0)
denominator = self.max_features + np.sum( X[ np.equal( y, c)])
return np.squeeze(numerator)/denominator
def class_proba(self, X, y, c):
"""
returns a numpy array of size n_labels containing the conditional probability
of each label given the label model parameters
positional arguments:
-- X: data matrix, 2-dimensional numpy ndarray
-- y: numpy array of labels, example: np.array([2, 0, 1, 0, 1, ..., 0, 2])
-- c: integer, the class upon which we condition
"""
numerator = 1 + np.sum( np.equal( y, c) , axis=0)
denominator = X.shape[0] + self.n_labels
return numerator/denominator
def train_naive_bayes(self, X, y):
"""
train a regular Naive Bayes classifier
positional arguments:
-- X: data matrix, 2-dimensional numpy ndarray
-- y: numpy array of labels, example: np.array([2, 0, 1, 0, 1, ..., 0, 2])
"""
word_proba_array = np.zeros(( self.max_features, self.n_labels))
for c in range(self.n_labels):
word_proba_array[:,c] = self.word_proba( X, y, c)
labels_proba_array = np.zeros(self.n_labels)
for c in range(self.n_labels):
labels_proba_array[c] = self.class_proba( X, y, c)
self.word_proba_array = word_proba_array
self.labels_proba_array = labels_proba_array
def _predict_proba_unormalized(self, X_test):
"""
returns unnormalized predicted probabilities (useful for log-likelihood computation)
positional arguments:
-- X: data matrix, 2-dimensional numpy ndarray
"""
proba_array_unormalized = np.zeros((X_test.shape[0], self.n_labels))
for c in range(self.n_labels):
temp = np.power(np.tile(self.word_proba_array[:,c], (X_test.shape[0] ,1)), X_test)
proba_array_unormalized[:,c] = self.labels_proba_array[c] * np.prod(temp, axis=1)
return proba_array_unormalized
def predict_proba(self, X):
"""
returns model predictions (probability)
positional arguments:
-- X: data matrix, 2-dimensional numpy ndarray
"""
proba_array_unormalized = self._predict_proba_unormalized(X)
proba_array = np.true_divide(proba_array_unormalized, np.sum(proba_array_unormalized, axis=1)[:, np.newaxis])
return proba_array
def predict(self, X):
"""
returns model predictions (class labels)
positional arguments:
-- X: data matrix, 2-dimensional numpy ndarray
"""
return np.argmax(self.predict_proba( X), axis=1)
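# --- Hedged usage sketch (not part of the original module): run EM on a toy
# corpus; train() prints the total log-likelihood after each round.
if __name__ == '__main__':
    labelled = ["good movie", "great film", "bad movie", "awful film"]
    unlabelled = ["great movie", "awful bad film"]
    clf = NaiveBayesSemiSupervised(max_rounds=5)
    clf.train(labelled, unlabelled, [1, 1, 0, 0])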
| true |
1408afb90cb1ff056d640762cbc27a010426775d | Python | ej2/pixelpuncher | /pixelpuncher/player/utils/avatar.py | UTF-8 | 2,052 | 2.5625 | 3 | ["BSD-3-Clause"] | permissive |
import random
from django.db.models import Q
from pixelpuncher.player.models import AvatarLayer, PlayerAvatar
from annoying.functions import get_object_or_None
def get_unlocked_layers_by_type(player, layer_type):
ids = player.layers.filter(layer__layer_type=layer_type).values_list('layer__id', flat=True)
layers = AvatarLayer.objects.filter(Q(layer_type=layer_type) & (Q(unlock_method='start') | Q(id__in=ids)))
return layers
def set_avatar(player, hair_id, face_id, body_id, shirt_id):
for layer in player.layers.all():
layer.current = False
layer.save()
hair_layer, created = PlayerAvatar.objects.get_or_create(player=player, layer_id=hair_id)
hair_layer.current = True
hair_layer.save()
face_layer, created = PlayerAvatar.objects.get_or_create(player=player, layer_id=face_id)
face_layer.current = True
face_layer.save()
body_layer, created = PlayerAvatar.objects.get_or_create(player=player, layer_id=body_id)
body_layer.current = True
body_layer.save()
shirt_layer, created = PlayerAvatar.objects.get_or_create(player=player, layer_id=shirt_id)
shirt_layer.current = True
shirt_layer.save()
def generate_random_starting_avatar(player):
body_layer = random.choice(get_unlocked_layers_by_type(player, 'body'))
hair_layer = random.choice(get_unlocked_layers_by_type(player, 'hair'))
face_layer = random.choice(get_unlocked_layers_by_type(player, 'face'))
shirt_layer = random.choice(get_unlocked_layers_by_type(player, 'shirt'))
set_avatar(player, body_layer.id, hair_layer.id, face_layer.id, shirt_layer.id)
def unlock_layer(player, layer):
"""
Unlocks an avatar layer for a player
:param player:
:param layer:
:return: True if layer was unlocked
"""
player_avatar = get_object_or_None(PlayerAvatar, player=player, layer=layer)
if player_avatar is None:
player_avatar = PlayerAvatar(player=player, layer=layer)
player_avatar.save()
return True
else:
return False
| true |
5863183924cfca3fe4e04b80ba2303b8eec0701f | Python | fabricST/Learn_python3 | /Home_Work_5/H_W5_P1_1.py | UTF-8 | 1,617 | 4.71875 | 5 | [] | no_license |
# Person (two attributes: 1. full_name is now an attribute rather than a function (a single field; we expect a string
# made of two words, "name surname"), and there are no separate name and surname attributes; 2. year of birth).
# Implement methods that:
# • extract only the name from full_name;
# • extract only the surname from full_name;
# • compute how old the person was/is/will be in the year passed as a parameter (obj.age_in(year));
#   if no parameter is passed, default to the age in the current year.
class Person:
def __init__(self, full_name, birth_years):
self.full_name = full_name
self.birth_years = birth_years
def get_name(self):
x = self.full_name.find(' ')
self.name = self.full_name[:x]
return self.name
def get_surname(self):
y = self.full_name.find(' ')
self.surname = self.full_name[y+1:]
return self.surname
def age_in(self, year=2020):
age = year - self.birth_years
return age
def __str__(self):
    return f"Person with fullname {self.full_name}, name {self.get_name()}, surname {self.get_surname()}, " \
           f"birth year {self.birth_years}"
if __name__ == '__main__':
p = Person("Сторчак Евгений", 1987)
print(p.get_name())
print(p.get_surname())
print(p.age_in())
| true |
6359cf4632b5072b64c08639e0e2dd3618c6da7f | Python | t7ahaa00/Parkissa-projekti | /Computer_vision/AWS/awsFindSlots.py | UTF-8 | 1,147 | 2.71875 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 11:44:14 2020
@author: Antti
"""
import json
from Freeslot import Freeslot
from io import BytesIO
def checkFreeSlots(cars, slotsFilePath, s3Session, bucket):
slots=[]
free_slots = []
busy_slots=[]
s3 = s3Session.resource("s3")
bucket = s3.Bucket(bucket)
obj = bucket.Object(slotsFilePath)
data_file = BytesIO(obj.get()['Body'].read())
data = json.load(data_file)
for slot in data:
slots.append(slot)
for i, slot in enumerate(slots):
for car in cars:
if FindPoint(car, slot['x'], slot['y']):
busy_slots.append(i)
break
for i,slot in enumerate(slots):
if i not in busy_slots:
free_slots.append(json.loads(Freeslot(slot["sId"]).toJson()))
return {"slots":free_slots}, len(slots)
def FindPoint(box, x, y) :
x1 , y1 , x2 , y2 = box
if (x > x1 and x < x2 and
y > y1 and y < y2) :
return True
else :
return False
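# --- Hedged note (not part of the original file): FindPoint treats box as
# (x1, y1, x2, y2) and reports strict containment, e.g.
#   FindPoint((0, 0, 10, 10), 5, 5) -> True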
| true |
ba00d28af4b4b762c381f5a7ef5c0c0bf76d321e | Python | nguyennhatminh-mgr/MC-Assignment1 | /initial/src/test/ParserSuite.py | UTF-8 | 33,223 | 3.046875 | 3 | [] | no_license |
import unittest
from TestUtils import TestParser
class ParserSuite(unittest.TestCase):
def test_simple_program(self):
"""Simple program: int main() {} """
input = """int main() {}"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,281))
def test_more_complex_program(self):
"""More complex program"""
input = """int main () {
putIntLn(4);
}"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,282))
def test_wrong_miss_close(self):
"""Miss ) int main( {}"""
input = """int main( {}"""
expect = "Error on line 1 col 10: {"
self.assertTrue(TestParser.checkParser(input,expect,283))
#test variable declaration
def test_var_declaration(self):
"""var declaration: int a,b,c; """
input = """int a,b,c;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,211))
def test_var_declaration_1(self):
"""var declaration:"""
input = """int a,b,c[10];"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,212))
def test_var_declaration_2(self):
"""var declaration: """
input = """int a[0];"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,213))
def test_var_declaration_3(self):
"""var declaration: """
input = """int a[0],b,c[10];"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,214))
def test_var_declaration_3b(self):
"""var declaration: """
input = """int a,b,c;
float b[10];
boolean xyz;"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,215))
def test_var_declaration_4(self):
"""var declaration: """
input = """int a,b,c;
float b[];
boolean xyz;"""
expect = "Error on line 2 col 24: ]"
self.assertTrue(TestParser.checkParser(input,expect,284))
def test_var_declaration_5(self):
"""var declaration: """
input = """
int a,b,c,float b;
float b[10];
boolean xyz;"""
expect = "Error on line 2 col 26: float"
self.assertTrue(TestParser.checkParser(input,expect,285))
def test_var_declaration_6(self):
"""var declaration: """
input = """
int a,b,c;
float b[10], string c[0];
boolean xyz;"""
expect = "Error on line 3 col 29: string"
self.assertTrue(TestParser.checkParser(input,expect,286))
def test_var_declaration_7(self):
"""var declaration: """
input = """
int a,b,c=10;
float b[10]; string c[0];
boolean xyz;"""
expect = "Error on line 2 col 25: ="
self.assertTrue(TestParser.checkParser(input,expect,287))
def test_var_declaration_8(self):
"""var declaration: """
input = """
int a,b,c;
float b[4]={1,2,3,4}; string c[0];
boolean xyz;"""
expect = "Error on line 3 col 26: ="
self.assertTrue(TestParser.checkParser(input,expect,288))
#test function declaration
def test_func_declaration(self):
"""function declaration: """
input = """int abc(){}"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,216))
def test_func_declaration_1(self):
"""function declaration: """
input = """int[] a(int a, int b){
int a,b[10];
float x,y,z;
string str;
}"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,217))
def test_func_declaration_2(self):
"""function declaration: """
input = """int[] a(int a, int b){
int a,b[10];
float x,y,z;
string str;
}
void a(int a[],float b,boolean c){}"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,218))
def test_func_declaration_3(self):
"""function declaration: """
input = """float foo(string a, int b){
int a,b[10];
string str[4];
}
void a(){
boolean b[9];
}"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,219))
def test_func_declaration_4(self):
"""function declaration: """
input = """
float[10] foo(string a, int b){
int a,b[10];
string str[4];
}
"""
expect = "Error on line 2 col 18: 10"
self.assertTrue(TestParser.checkParser(input,expect,289))
def test_func_declaration_5(self):
"""function declaration: """
input = """
float] foo(string a[], int b){
int a,b[10];
boolean array[4];
}
"""
expect = "Error on line 2 col 17: ]"
self.assertTrue(TestParser.checkParser(input,expect,290))
def test_func_declaration_6(self):
"""function declaration: """
input = """
float[] main(string a[100], int b){
int a,b[10];
boolean array[4];
}
"""
expect = "Error on line 2 col 34: 100"
self.assertTrue(TestParser.checkParser(input,expect,291))
def test_func_declaration_7(self):
"""function declaration: """
input = """
float[] main(string a[], b){
int a,b[10];
boolean array[400+m];
}
"""
expect = "Error on line 2 col 37: b"
self.assertTrue(TestParser.checkParser(input,expect,292))
#test expression
def test_expression(self):
"""function expression: """
input = """float foo(string a, int b){
int a,b[10];
string str[4];
}
int a(){
a(a+b>c,a[a+b]);
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,220))
def test_expression_1(self):
"""function expression: """
input = """
int test(){
if(a>4) return;
i+2;
i=i+4;
z>=5+2;
if(b) return z+2;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,221))
def test_expression_2(self):
"""function expression: """
input = """
int test(){
int a;
a=a+4*6+3>5;
a/2=p;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,222))
def test_expression_3(self):
"""function expression: """
input = """
void main(){
if( a==b && a!=c){
a=b;
a=c;
}
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,271))
def test_expression_4(self):
"""function expression: """
input = """
void main(){
if( a==b + a!=c){
a=b;
a=c;
}
}
"""
expect = "Error on line 3 col 24: !="
self.assertTrue(TestParser.checkParser(input,expect,272))
def test_expression_5(self):
"""function expression: """
input = """
void main(){
if( a==b + (a!=c)){
a=b;
a=c;
}
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,273))
def test_expression_6(self):
"""function expression: """
input = """
void main(){
if( a>=b + a<c){
a=b;
a=c;
}
}
"""
expect = "Error on line 3 col 24: <"
self.assertTrue(TestParser.checkParser(input,expect,274))
def test_expression_7(self):
"""function expression: """
input = """
void main(){
if( (a>=b) + a<c){
a=b;
a=c;
}
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,275))
def test_expression_8(self):
"""function expression: """
input = """
void main(){
foo(2)[3+x]=a[b[2]]+3;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,276))
def test_expression_9(self):
"""function expression: """
input = """
void main(){
foo(2)[3+x=a[b[2]]+3;
}
"""
expect = "Error on line 3 col 32: ;"
self.assertTrue(TestParser.checkParser(input,expect,277))
def test_expression_10(self):
"""function expression: """
input = """
void main(int a[10]){
foo(2)[3+x]=a[b[2]]+3;
}
"""
expect = "Error on line 2 col 24: 10"
self.assertTrue(TestParser.checkParser(input,expect,278))
def test_expression_11(self):
"""function expression: """
input = """
void main(int a[],float b,string c){
array[a[a[c]+b]]=c*b-d/a;
x<=y || a == b ;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,279))
def test_expression_12(self):
"""function expression: """
input = """
void main(int a[],float b,string c){
array[a[a[c]+b]]=c**b-d/a;
x<=y || a == b && c>d;
}
"""
expect = "Error on line 3 col 31: *"
self.assertTrue(TestParser.checkParser(input,expect,280))
#test if statement
def test_if_statement(self):
"""if statement """
input = """
int main(){
a[4];
if(true) return false;
if(0.5) return z;
if(1e2) break;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,223))
def test_if_statement_1(self):
"""if statement """
input = """
int main(){
a[4];
if(true && a > 5) a[foo()]=0;
if(0.5+a==9) return z;
if(1e2<=a) break;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,224))
def test_if_statement_2(self):
"""if statement """
input = """
{
}
int main(){
}
"""
expect = "Error on line 2 col 8: {"
self.assertTrue(TestParser.checkParser(input,expect,225))
def test_if_statement_3(self):
"""if statement """
input = """
int main(){
if(a>b){ a =a + 1;
break;
continue;
}
else{
a=-b;
}
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,226))
def test_if_statement_4(self):
"""if statement """
input = """
int main(){
if(a>b){ a =a + 1;
break;
continue;
}
else{
if(a-b>=6) return;
break;
}
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,227))
def test_if_statement_5(self):
"""if statement """
input = """
int main(){
int a[10];
if(a>b) a=a+1 else if(a<=c) return; else a>b;
}
"""
expect = "Error on line 4 col 26: else"
self.assertTrue(TestParser.checkParser(input,expect,228))
def test_if_statement_6(self):
"""if statement """
input = """
int main(){
string a[10];
a[a[5]]=true;
if(a>b) a=a+1; else if(a<=c) return; else a<=b;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,229))
def test_if_statement_7(self):
"""if statement """
input = """
float a(){}
int main(){
string a[10];
a[a[5]]=true;
if(a>b) a=true; else if(a<=c>d) return "o"; else a<=(b+5);
}
"""
expect = "Error on line 6 col 40: >"
self.assertTrue(TestParser.checkParser(input,expect,230))
def test_if_statement_8(self):
"""else lack if """
input = """
float a(){}
int main(){
else {
a=a[a>b-c];
}
}
"""
expect = "Error on line 4 col 12: else"
self.assertTrue(TestParser.checkParser(input,expect,293))
def test_if_statement_9(self):
"""if abundant else"""
input = """
int main(){
if(c>d/a) return;
else {
a=a[a>b-c];
}
else
a[c]==d;
}
"""
expect = "Error on line 7 col 12: else"
self.assertTrue(TestParser.checkParser(input,expect,294))
#test for statement
def test_for_statement(self):
"""for statement """
input = """
float a(){
int a[9],i;
for(i=0;i<9;i=i+1){
a[i]="PPL Lover";
}
print("successful");
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,231))
def test_for_statement_1(self):
"""for statement """
input = """
float a(){
int a[9],i;
for(int i=0;i<9;i=i+1){
a[i]="PPL Lover";
}
print("unsuccessful");
}
"""
expect = "Error on line 4 col 16: int"
self.assertTrue(TestParser.checkParser(input,expect,232))
def test_for_statement_2(self):
"""for statement """
input = """
float a(){
int a[9],i;
for(i=0;i<9;i=i+1){
if(i+1==9){
a[i]=a[0];
break;
}
a[i]=a[i+1];
}
print("successful");
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,233))
def test_for_statement_3(self):
"""for statement """
input = """
float a(){
int a[9],i;
for(i=8;i>=0;i=i+2){
if(i==false)
return true;
}
print("successful");
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,234))
def test_for_statement_4(self):
"""for statement """
input = """
float a(){
int a[9],i;
for(i=8;;i=i+2){
if(i==false)
return true;
}
print("successful");
}
"""
expect = "Error on line 4 col 20: ;"
self.assertTrue(TestParser.checkParser(input,expect,295))
def test_for_statement_5(self):
"""for statement """
input = """
float main(){
int a[10],i;
for(){
if(i==false)
return true;
}
}
"""
expect = "Error on line 4 col 16: )"
self.assertTrue(TestParser.checkParser(input,expect,296))
#test some special case
def test_invalid_var_decl_case1(self):
input = """int a,b[]; """
expect = "Error on line 1 col 8: ]"
self.assertTrue(TestParser.checkParser(input,expect,235))
def test_invalid_var_decl_case2(self):
input = """int main(){
if(5>=(a<8)){a=a+1;}
} """
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,236))
def test_valid_program_5(self):
input = """
int main(){
cout((array[i])[j]);
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,237))
#test do while statement
def test_dowhile_statement(self):
"""do while statement """
input = """
float a(){
do a+b=1; while a>b;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,238))
def test_dowhile_statement_1(self):
"""do while statement """
input = """
void main(string args[]){
do{
a+b=1;
if(a>b) break;
else return;
}while a<b+c;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,239))
def test_dowhile_statement_2(self):
"""do while statement """
input = """
void main(string args[]){
do{
a+b=1;
if(a>b) break;
else return;
}
{
int x,y,z;
x=y+z;
}while [a<b+c];
}
"""
expect = "Error on line 11 col 19: ["
self.assertTrue(TestParser.checkParser(input,expect,240))
def test_dowhile_statement_3(self):
"""do while statement """
input = """
void main(string args[]){
do{
a+b=1;
if(a>b) break;
else return;
{
int x,y,z;
x=y+z;
}
}
while a[a<b+c];
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,241))
def test_dowhile_statement_4(self):
"""do while statement """
input = """
void main(string args[]){
do{
void var;
}
while a[a<b+c];
}
"""
expect = "Error on line 4 col 16: void"
self.assertTrue(TestParser.checkParser(input,expect,242))
def test_dowhile_statement_5(self):
"""do while statement """
input = """
void main(string args[]){
do{
//void var;
-a+c=d;
}
while a>2.4e3;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,243))
def test_dowhile_statement_6(self):
"""do miss while statement """
input = """
void main(string args[]){
do{
//void var;
-a+c=d;
}
;
}
"""
expect = "Error on line 7 col 12: ;"
self.assertTrue(TestParser.checkParser(input,expect,297))
def test_dowhile_statement_7(self):
"""while miss do in do while statement """
input = """
void main(string args[]){
{
//void var;
-a+c=d;
}
while (PPL==pass);
}
"""
expect = "Error on line 7 col 12: while"
self.assertTrue(TestParser.checkParser(input,expect,298))
def test_dowhile_statement_8(self):
"""do while miss ; in do while statement """
input = """
void main(string args[]){
do{
//void var;
-a+c=d;
}
while (PPL==pass)
}
"""
expect = "Error on line 8 col 8: }"
self.assertTrue(TestParser.checkParser(input,expect,299))
def test_dowhile_statement_9(self):
""" do while statement """
input = """
void main(string args[]){
do{
consolelog("string\\n");
print(a,b,c);
if(PPL==pass)
print("An mung");
}
while (PPL==pass);
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,300))
def test_dowhile_statement_10(self):
""" reverse do while role in do while statement """
input = """
void main(string args[]){
while{
dowhilebidao=true;
return false;
toPassPPL=true;
}
do (PPL==pass);
}
"""
expect = "Error on line 3 col 12: while"
self.assertTrue(TestParser.checkParser(input,expect,301))
#test break statement
def test_break_statement(self):
"""break statement """
input = """
float test(){
break;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,244))
def test_break_statement_1(self):
"""break statement """
input = """
float test(){
if(a>b)
break;
else
-a/b+c*1=d;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,245))
def test_break_statement_2(self):
"""break statement """
input = """
float test(){
if(a>b)
break a;
}
"""
expect = "Error on line 4 col 22: a"
self.assertTrue(TestParser.checkParser(input,expect,246))
def test_break_statement_3(self):
"""break statement """
input = """
float test(){
if(true) a="I love PPL";
if(a>b)
break a+b>c;
}
"""
expect = "Error on line 5 col 22: a"
self.assertTrue(TestParser.checkParser(input,expect,247))
def test_break_statement_4(self):
"""break statement """
input = """
float test(){
for(i=1;i<10;i=i+1){
a="Principle";
b="Programming";
c="Language";
z=a+b+c;
if(z==null) break;
print("rs = " + z);
}
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,248))
def test_break_statement_5(self):
input = """float foo(){
string arr[];
break;
}
"""
expect = "Error on line 2 col 31: ]"
self.assertTrue(TestParser.checkParser(input,expect,249))
#test continue statement
def test_continue_statement(self):
input = """float foo(){
if(true) continue;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,250))
def test_continue_statement_1(self):
input = """float foo(){
if(true) continue b;
}
"""
expect = "Error on line 2 col 38: b"
self.assertTrue(TestParser.checkParser(input,expect,251))
def test_continue_statement_2(self):
input = """float foo(){
do{
a=a+1;
if(a<0) continue;
}
while (a>10);
if(true) continue;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,252))
def test_continue_statement_4(self):
input = """float foo(){
a=a+1;
if(a>10) continue;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,253))
def test_continue_statement_5(self):
input = """
int main(){
cout((array[i])[j]);
if(a>c) continue;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,254))
def test_continue_statement_6(self):
input = """
int main(){
do {
continue;
}
while((arr[i])[j]);
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,255))
#test return statement
def test_return_statement(self):
input = """float foo(int a,int b){
return a+b;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,256))
def test_return_statement_1(self):
input = """float gt(int n){
if(n==1 || n==0) return 1;
return n*gt(n-1);
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,257))
def test_return_statement_2(self):
input = """float gt(int n){
return a[a[a]];
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,258))
def test_return_statement_3(self):
input = """float gt(int n){
return
}
"""
expect = "Error on line 3 col 16: }"
self.assertTrue(TestParser.checkParser(input,expect,259))
def test_return_statement_4(self):
input = """float gt(int n){
return a[a[a]]+b[i]==c;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,260))
def test_return_statement_5(self):
input = """float gt(int n){
return a*;
}
"""
expect = "Error on line 2 col 29: ;"
self.assertTrue(TestParser.checkParser(input,expect,261))
#test expression statement
def test_expression_statement(self):
input = """float gt(int n){
n=n*n;
n+2;
100;
1.E2;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,262))
def test_expression_statement_1(self):
input = """float gt(int n){
n=n*n;
gt(100,n+2);
1.E2;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,263))
def test_expression_statement_2(self):
input = """float gt(int n){
a[a[x=y+z]]=ab;
x2;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,264))
def test_expression_statement_3(self):
input = """float gt(int n){
a[a[x=y+z]]=ab;
x2
}
"""
expect = "Error on line 4 col 16: }"
self.assertTrue(TestParser.checkParser(input,expect,265))
#test block statement
def test_block_statement(self):
input = """float gt(int n){
{
a>b;
a=x*y/z;
x2;
a[4];
}
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,266))
def test_block_statement_1(self):
input = """float gt(int n){
a>b;
a=x*y/z;
x2;
a[4];
}
}
"""
expect = "Error on line 7 col 16: }"
self.assertTrue(TestParser.checkParser(input,expect,267))
def test_block_statement_2(self):
input = """{
int a;
float b[10];
}
"""
expect = "Error on line 1 col 0: {"
self.assertTrue(TestParser.checkParser(input,expect,268))
def test_block_statement_3(self):
input = """int main(){
float a(){}
}
"""
expect = "Error on line 2 col 19: ("
self.assertTrue(TestParser.checkParser(input,expect,269))
def test_block_statement_4(self):
input = """int main(){
float a;
a(10,a[a[7]],c+d>b);
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,270))
def test_invalid_continue(self):
"""ID is siminar with some keyword """
input = """void func(){
int continue;
continue == continue && false;
}
"""
expect = "Error on line 2 col 16: continue"
self.assertTrue(TestParser.checkParser(input,expect,302))
def test_invalid_expression(self):
"""expression miss anry in add """
input = """void func(){
int a;
+i;
}
"""
expect = "Error on line 3 col 12: +"
self.assertTrue(TestParser.checkParser(input,expect,303))
def test_invalid_expression_1(self):
"""expression miss anry in add """
input = """void func(){
int a;
i+i;
a-a;
z*z+3;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,304))
def test_invalid_expression_2(self):
"""expression miss anry in mul """
input = """void func(){
int a;
-i+a>b;
*a=true;
}
"""
expect = "Error on line 4 col 12: *"
self.assertTrue(TestParser.checkParser(input,expect,305))
def test_invalid_expression_3(self):
"""expression miss anry in add """
input = """void func(){
int a;
-i+a>b;
a*a=true;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,306))
def test_invalid_expression_4(self):
"""booleanlit in expression """
input = """void func(){
a=true;
true+false=true;
true*false=false;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,307))
def test_invalid_expression_5(self):
"""booleanlit in expression """
input = """void func(){
a=true;
array[true+false]=true*false;
b[true[false]]=false;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,308))
def test_invalid_expression_6(self):
"""booleanlit in expression """
input = """void func(){
a=true;
array[true+false]=true*false;
b[true[false]]=-(false+a);
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,309))
def test_invalid_expression_7(self):
"""booleanlit in expression """
input = """void func(){
int a[10];
a[0]=false;
a[1]=-true+6/a;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,310))
def test_invalid_expression_8(self):
"""booleanlit in expression """
input = """void func(){
true;
false;
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,311))
# def test_invalid_expression_9(self):
# """booleanlit in expression """
# input = """void func(){
# int a;
# a=a+b>c;
# boolean b;
# b=false;
# }
# """
# expect = "successful"
# self.assertTrue(TestParser.checkParser(input,expect,312))
# def test_invalid_expression_10(self):
# """booleanlit in expression """
# input = """void func(){
# int a;
# a=a<b==c>c;
# boolean b;
# b=false;
# }
# """
# expect = "successful"
# self.assertTrue(TestParser.checkParser(input,expect,313))
def test_invalid_expression_20(self):
"""booleanlit in expression """
input = """int a;
a=10;
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,314))
def test_invalid_expression_21(self):
"""booleanlit in expression """
input = """
int x, y;
int main(){
a=123";
}
"""
expect = "successful"
self.assertTrue(TestParser.checkParser(input,expect,315))
| true |
39ed958788340ea3385ed61e23a9f115b29c628d
|
Python
|
thejonathanvancetrance/Alfalfa
|
/API.py
|
UTF-8
| 41,161 | 3.125 | 3 |
[
"MIT"
] |
permissive
|
######
#imports
######
# general
import statistics
import datetime
from sklearn.externals import joblib # save and load models
import random
# data manipulation and exploration
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
## machine learning stuff
# preprocessing
from sklearn import preprocessing
# feature selection
from sklearn.feature_selection import SelectKBest, SelectPercentile
from sklearn.feature_selection import f_regression
# pipeline
from sklearn.pipeline import Pipeline
# train/testing
from sklearn.model_selection import train_test_split, KFold, GridSearchCV, cross_val_score
# error calculations
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
# models
from sklearn.linear_model import LinearRegression # linear regression
from sklearn.linear_model import BayesianRidge #bayesisan ridge regression
from sklearn.svm import SVR # support vector machines regression
from sklearn.gaussian_process import GaussianProcessRegressor # import GaussianProcessRegressor
from sklearn.neighbors import KNeighborsRegressor # k-nearest neightbors for regression
from sklearn.neural_network import MLPRegressor # neural network for regression
from sklearn.tree import DecisionTreeRegressor # decision tree regressor
from sklearn.ensemble import RandomForestRegressor # random forest regression
from sklearn.ensemble import AdaBoostRegressor # adaboost for regression
# saving models
from sklearn.externals import joblib
#######################################
#DATAFRAME FUNCTIONS
#######################################
def combineLocData(dfList, state, city):
"""
inputs: dfList- list of tuples - the first element of the tuple is a date object representing the date the crops
in the corresponding dataframe were planted. The 2nd element is a dataframe.
state- string - the name of the state that corresponds with the data in the dataframe
city- string - name of the city that corresponds with the data in the dataframe
outputs: a dataframe that is an aggregate of all of the given dataframes.
NOTE: This function assumes that the dataframes contain a "Variety" column, columns labeled with dates (as strings),
and any columns representing a "total" amount has a label which is a string and ends with a ")".
"""
#initialize list of dictionaries
dictList = []
for dateSown, df in dfList:
#get the sownDate
sownDate = dateSown
#get the number of columns
colNumber = df.shape[1]
#get the column names
colNames = list(df.columns.values)
#loop through every row (each row is a series)
for index, row in df.iterrows():
#initialize yield list to hold values for valDict
yieldList = []
#for every column in this row
for i in range(colNumber):
                #if it is the "Variety" column (assumes there is only one such column)
                if colNames[i].strip().lower() == "variety":
#get the variety value
variety = row[i]
#if it is not a "total" column
elif colNames[i][-1] != ")":
#get the date the crop was harvested
yieldDateString = colNames[i]
#convert the yield date from a string to a datetime
yieldDate = datetime.datetime.strptime(yieldDateString, '%m/%d/%Y').date()
#get the yield date as an epoch time
yieldDateEpoch = datetime.datetime.strptime(yieldDateString, '%m/%d/%Y').timestamp()
#get the yield
cropYield = df.iloc[index, i]
yieldTup = (yieldDate, yieldDateEpoch, cropYield)
yieldList.append(yieldTup)
#fill dictionary
for tup in yieldList:
#get vals
yieldDate = tup[0]
yieldDateEpoch = tup[1]
cropYield = tup[2]
#initialize dicitonary
dataDict = {}
dataDict["State"] = state
dataDict["City"] = city
dataDict["Date Sown"] = sownDate
dataDict["Variety"] = variety
dataDict["Date of Cut"] = yieldDate
dataDict["Date of Cut (Epoch)"] = yieldDateEpoch
dataDict["Yield (tons/acre)"] = cropYield\
#append dictionary to list of dictionaries
dictList.append(dataDict)
#make the final dataframe
finalDf = pd.DataFrame(dictList)
#rearrange columns of finalDf
finalDf = finalDf[['State', 'City', 'Date Sown', 'Variety', 'Date of Cut', 'Date of Cut (Epoch)', 'Yield (tons/acre)']]
#return result
return finalDf
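# Usage sketch (hypothetical data, for illustration): each dataframe in dfList
# holds one planting's yields, with a "Variety" column and one column per
# harvest date, e.g.
#   yields2015 = pd.DataFrame({"Variety": ["Bulldog 505"], "10/01/2015": [1.2]})
#   aggDf = combineLocData([(datetime.date(2015, 4, 15), yields2015)], "Georgia", "Athens")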
def convertToTons(df):
"""
inputs: df - a dataframe where all values in the table are ints or floats
except for one column which could have variety names.
outputs: a dataframe that is the same as the input except all of the values are converted from
(lbs/acre) to (tons/acre). (In other words, every values is divided by 2000.0)
"""
    #get the number of rows
    numRows = len(df.index)
    #for every column in df
    for col in df:
        if col != "Variety":
            #get the column's positional index
            colNumber = df.columns.get_loc(col)
            #for every row
            for rowNumber in range(numRows):
oldVal = df.iloc[rowNumber, colNumber]
newVal = round(oldVal/2000.0, 2)
df.iloc[rowNumber, colNumber] = newVal
return df
def checkSownHarvestDates(aDf):
"""
inputs: aDf -dataframe - has columns labeled "Date Sown" and "Date of Cut"
output: outputs a dataframe with a column named "Harvested in Sown Year". This columns stores a 1.0 if the data point in that
row has a cut date in the same year as its sown date
"""
### error checks
#- confirms that the given dataframe has columns labeled 'Date Sown' and 'Date of Cut'
try:
sownDate = aDf.loc[0, 'Date Sown']
cutDate = aDf.loc[0, 'Date of Cut']
    except KeyError:  # missing columns raise KeyError, not ValueError
raise ValueError("The input dataframe does not have columns labeled 'Date Sown' and 'Date of Cut'")
# loop through all of the rows of the dataframe
for index, row in aDf.iterrows():
# get sown and harvest dates
sownDate = aDf.loc[index, "Date Sown"]
cutDate = aDf.loc[index, "Date of Cut"]
# convert the strings to date objects
sownDate = datetime.datetime.strptime(sownDate, '%Y-%m-%d').date()
cutDate = datetime.datetime.strptime(cutDate, '%Y-%m-%d').date()
# get the years
sownYear = int(sownDate.year)
cutYear = int(cutDate.year)
if sownYear == cutYear:
aDf.loc[index, "Harvested in Sown Year"] = int(1)
else:
aDf.loc[index, "Harvested in Sown Year"] = int(0)
return aDf
def checkFirstHarvest(aDf):
"""
inputs: aDf - dataframe - has columns labeled "Date Sown" and "Date of Cut"
output: outputs a dataframe with a column named "First Harvest of Season". This columns stores a 1.0 if the
data point in that row was the first harvest of that year.
"""
### error checks
#- confirms that the given dataframe has columns labeled 'Date Sown' and 'Date of Cut'
try:
sownDate = aDf.loc[0, 'Date Sown']
cutDate = aDf.loc[0, 'Date of Cut']
    except KeyError:  # missing columns raise KeyError, not ValueError
raise ValueError("The input dataframe does not have columns labeled 'Date Sown' and 'Date of Cut'")
# initilalize a dictionary that will store the dates of each season. It will take the form of
# {(State, City, Date Sown): {year of harvest1: [list of cuts in that year],
# year of harvest2: [list of cuts in that year]...}
# (State, City, Date Sown)....}
dateDict = {}
# loop through all of the rows of the dataframe to fill dateDict
for index, row in aDf.iterrows():
# get variables
state = aDf.loc[index, "State"]
city = aDf.loc[index, "City"]
sownDate = aDf.loc[index, "Date Sown"]
cutDate = aDf.loc[index, "Date of Cut"]
# convert the strings to date objects
cutDate = datetime.datetime.strptime(cutDate, '%Y-%m-%d').date()
# get the year
cutYear = int(cutDate.year)
# make the key for dataDict
identifier = (state, city, sownDate)
# if the key is not in dataDict, put it in
if identifier not in dateDict:
dateDict[identifier] = {}
# if the cut year is not in dataDict[identifer], then put it in
if cutYear not in dateDict[identifier]:
dateDict[identifier][cutYear] = []
# add the cut date to the list, if it is not already in there
if cutDate not in dateDict[identifier][cutYear]:
dateDict[identifier][cutYear].append(cutDate)
# make dictionary that stores the first date of every harvest. It will be of the form:
# {(State, City, Date Sown, Year of Harvest): first date of harvest}
firstDateDict = {}
for identifier in dateDict:
for year in dateDict[identifier]:
state = identifier[0]
city = identifier[1]
sownDate = identifier[2]
# sort each list
dateDict[identifier][year].sort()
# get the first harvest
firstHarvest = dateDict[identifier][year][0]
# add entry to dictionary
firstDateDict[(state, city, sownDate, year)] = firstHarvest
# loop through every row of the dataframe and make the value of the column "First Harvest of Season"
# a 1.0 if the cut date is the first harvest of the season, or a 0.0 if it is not.
for index, row in aDf.iterrows():
# get variables
state = aDf.loc[index, "State"]
city = aDf.loc[index, "City"]
sownDate = aDf.loc[index, "Date Sown"]
cutDate = aDf.loc[index, "Date of Cut"]
# convert the strings to date objects
cutDate = datetime.datetime.strptime(cutDate, '%Y-%m-%d').date()
# get the year
cutYear = int(cutDate.year)
# make identifier (the key to firstDateDict)
identifier = (state, city, sownDate, cutYear)
# get the first date of the harvest
firstDate = firstDateDict[identifier]
# fill values of "First Harvest of Season"
        if firstDate == cutDate:
            aDf.loc[index, "First Harvest of Season"] = int(1)
        else:
            aDf.loc[index, "First Harvest of Season"] = int(0)
# return result
return aDf
##############################################################################################
#PLOT FUNCTIONS
##############################################################################################
def plotYield(aDataframe, cityName, sownDate=""):
"""
    inputs- aDataframe- dataframe obj - should have columns called "City", "Date of Cut", and "Yield (tons/acre)"
            cityName - string - the name of the city in which the crop yield data should be gathered from
            sownDate- string of the form '%Y-%m-%d' - the sown date
output - no output, but it does generate a graph showing the average yield of crops for a particular city
NOTE: assumes that the package matplotlib as been imported
"""
## lets plot the average alfalfa yield over time (x-axis= datetime, y-axis= yield)
if sownDate == "":
cityDf = df.loc[aDataframe['City'] == cityName]
else:
cityDf = df.loc[ (aDataframe['Date Sown'] == sownDate) & (aDataframe['City'] == cityName) ]
# lets make a dictionary holding the values to be plotted. It will
# be of the form: {date_of_Cut: avgYield of all varieties}
plotDict = {}
for index, row in cityDf.iterrows():
doc = cityDf.loc[index, "Date of Cut"]
if doc not in plotDict:
plotDict[doc] = [0.0, 0] #this list is of the form [sumOfYield, numberOfVarietiesInSum]
aYield = cityDf.loc[index, "Yield (tons/acre)"]
plotDict[doc][0] += aYield
plotDict[doc][1] += 1
# make lists that will be used to make the plot
xList = []
yList = []
for key in plotDict:
# get x-value
datetimeObj = datetime.datetime.strptime(key, '%Y-%m-%d')
xList.append(datetimeObj)
# get y-value
aSum = plotDict[key][0]
n = plotDict[key][1]
avg = (aSum/n)*1.0
yList.append(avg)
#plot settings
dates = matplotlib.dates.date2num(xList)
matplotlib.pyplot.plot_date(dates, yList)
plt.gcf().autofmt_xdate()
plt.show()
def plotAlfAndWeather(alfDf, wDf, city, sownDate, weather, athensReplacement="Watuga", show=True):
"""
inputs- alfDf - dataframe - dataframe storing alfalfa yield data. Must have columns labeled
"City", "Date Sown", "Date of Cut", and "Yield (tons/acre)"
          - wDf - dataframe - dataframe storing weather data. Must have columns labeled
"City", "Date", and weather(this is the input variable)
- city - string - string name of the city whose data will be plotted. The city must be in
the "City" columns of alfDf and wDf
-sownDate - string of the form year-month-day (XXXX-XX-XX) - the sown date of the data to be
plotted. This must be in the "Date Sown" col of alfDf
-weather - string - represents some type of data. Must be the same as a col name of wDf
-athensReplacement - string - the GAEMN data does not have athens weather data, but it does
have Watkinsville data. So this input must have a value in the
col "City" of wDf. This data will be used as the athens data.
-show - boolean - If 'True', then the final plot will be shown. Else, no plots will be shown.
output- returns the final plot while also showing that plot if show=True.
NOTE: The average yield of all alfalfa varieties at each cut is being graphed. It is assumed that there is a df with
the variety yield data ('alfDf') and a different dataframe has the daily weather data ('wDf')
"""
#imports
import matplotlib.pyplot as plt
## make sub dataframes
#alfalfa
cityAlfDf = alfDf.loc[(alfDf["City"] == city) & (alfDf["Date Sown"] == sownDate)]
#weather
# check to see if city="Athens" (there is not GAEMN data for athens, but there is for Watkinsville)
if city=="Athens":
cityWDf = wDf.loc[(wDf["City"] == athensReplacement)]
else:
cityWDf = wDf.loc[(wDf["City"] == city)]
## make lists of alfalfa data- make list of dates and list of crop yields
# lets make a dictionary holding the values to be plotted. It will
# be of the form: {date_of_Cut: avgYield of all varieties}. This will
# be used to store the average yield of all alfalfa varieties.
plotDict = {}
for index, row in cityAlfDf.iterrows():
doc = cityAlfDf.loc[index, "Date of Cut"]
if doc not in plotDict:
plotDict[doc] = [0.0, 0] #this list is of the form [sumOfYield, numberOfVarietiesInSum]
aYield = cityAlfDf.loc[index, "Yield (tons/acre)"]
plotDict[doc][0] += aYield
plotDict[doc][1] += 1
## make lists that will be used to make the plot
xListAlf = []
yListAlf = []
for key in plotDict:
# get x-value
datetimeObj = datetime.datetime.strptime(key, '%Y-%m-%d')
xListAlf.append(datetimeObj)
# get y-value
aSum = plotDict[key][0]
n = plotDict[key][1]
avg = (aSum/n)*1.0
yListAlf.append(avg)
# normalize all the values in yListAlf
maxValue = max(yListAlf)
yListAlf = [float(i)/maxValue for i in yListAlf]
# make lists of weather data- make list of dates and weather info
xListW = []
yListW = []
for index, row in cityWDf.iterrows():
#get x-value
datePoint = cityWDf.loc[index, "Date"]
datePoint = datetime.datetime.strptime(datePoint, '%Y-%m-%d')
#get y-value
weatherPoint = cityWDf.loc[index, weather]
#fill lists
xListW.append(datePoint)
yListW.append(weatherPoint)
# normalize all the values in yListW
maxValue = max(yListW)
yListW = [float(i)/maxValue for i in yListW]
## make the plot
fig = plt.figure()
plt.plot(xListW, yListW, color="b", label=weather, linewidth=0.5)
plt.plot(xListAlf, yListAlf, color='r', label="Crop Yield (tons/acre)", linestyle='--', marker='o')
## plot settings
# make title
index = weather.find('(')
weatherString = weather[:index]
title = "Yield and " + weatherString + " for " + city + ", sown at " + str(sownDate)
plt.title(title)
# make a legend and place it below the picture
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), shadow=True, ncol=2)
plt.xticks(rotation=45)
# show plot
if show:
plt.show()
return fig
def makeAlfWeatherPDFDaily(saveLocation, citySownList, weatherList, alfDf, wDf, athensReplacement="Watuga", show=True):
"""
inputs- saveLocation - raw string - directory location where the pdf will be saved.
- citySownList - list of tuples of the form (String1, String2) - where String1 is a city
in the "City" col of alfDf and wDf and String 2 is the sown date
(year-month-day XXXX-XX-XX) in the "Date Sown" col of alfDf.
- weatherList - list of strings - where each string is a col name in wDf
- alfDf - dataframe - dataframe storing alfalfa yield data. Must have columns labeled
"City", "Date Sown", "Date of Cut", and "Yield (tons/acre)"
             - wDf - dataframe - dataframe storing weather data. Must have columns labeled
"City", "Date", and weather(this is the input variable)
-athensReplacement - string - the GAEMN data does not have athens weather data, but it does
have Watkinsville data. So this input must have a value in the
col "City" of wDf. This data will be used as the athens data.
-show - boolean - If 'True', then the final plot will be shown. Else, no plots will be shown.
outputs- no output, but will save a pdf of all of the plots that are made
NOTE: It is also assumed that there is a df with the variety yield data ('alfDf') and a different dataframe
has the daily weather data ('wDf')
"""
# import pdf stuff
from matplotlib.backends.backend_pdf import PdfPages
# make plots and save them
pdf = matplotlib.backends.backend_pdf.PdfPages(saveLocation)
for city, sownDate in citySownList:
for weather in weatherList:
fig = plotAlfAndWeather(alfDf, wDf, city, sownDate, weather, athensReplacement, show)
pdf.savefig(fig, bbox_inches='tight')
pdf.close()
def plotYieldAndWeather(aDf, city, sownDate, weatherVar, show=True):
"""
inputs - aDf - dataframe - must have columns labeled "Date of Cut", "City", "Date Sown", and "Variety"
- city - string - the name of the city whose data should be plotted
- sownDate - string - the date in which the crop whose yield should be plotted is sown
- weatherVar - string - the name of the column in "aDf" whose data will be graphed along with the yield
            - show - boolean - if True, then the plot is shown, else the plot is not shown
output - a matplotlib figure of the crop yield and the weather data
NOTE: This function should be used with the aggregate data being loaded in as 'aDf'. So both the alfalfa yield data,
          the weather data, and the aggregated weather data should all be in this table.
"""
import matplotlib.pyplot as plt
# make a sub dataframe that only contains the relevant information
subDf = aDf.loc[(aDf['City'] == city) & (aDf["Date Sown"] == sownDate)]
# lets make a dictionary holding the values to be plotted. It will
# be of the form: {date_of_Cut: [sumOfYield, numberOfVarietiesInSum, weatherVal]}
plotDict = {}
for index, row in subDf.iterrows():
doc = subDf.loc[index, "Date of Cut"]
if doc not in plotDict:
weatherVal = subDf.loc[index, weatherVar]
plotDict[doc] = [0.0, 0, weatherVal] #this list is of the form [sumOfYield, numberOfVarietiesInSum, weatherVal]
aYield = subDf.loc[index, "Yield (tons/acre)"]
plotDict[doc][0] += aYield
plotDict[doc][1] += 1
## make lists that will be used to make the plot
xVals = []
yValsYield = []
yValsW = []
for key in plotDict:
# get x-value
datetimeObj = datetime.datetime.strptime(key, '%Y-%m-%d')
xVals.append(datetimeObj)
# get yield y-value
aSum = plotDict[key][0]
n = plotDict[key][1]
avg = (aSum/n)*1.0
yValsYield.append(avg)
# get weather y-values
weatherVal = plotDict[key][2]
yValsW.append(weatherVal)
# get pearson correlation coefficient
corr = np.corrcoef(yValsYield, yValsW)[0, 1]
# normalize all the values in yValsYield
minValue = min(yValsYield)
maxValue = max(yValsYield)
yValsYield = [((float(i) - minValue)/(maxValue - minValue)) for i in yValsYield]
# normalize all the values in yValsW
minValue = min(yValsW)
maxValue = max(yValsW)
yValsW = [((float(i) - minValue)/(maxValue - minValue)) for i in yValsW]
## make the plot
fig = plt.figure()
plt.plot(xVals, yValsYield, color="r", label="Crop Yield (tons/acre)", linestyle='--', marker='o')
plt.plot(xVals, yValsW, color='b', label=weatherVar, linestyle='--', marker='o')
#make an empty plot so i can have the correlation value
plt.plot([], [], ' ', label="R = " + str(corr) )
## plot settings
# make title
index = weatherVar.find('(')
weatherString = weatherVar[:index]
title = "Yield and " + weatherString + " for " + city + ", sown at " + str(sownDate)
plt.title(title)
# make a legend and place it below the picture
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), shadow=True, ncol=2)
plt.xticks(rotation=30)
if show:
#show the plot
plt.show()
return fig
def makeYieldWeatherPDFAggregate(saveLocation, citySownList, weatherList, aDf, show=True):
"""
inputs- saveLocation - raw string - directory location where the pdf will be saved.
- citySownList - list of tuples of the form (String1, String2) - where String1 is a city
in the "City" col of alfDf and wDf and String 2 is the sown date
(year-month-day XXXX-XX-XX) in the "Date Sown" col of alfDf.
- weatherList - list of strings - where each string is a col name in wDf
- aDf - dataframe - must have columns labeled "Date of Cut", "City", "Date Sown", and "Variety"
- show - boolean - If 'True', then the final plot will be shown. Else, no plots will be shown.
outputs- no output, but will save a pdf of all of the plots that are made
NOTE: This function should be used with the aggregate data being loaded in as 'aDf'. So both the alfalfa yield data,
    the weather data, and the aggregated weather data should all be in this table.
"""
# import matplotlib and pdf stuff
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
# make plots and save them
pdf = matplotlib.backends.backend_pdf.PdfPages(saveLocation)
for city, sownDate in citySownList:
for weatherVar in weatherList:
            fig = plotYieldAndWeather(aDf, city, sownDate, weatherVar, show=show)  # pass the caller's show flag through
pdf.savefig(fig, bbox_inches='tight')
pdf.close()
#########################################################################################################################
# DATE FUNCTIONS
#########################################################################################################################
def dateStringToJulianDay(dateString):
"""
    inputs- dateString - string of the form "YEAR-MONTH-DAY" (XXXX-XX-XX) - a string representing a date that will be
                         converted to a Julian day (an int ranging from 1 to 366).
    output- an integer representing the day of the year, counting January 1st as day 1.
"""
### error checks- checks that the input has the correct format
if not (isinstance(dateString, str)):
raise ValueError("The input is not a string; the input should be a string")
try:
datetimeObj = datetime.datetime.strptime(dateString, '%Y-%m-%d')
except ValueError:
raise ValueError("The input has an incorrect data format; it should be YYYY-MM-DD")
# convert the dateString to a date object
datetimeObj = datetime.datetime.strptime(dateString, '%Y-%m-%d').date()
# get the year
year = int(datetimeObj.year)
#find the amount of days that has passed since Jan 1st of that year
d0 = datetime.date(year, 1, 1)
delta = datetimeObj - d0
julianDay = delta.days + 1
# return result
return julianDay
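# Worked example: dateStringToJulianDay("2020-02-01") returns 32 -- 31 days
# separate it from January 1st, and January 1st itself counts as day 1.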
def interpolatePercentCover(percentDatePrev, percentDateAfter, percentCoverPrev, percentCoverAfter, cutDate):
"""
inputs: percentDatePrev - date obj - date prior to cut date that recorded the percent cover
percentDateAfter - date obj - date after the cut date that recorded the percent cover
percentCoverPrev - float - percent cover measured on percentDatePrev
percentCoverAfter - float - percent cover measured on percentDateAfter
cutDate - date obj - the date a variety was cut and its yield was measured.
    output: float between 0.0 and 1.0 representing the estimated percent cover on the cut date.
"""
# find the amount days between the times the percent cover was measured
daysBetweenCoverMeasurements = percentDateAfter - percentDatePrev
# find the amount of days between the cut date and the date after it where the percent cover was measured
daysBetweenCutDateAndCoverMeasurement = percentDateAfter - cutDate
# get the ratio
ratio = (1.0*daysBetweenCutDateAndCoverMeasurement)/(daysBetweenCoverMeasurements)
# get the percent cover on the cutDate
cutPercentCover = (-1.0)*(ratio*(percentCoverAfter - percentCoverPrev) - percentCoverAfter)
# return result
return cutPercentCover
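# Worked example (made-up numbers): with cover 0.40 measured 10 days before the
# cut and cover 0.60 measured 5 days after it, the two measurements are 15 days
# apart, ratio = 5/15, and the interpolated cover is
# -( (5/15)*(0.60-0.40) - 0.60 ) = 0.533..., i.e. two thirds of the way from
# the earlier measurement to the later one.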
###############################################################################################
# Machine Learning Stuff
###############################################################################################
def makeTrainTestData(xDf, yDf, testSize=0.2, trainSize=None, randomSeed=None):
"""
inputs: xDf - dataframe - where each column contains values to be used to make machine learning model
yDf - dataframe - with a single column, such that the column contains values that should be the
expected result from a machine learning model when given the corresponding inputs from
'xDf'.
testSize - float ranging from 0.0 to 1.0, or an int - If a float, this represents the percentage of the data that
should be in a testing set. If an int, then this represents the absolute number of data points that
should be included in the testing sets.
trainSize - float ranging from 0.0 to 1.0, or an int - If a float, this represents the percentage of the data that
should be in a training set. If an int, then this represents the absolute number of data points that
should be included in the training sets.
randomSeed - int - The seed that will decide how the data will be randomized before making the training/testing sets.
    outputs: (returned in this order)
             xTrain - numpy array - contains all of the training data. Used for training models.
             xTest - numpy array - contains all of the testing data. Used for testing models.
             yTrain - list - has all of the ground truth outputs. This corresponds to xTrain. Used for training models.
             yTest - list - has all of the ground truth outputs. This corresponds to xTest. Used for testing models.
NOTE: It is assumed that every value within one column has the same data type. There should be no instances in either input
Dataframe where there is no value in a row.
NOTE: The value in the first row of 'outputDataframe' is the true value that corresponds to the inputs from the first row
of the 'inputDataframe'. This is the case for all rows.
"""
# make the first input to train_test_split
X = xDf.values
# make the second input to train_test_split
columnName = yDf.columns[0]
y = []
for i in range(len(yDf.index)): # loop through every row of the dataframe
y.append(yDf.iloc[i, 0])
# use sklearn's train test split
x_train, x_test, y_train, y_test = train_test_split(X,
y,
test_size=testSize,
train_size=trainSize,
random_state=randomSeed)
return x_train, x_test, y_train, y_test
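# Usage sketch (hypothetical column names, for illustration): xDf holds the
# feature columns and yDf the single target column, e.g.
#   xTrain, xTest, yTrain, yTest = makeTrainTestData(
#       aggDf[["Date of Cut (Epoch)", "First Harvest of Season"]],
#       aggDf[["Yield (tons/acre)"]], testSize=0.2, randomSeed=42)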
def getBestModel(N, xDf, yDf, emptyModel, paramGrid, features, metricToOptimize='r2'):
"""
inputs: N - int - the number of times the model should be trained and evaluated.
xDf - pandas dataframe - the rows represent the data points, the columns represent the features. These
are the inputs into the model
           yDf - pandas dataframe - the rows represent the data points, there is only one column. This contains
                                    the target values for the model.
emptyModel - sklearn model - a valid sci-kit learn model with a 'fit' method.
            paramGrid - dictionary - the param_grid to be used with this model in a grid search. Note that each parameter name
                                     in the grid must start with 'model__' (two underscores).
features - int or float - if int, then use SelectKBest where k='features'. If float, use SelectPercentile
where 'features' is the percentage
metricToOptimize - string - either 'mae' or 'r2'.
outputs: avgMAE - the average mean absolute error of the model as it is evaluated N times.
avgRSq - the average R^2 value of the model as it is evaluated N times.
bestMAE - the mean absolute error of the best model out of the N iterations.
bestRSq - the R^2 of the best model out of the N iterations.
             bestModel - the trained best model out of the N iterations.
             featuresUsed - the list of feature names selected for the best model.
NOTE: This assumes the data in xDf has been standardized or normalized before being used in this function.
"""
# initialize the outputs
avgMAE = 0.0
avgRSq = 0.0
bestRSq = -9999999999.99
bestMAE = 9999999999.99
# get the input features in the correct format
X = xDf.values
# put the target values in the correct format
columnName = yDf.columns[0]
y = []
for i in range(len(yDf.index)): # loop through every row of the dataframe
y.append(yDf.iloc[i, 0])
# convert the list to a numpy array
y = np.asarray(y)
# make the cv settings
cv = KFold(n_splits=N, random_state=42, shuffle=True)
# for every fold
for train_index, test_index in cv.split(X):
#for train_index, test_index in zip(X[:224], X[224:]):
# standardization
standardScaler = preprocessing.StandardScaler()
# feature selection
if type(features) == int:
featureSelection = SelectKBest(f_regression, k=features)
        elif type(features) == float:
            # 'features' is already a percentage in [0, 100], which is what SelectPercentile expects
            featureSelection = SelectPercentile(f_regression, percentile=features)
else:
raise ValueError("The input 'features' is not an integer or a float. It should be.")
# make a pipeline
pipe = Pipeline(steps=[('standardization', standardScaler),
('feature selection', featureSelection),
('model', emptyModel)])
# get the train and test data
xTrain, xTest, yTrain, yTest = X[train_index], X[test_index], y[train_index], y[test_index]
# do a grid search and K-fold cross validation
numFolds = 5 # 5-Fold cross validation
# make the model with optimized hyperparameters via a grid search with cross validation
model = GridSearchCV(
estimator=pipe,
param_grid=paramGrid,
cv=KFold(n_splits=numFolds, shuffle=True),
scoring='neg_mean_absolute_error',
return_train_score=False
)
# fit model
model.fit(xTrain, yTrain)
# get predictions
pred = model.predict(xTest)
trainPred = model.predict(xTrain)
# find errors
meanAbsoluteError = mean_absolute_error(yTest, pred)
trainMeanAbsoluteError = mean_absolute_error(yTrain, trainPred)
# find the R^2 values
rSq = r2_score(yTest, pred)
trainRSq = r2_score(yTrain, trainPred)
# add the errors and R Squared to average values
avgMAE += meanAbsoluteError
avgRSq += rSq
# check to see which metric should be optimized
if metricToOptimize == 'r2':
# check to see if any of these are the best values
if (rSq > bestRSq):
bestMAE = meanAbsoluteError
bestModel = model
bestRSq = rSq
elif metricToOptimize == 'mae':
# check to see if any of these are the best values
if (meanAbsoluteError < bestMAE):
bestMAE = meanAbsoluteError
bestModel = model
bestRSq = rSq
else:
raise ValueError("The input 'metricToOptimize' does not have a valid input. It must be 'r2' or 'mae'.")
# divide the sums by N to get the averages
avgMAE /= N
avgRSq /= N
## get the features that were selected to train the best model
# get all of the feature names and store in a numpy array
features = np.asarray(list(xDf))
# get a boolean list to say which features were kept
boolArray = bestModel.best_estimator_.named_steps['feature selection'].get_support()
# get a list of which features were kept
featuresUsed = np.ndarray.tolist(features[boolArray])
## return the results
return avgMAE, avgRSq, bestMAE, bestRSq, bestModel, featuresUsed
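# Usage sketch (hypothetical grid): the parameter names carry the 'model__'
# prefix because the estimator sits in the 'model' step of the pipeline:
#   grid = {'model__n_neighbors': [3, 5, 7]}
#   avgMAE, avgRSq, bestMAE, bestRSq, bestModel, featuresUsed = \
#       getBestModel(5, xDf, yDf, KNeighborsRegressor(), grid, features=4)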
# use xTest signature to train on source, test on target
#def saveMLResults(xTest, yTest, N, xDf, yDf, modelList, workingDir, numFeatures, printResults=True):
def saveMLResults(N, xDf, yDf, modelList, workingDir, numFeatures, printResults=True):
"""
inputs: N - int - the number of times the model should be trained and evaluated.
xDf - pandas dataframe - the rows represent the data points, the columns represent the features. These
are the inputs into the model
           yDf - pandas dataframe - the rows represent the data points, there is only one column. This contains
                                    the target values for the model.
modelList - list of tuples - each tuple takes the form of
(empty sklearn model, parameter grid for sklearn's gridsearchcv, name of file to be saved).
The parameter grid should be a dictionary of possible parameter values for the empty model.
Look at sklearn's documentation for more information
workingDir - string - the directory where the final results should be saved
numFeatures - int or float - if int, then use SelectKBest where k='features'. If float, use SelectPercentile
where 'features' is the percentage
printResults - boolean - if True, then also print the results. Otherwise, dont print the results
outputs: nothing is returned, but the results are saved at the given location. A tuple is saved of the form
(bestModel, bestFeatures, bestMAE, bestRSq, avgMAE, avgRSq). Each value means the following
-bestModel - the best model found by 'getBestModel'. Note that this is the trained sklearn model itself
-bestFeatures - the chosen features for the best model
-bestMAE - the mean absolute error of the best model
-bestRSq - the R squared value of the best model
-avgMAE - the average mean absolute error of the model over the N iterations
-avgRSq- the average R squared value of the model over the N iterations
"""
# for every entry in the list
for tup in modelList:
model = tup[0]
paramGrid = tup[1]
filename = tup[2]
# get results
avgMAE, avgRSq, bestMAE, bestRSq, bestModel, bestFeatures = getBestModel(N, xDf, yDf, model, paramGrid,
features=numFeatures, metricToOptimize='r2')
# convert tons to lbs to make results more readable
avgMAE = (round(avgMAE*2000, 3))
bestMAE = (round(bestMAE*2000, 3))
# get the save location
saveLoc = workingDir + "\\" + filename
# get model name
stopIndex = filename.find(".")
modelName = filename[:stopIndex]
        # save the trained model and its metrics (overwrites any file already at this location)
joblib.dump((bestModel, bestFeatures, bestMAE, bestRSq, avgMAE, avgRSq), saveLoc)
# if 'printResults' is True, then print results
if printResults:
print("model: ", modelName)
print("Avg MAE: ", avgMAE)
print("Avg R squared: ", round(avgRSq, 3))
print("Best MAE: ", bestMAE)
print("Best R squared: ", round(bestRSq, 3))
print("Parameters of the best model: ", bestModel.best_params_)
print("Features selected by best model: ", bestFeatures)
print(" ")
#m,r = predictTargetWithSource(xTest,yTest,bestModel)
# print("results:")
# print(m)
# print(r)
#count += 1
def predictTargetWithSource(xTest,yTest,model):
print("test results on our test data: ")
# get predictions
pred = model.predict(xTest)
#trainPred = model.predict(xTrain)
# find errors
meanAbsoluteError = mean_absolute_error(yTest, pred)
#print(meanAbsoluteError)
# mae = str(meanAbsoluteError)
#trainMeanAbsoluteError = mean_absolute_error(yTrain, trainPred)
# find the R^2 values
rSq = r2_score(yTest, pred)
return(meanAbsoluteError, rSq)
# print("Test mae: " + mae)
# print("Test rsq: " + "{:.2f}".format(rSq))
#trainRSq = r2_score(yTrain, trainPred)
# # add the errors and R Squared to average values
# avgMAE += meanAbsoluteError
# avgRSq += rSq
# # check to see which metric should be optimized
# if metricToOptimize == 'r2':
# # check to see if any of these are the best values
# if (rSq > bestRSq):
# bestMAE = meanAbsoluteError
# bestModel = model
# bestRSq = rSq
# elif metricToOptimize == 'mae':
# # check to see if any of these are the best values
# if (meanAbsoluteError < bestMAE):
# bestMAE = meanAbsoluteError
# bestModel = model
# bestRSq = rSq
# else:
# raise ValueError("The input 'metricToOptimize' does not have a valid input. It must be 'r2' or 'mae'.")
| true |
7cd4282c58dc99b65c308f5353fe45b79a84a64a
|
Python
|
asdf2014/algorithm
|
/Codes/gracekoo/49_group-anagrams.py
|
UTF-8
| 454 | 3.46875 | 3 |
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
# @Time: 2020/2/20 00:01
# @Author: GraceKoo
# @File: 49_group-anagrams.py
# @Desc:https://leetcode-cn.com/problems/group-anagrams/
import collections
class Solution:
def groupAnagrams(self, strs):
ans = collections.defaultdict(list)
for s in strs:
ans[tuple(sorted(s))].append(s)
return ans.values()
so = Solution()
print(so.groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"]))
| true |
e99e582b478f2097a4acce8d0fc5c9d837cb422f
|
Python
|
Udit107710/CompetitiveCoding
|
/Codeforces/Contest1294/D.py
|
UTF-8
| 157 | 2.6875 | 3 |
[] |
no_license
|
from sys import stdin
t, x = map(int, stdin.readline().split())  # split the line before mapping to int
arr = []
for _ in range(t):
arr.append(int(stdin.readline()))
new_arr = sorted(arr)
| true |
fcf163325a6a444010b58657e5ded7c45a23a08f
|
Python
|
mo-oq/tkseem
|
/tkseem/test.py
|
UTF-8
| 390 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
from .util import remove_tashkeel
import unittest
class TestUnit(unittest.TestCase):
def test_tashkeel(self):
self.assertEqual(remove_tashkeel("مِكَرٍّ مِفَرٍّ مُقبِلٍ مُدبِرٍ مَعًا")
, "مكر مفر مقبل مدبر معا", "Remove Tashkeel is not working")
unittest.main(argv=['first-arg-is-ignored'], exit=False)
| true |
0a6d9ecdb48d0db8b1101db135463416644e2bbe
|
Python
|
HerMelin84/165
|
/week2/wordcounter.py
|
UTF-8
| 276 | 2.875 | 3 |
[] |
no_license
|
from sys import argv
def word_counter():
    # print "<filename>: <word count>" for every file given on the command line
    for filename in argv[1:]:
        with open(filename) as f:
            word_count = 0
            for line in f:
                word_count += len(line.split())
        print(filename + ": " + str(word_count))

word_counter()
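# Usage sketch (assumed invocation): python wordcounter.py file1.txt file2.txt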
| true |
ea28363d770f1263b51bf2ae3d0b679fa54b72e9
|
Python
|
houyinhu/AID1812
|
/pythonNet/sock_attr.py
|
UTF-8
| 452 | 2.96875 | 3 |
[] |
no_license
|
from socket import *
s = socket()
# set the socket option so the port can be reused immediately
s.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
print(s.getsockopt(SOL_SOCKET,SO_REUSEADDR))
print(s.family)  # address family
print(s.type)    # socket type
# '0.0.0.0' assumed here; the original '0.0.0.1' is not a bindable local address
s.bind(('0.0.0.0',8888))
print(s.getsockname())  # get the bound address
print(s.fileno())       # get the file descriptor
s.listen(3)
c,addr = s.accept()
print(c.getpeername())
# standard I/O handles (would need "from sys import stdin, stdout, stderr"):
# stdout.fileno()
# stderr.fileno()
# stdin.fileno()
| true |
a5e02a5deda11cfbf6e00255c3eeb4c0245a2c16
|
Python
|
BilllYang/Alphacat_bot
|
/fsm.py
|
UTF-8
| 5,458 | 2.875 | 3 |
[] |
no_license
|
from transitions.extensions import GraphMachine
class TocMachine(GraphMachine):
def __init__(self, **machine_configs):
self.machine = GraphMachine(
model = self,
**machine_configs
)
def is_going_to_start_state(self,update):
text = update.message.text
return text == 'hi'
def is_going_to_state1(self, update):
text = update.message.text
return text.lower() == 'start'
    def is_going_to_state1_2(self, update):
        global text
        text = update.message.text
        return text in ('剪刀', '石頭', '布')
    def is_going_to_state1_3(self, update):
        global text
        text = update.message.text
        return text in ('剪刀', '石頭', '布')
    def is_going_to_state1_4(self, update):
        global text
        text = update.message.text
        return text in ('剪刀', '石頭', '布')
def is_going_to_state2(self, update):
text = update.message.text
return text.lower() == 'method'
def is_going_to_state3(self, update):
text = update.message.text
return text.lower() == 'intro'
def on_enter_start_state(self,update):
update.message.reply_text("嗨 我是猜拳機器人,要我陪你猜個拳嗎?連續猜贏三次有獎勵喔!\n start-->開始猜拳\n intro-->深入了解我的原理(和獎品) \n method -->相關理論")
self.advance(update)
def on_enter_state1(self, update):
update.message.reply_text("剪刀,石頭......")
self.advance(update)
def on_exit_state1(self, update):
print('Leaving state1')
def on_enter_state1_2(self, update):
global text
if text == '剪刀':
update.message.reply_text("布!")
update.message.reply_text("你運氣還不錯")
update.message.reply_text("再來!!!剪刀,石頭......")
self.advance(update)
elif text == '石頭':
update.message.reply_text("剪刀!")
update.message.reply_text("你運氣還不錯")
update.message.reply_text("再來!!!剪刀,石頭......")
update.message.text = ''
self.advance(update)
elif text == '布':
update.message.reply_text("石頭!")
update.message.reply_text("你運氣還不錯")
update.message.reply_text("再來!!!剪刀,石頭......")
update.message.text = ''
self.advance(update)
def on_exit_state1_2(self, update):
print("......")
def on_enter_state1_3(self, update):
global text
if text == '剪刀':
update.message.reply_text("布!")
update.message.reply_text("好了,你下次不可能贏了")
update.message.reply_text("再來!!!剪刀,石頭......")
update.message.text = ''
self.advance(update)
elif text == '石頭':
update.message.reply_text("剪刀!")
update.message.reply_text("好了,你下次不可能贏了")
update.message.reply_text("再來!!!剪刀,石頭......")
update.message.text = ''
self.advance(update)
elif text == '布':
update.message.reply_text("石頭!")
update.message.reply_text("好了,你下次不可能贏了")
update.message.reply_text("再來!!!剪刀,石頭......")
update.message.text = ''
self.advance(update)
def on_exit_state1_3(self, update):
print("......")
def on_enter_state1_4(self, update):
global text
if text == '剪刀':
update.message.reply_text("石頭!")
update.message.reply_text("遜")
update.message.reply_text("掰回家練練再來")
update.message.text = ''
self.go_back(update)
elif text == '石頭':
update.message.reply_text("布!")
update.message.reply_text("爛")
update.message.reply_text("掰回家練練再來")
update.message.text = ''
self.go_back(update)
elif text == '布':
update.message.reply_text("剪刀!")
update.message.reply_text("嫩")
update.message.reply_text("掰回家練練再來")
update.message.text = ''
self.go_back(update)
def on_exit_state1_4(self, update):
print("...")
def on_enter_state2(self, update):
update.message.reply_text("Association Rules : http://myweb.fcu.edu.tw/~mhsung/Ecommerce/Data_Mining/Association_Folder/DM_association.htm\n 貝氏定理 : https://taweihuang.hpd.io/2017/03/21/mlbayes/ \n Recurrent Neural Network : http://cpmarkchang.logdown.com/posts/278457-neural-network-recurrent-neural-network")
self.go_back(update)
def on_exit_state2(self, update):
print('Leaving state2')
def on_enter_state3(self, update):
update.message.reply_text("我是基於Association rule分析大量的猜拳習慣並以貝氏定理建構recurrent neural networks的猜拳機器人,在交叉驗證下,結果顯示你可以連續猜贏我三次的機率小於ε\n如果你幸運連續猜贏我三次,歡迎來找我,我可以請你吃明天晚餐")
self.go_back(update)
def on_exit_state3(self, update):
print('Leaving state3')
| true |
4843a8e0d1b827040f5c21c39cdc2e59137b8ad4
|
Python
|
AdamJSoftware/iti1120
|
/assignments/A4/pt2/a4_Q3_300166171.py
|
UTF-8
| 864 | 3.78125 | 4 |
[] |
no_license
|
# ITI1120 Assignment 4 - PT 2 -2
# Adam Jasniewicz 300166171
def longest_run(array):
'''
array -> array of floats
Returns the length of the longest run
'''
    last_run = None
run_lengths = []
same_run = False
if len(array) != 0:
for run in array:
if run == last_run:
if same_run:
run_lengths[-1] = run_lengths[-1] + 1
else:
same_run = True
run_lengths.append(2)
else:
run_lengths.append(1)
same_run = False
last_run = run
return max(run_lengths)
return 0
raw_input = input("Please input a list of numbers separated by space: ").strip().split()
number_input = []
for string in raw_input:
number_input.append(float(string))
print(longest_run(number_input))
| true |
985e6e2e8224c22dd49e9f1673dcb2ce3d2956c6
|
Python
|
Cloudbeast/NYU_Python
|
/chartype.py
|
UTF-8
| 301 | 4.25 | 4 |
[] |
no_license
|
print("Enter a character: ")
s = input()
result = "a digit."
if s.isalpha():
if s.isupper():
result= "an upper case letter."
else:
result="a lower case letter."
elif not s.isdigit():
result="a non-alphanumeric character."
print(s, " is ", result, sep="")
| true |
cd3725178e6e441e69220df3793d755e1b780f2d
|
Python
|
mrudula-pb/Python_Code
|
/CS62/calculate_MeanMedianRange.py
|
UTF-8
| 1,102 | 4.46875 | 4 |
[] |
no_license
|
#Part 4: Define a function to calculate the mean, median, and range (range = max-min) of a
# given list of numbers
# #hint: use the 'statistics' library for mean and median import statistics my_list = [1,3,5,4,2,6,8,4,8,9,11]
import statistics
class Solution:
    def calculate_MeanMedianRange(self, lst):
        # Mean: the sum of the items divided by how many there are
        total = 0
        for item in lst:
            total += item
        mean = total / len(lst)
        # Median of the sorted list (for an even count, average the two middle values)
        new_lst = sorted(lst)
        lst_length = len(new_lst)
        mid = lst_length // 2
        if lst_length % 2 == 1:
            median = new_lst[mid]
        else:
            median = (new_lst[mid - 1] + new_lst[mid]) / 2
        # Range = max - min ('value_range' avoids shadowing the built-in range)
        value_range = new_lst[-1] - new_lst[0]
        return mean, median, value_range

solution = Solution()
my_list = [1,3,5,4,2,6,8,4,8,9,11]
mean, median, value_range = solution.calculate_MeanMedianRange(my_list)
print("Mean is: " + str(mean))
print("Median is: " + str(median))
print("Range is: " + str(value_range))
| true |
dee74de934b998b5cfcc8a35c707409fdbdd831a
|
Python
|
schneiderfelipe/python-warrior
|
/pythonwarrior/abilities/explode.py
|
UTF-8
| 704 | 3.015625 | 3 |
[
"MIT"
] |
permissive
|
from pythonwarrior.abilities.base import AbilityBase
class Explode(AbilityBase):
def description(self):
return("Kills you and all surrounding units."
" You probably don't want to do this intentionally.")
def perform(self):
if self._unit.position:
self._unit.say("explodes, collapsing the ceiling and "
"damaging every unit.")
for unit in self._unit.position.floor.units:
unit.take_damage(100)
def pass_turn(self):
if self.time and self._unit.position:
self._unit.say("is ticking")
self.time -= 1
if self.time == 0:
self.perform()
| true |
6b77b3bd9f1763f29c2abbb0fb90638f639aa93b
|
Python
|
arupiot/UDMI-dummy
|
/Interface.py
|
UTF-8
| 3,728 | 2.703125 | 3 |
[
"MIT"
] |
permissive
|
import curses
from curses.textpad import Textbox, rectangle
from curses import wrapper
from time import sleep
import threading
SPACE_CHAR = 32
TITLE_ROW = 0
AUTOSEND_INFO_ROW = 3
MESSAGE_INFO_ROW = 4
KEYMAP_START_ROW = 5
SENDING_ROW = 8
TOPIC_INFO_START_ROW = 13
BROKER_INFO_START_ROW = 14
EXIT_START_ROW = 16
class Interface():
def __init__(self, device):
self.device = device
self.auto_send = False
self.auto_send_delay = 0.4
self.auto_timer_started_at = 0
self.auto_timer_elapsed = 0
self.stdscr = None
        # the break flag must exist before the worker thread starts reading it
        self.auto_send_break = False
        self.auto_send_thread = threading.Thread(target=self.autoMessage, args=())
        self.auto_send_thread.start()
wrapper(self.main)
def cleanup(self):
self.stdscr.clrtoeol()
self.stdscr.refresh()
def autoMessage(self):
while 1:
if self.auto_send:
self.sendRandomMessage()
sleep(self.auto_send_delay)
if self.auto_send_break: break
def dynamicKeyPress(self, c):
for point, val in self.device.value_mapping.items():
in_char = chr(c)
if in_char == val[1] and val[0] == 'digital':
self.cleanup()
msg = self.device.generateMessage(point)
self.device.broker.sendMessage(self.device.pub_topic, msg)
self.cleanup()
self.stdscr.addstr(SENDING_ROW, 0, "Last sent: " + str(msg))
return True
return False
def sendRandomMessage(self):
self.cleanup()
msg = self.device.generateMessage()
self.device.broker.sendMessage(self.device.pub_topic, msg)
self.cleanup()
self.stdscr.addstr(SENDING_ROW, 0, "Last sent: " + str(msg))
def main(self, stdscr):
self.stdscr = stdscr
stdscr.addstr(TITLE_ROW, 0, "In a world of fancy interfaces, welcome to the UDumMI!")
stdscr.addstr(AUTOSEND_INFO_ROW, 0, "Press the space bar to autosend")
stdscr.addstr(MESSAGE_INFO_ROW, 0, "Press (or hold) 's' to send a random message" )
# printing key mapping
keymap_row = KEYMAP_START_ROW
for point, val in self.device.value_mapping.items():
if val[0] == 'digital':
stdscr.addstr(keymap_row, 0, "Press '" + str(val[1]) +"' to toggle '" + str(point) + "' between 0->100")
keymap_row += 1
stdscr.addstr(TOPIC_INFO_START_ROW+keymap_row, 0, "Sending on MQTT topic: '" + str(self.device.pub_topic) + "'")
stdscr.addstr(BROKER_INFO_START_ROW+keymap_row, 0, "to broker: " + str(self.device.broker.host) + " on port: " + str(self.device.broker.port))
stdscr.addstr(EXIT_START_ROW+keymap_row, 0, "Press 'q' to exit")
while 1:
c = stdscr.getch()
if self.dynamicKeyPress(c):
# Message sending logic is done in the method...
pass
elif c == ord('s'):
self.sendRandomMessage()
elif c == SPACE_CHAR:
self.auto_send = not self.auto_send
if self.auto_send:
self.cleanup()
stdscr.addstr(AUTOSEND_INFO_ROW, 0, "Autosend is ON! Press space to turn it off")
self.cleanup()
if not self.auto_send:
self.cleanup()
stdscr.addstr(AUTOSEND_INFO_ROW, 0, "Press the space bar to autosend")
self.cleanup()
elif c == ord('q'):
self.auto_send_break = True
del self.device
break # Exit the while loop
def __del__(self):
curses.endwin()
| true |
4c532d2b44cf83117862b1af98de3ce4dac54219
|
Python
|
j0k/algopractice
|
/qsort/qs2.py
|
UTF-8
| 5,383 | 2.71875 | 3 |
[] |
no_license
|
# 19.06.2017
A = [1,2,3,4,5,6,7,8,4,44,44,3,33,1,1,-1]
def qsort(a):
    # list comprehensions rather than filter(), so this also runs under Python 3
    # (there filter() returns an iterator, which cannot be concatenated with lists)
    return a if len(a) <= 1 else \
        qsort([x for x in a[1:] if x <= a[0]]) + \
        [a[0]] + \
        qsort([x for x in a[1:] if x > a[0]])
# myLang
# qsort(a): a if len(a)<= 1 else qsort( a( # <= a[0] ) ),a[0],qsort( a( # > a[0] ) )
# qsort(a):
# p = ?a
# a if len(a)<= 1 else qsort( a[^(p)] ( # <= a[p] ) ),[a[p]],qsort( a[^(p)]( # > a[p] ) )
# a if len(a)<= 1 else qsort( a[^p] ( # <= a[p] ) ),[a[p]],qsort( a[^p]( # > a[p] ) )
# a if len(a)<= 1 else b=a[^p];qsort( b( # <= a[p] ) ),[a[p]],qsort( b( # > a[p] ) )
# a if len(a)<= 1 else b=a[^p];self( b( # <= a[p] ) ),[a[p]],self( b( # > a[p] ) )
# qsort(a):
# (e,p) = ?a;
# b = a[^p];
# self( b( # <= a[p] ) ), [a[p]],self( b( # > a[p] ) )
# qsort(a):
# (_,p) = ?a;
# b = a[^p];
# (L,R) = b( # <= a[p] )
# self(l), [ a[p] ], self(R)
# optimal version
#
# qsort(a):
# ret a if ( #a <= 1 )
# (ap,p) = ?a;
# [L,R] = a[^p] ( (# <= ap),(# > ap) )
# self(l), ap, self(R)
# optimal version
#
# qsort(a):
# ret a if ( #a <= 1 ) else
# (ap,p) = ?a;
# [L,R] = a[^p] ( (# <= ap),(# > ap) )
# self(l), ap, self(R)
# optimal version
#
# qsort(a):
# ret a if ( #a <= 1 ) else
# (ap,p) = ?a;
# [L,R] = a[^p] ( <= ap, > ap )
# self(l), ap, self(R)
# optimal version
#
# qsort(a):
# ret a if ( #a <= 1 ) else
# (ap,p) = ?a;
# [L,R] = a[^p] ( <=, > ap )
# self(l), ap, self(R)
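# A runnable Python sketch of the pseudocode above (assumed semantics: pick a
# random pivot, drop it from the array, partition the rest around it, recurse):
import random

def qsort_random_pivot(a):
    if len(a) <= 1:                    # ret a if ( #a <= 1 )
        return a
    p = random.randrange(len(a))       # (ap, p) = ?a
    ap = a[p]
    rest = a[:p] + a[p+1:]             # a[^p]
    L = [x for x in rest if x <= ap]   # [L,R] = a[^p] ( <=, > ap )
    R = [x for x in rest if x > ap]
    return qsort_random_pivot(L) + [ap] + qsort_random_pivot(R)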
# first compile
# run on data
# execution will determine object types ( that a is an array )
# recompile
# get superoptimized code
# _asm(qsort) -> get assemblers code
# _inter(qsort) -> get intermediate code
# optimal version
#
# [1]: qsort(a):
# [2]: ret a if ( #a <= 1 ) else
# [3]: (ap,p) = ?a;
# [4]: [L,R] = a[^p] ( (# <= ap),(# > ap) )
# [5]: self(l), ap, self(R)
#
# code = _inter(qsort)
# code[3]
# ccode = _ccode(qsort)
# ccode[3]
# lispcode = _lisp(qsort)
# code[3]
#
#
#
#
# abstract qsort(a):
# a if ( #a == 0 ) else qsort(_1),[_2],qsort(_3)
#
# def qsort(a)
# (ap,p) = ?a;
# _2 = ap
# [_1,_3] = a[^p] (<=,> ap)
# we can get that construction using
# optimal version
#
# qsort(a):
# ret a if ( #a <= 1 ) else
# (ap,p) = ?a;
# [L,R] = a[^p] ( <=, > ap )
# self(l), [ap], self(R)
#
# abstragate (qsort)
#
# mylang will have a very powerful abstragate method
# myLang - Abstract Juicy Collaborative Language (ajcl)
# you can do
# compform = _completeform(qsort)
# cf_qsort = compile(compform)
#
# print compform
# '''
# qsort(array a):
# if #a <= 1:
# return a
# else:
# (a_element, a_position) = random_choice (a)
# # (ap, p) = random_choice (a) // i don't know how it will be better
# a1 = a[out a_position]
# [L,R] = b.[select(<=,> a_element)]
# return qsort(L), [ap], qsort(R)
# '''
# a[^p] = a[ .. ^p ..] = a[ out(p) ] = a[ out p ] = a[]
#
# I don't show the most powerful interactive method for JACL
# maybe it will be J DIALOGED and C A L (JDCAL) or adl
#
# let try
#
# > def qsort(a)
#
# << created def qsort(a)
#
# > it will sort a
#
# << OK. _algorithm_sort in qsort.class
#
# > if #a <= 1 it will return a
#
# << OK. return a if #a <= 1
#
# > show
#
# << return a if #a <= 1
#
# > show object
#
# << def qsort(a)
# return a if #a <= 1
#
# > it will take random element in a
#
# < OK. element = ?a
#
# > and take all elements which <= element and > element
#
# << [A,B] = b( <=, > element )
#
# let A be Left, B - right
#
# << [Left, right] = b(<=, > element)
#
# > element = elem
#
# << elem = ?a
# << [Left, right] = a(<=, > elem)
#
# > if #a >= 1 it will return left,[elem],right
#
# << if #a >= 1: return left,[elem],right
#
# > show all
#
# << def qsort(a)
# return a if #a <= 1
# elem = ?a
# [Left, right] = a(<=, > elem)
# if #a >= 1: return left,[elem],right
#
# > format it and show
#
# << def qsort(a)
# return a if #a <= 1
# elem = ?a
# [Left, right] = a(<=, > elem)
# return left,[elem],right if #a >= 1
#
# ** also I can click on qsort and write examples of input data a
# ** a = [1,2,3,4,2,1,0]
#
# ** when I write >> make release version it will ask type(a) ~ int[]?
# then I check that type(a) ~ double[] or real[] or float[] or just
# example a = [-1,10,-2.0]
#
# ** also I can run qsort on test dataset at each moment I want during the development
# ** also i can use unicode to code лев,прав = a(<=, > elem)
## ** I found the error - cause i don't need to remove all elements = ?a
#
# > i don't need to remove all elements == elem
#
# <<? look at: [Left, right] = a(<=, > elem)
#
# > I have to drop a[elem position] before selecting
#
# << elem,position = ?a
# [Left, right] = a[^postion](<=, > elem)
#
# > format to short form and show all
#
# << def qsort(a)
# a if #a <= 1 else:
# elem, position = ?a
# [Left, right] = a[^position](<=, > elem)
# return left,[elem],right
#
# ** also at each moment I can choose any line and run to it.
#
# > qsort to lispform
#
# << (defun qsort(a)
# (if (<= #a , 1),
# a,
# (= [elem, position],? a)
# (= [Left, right], (<=, > elem) a[^position])
# (, left, [elem] ,right)))
#
# *** it's all about smart parsers with feedback and backpropagation
#
# ** it will be useful to have a simple plugin|lib to write GUI desktop apps,
# iOS apps, Android apps and others
print qsort(A)
print qsort([-1,-2,-3])
| true |
db0c2c01568eac5defcacda1b174455e44c9729a
|
Python
|
Divisekara/Python-Codes-First-sem
|
/PA1/PA1 2015/pa1-19-2015.py
|
UTF-8
| 577 | 3.578125 | 4 |
[] |
no_license
|
def five_check(L):
u=0
d=0
for i in range(0,4):
diff=L[i]-L[i+1]
if diff<0:
u+=1
elif diff>0:
d+=1
if u==4:
return "upward"
elif d==4:
return "downward"
else:
return "unpredictable"
while True:
try:
L=map(int,raw_input().split())
except ValueError:
print "enter numerical integers only\n"
else:
status=[]
for i in range(len(L)-4):
status.append(five_check(L[i:i+5]))
print "Trend:" , ", ".join(status) , "\n"
| true |
272208357731dcdaaed993fdaa4348c4aea3d6ae
|
Python
|
HiPERCAM/hipercam
|
/hipercam/scripts/makeflat.py
|
UTF-8
| 18,744 | 2.671875 | 3 |
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
import sys
import os
import tempfile
import signal
import numpy as np
from trm import cline
from trm.cline import Cline
import hipercam as hcam
from hipercam import utils, spooler
__all__ = [
"makeflat",
]
####################################################
#
# makeflat -- makes flat fields from a set of frames
#
####################################################
def makeflat(args=None):
"""``makeflat [source] (run first last [twait tmax] | flist)
bias dark ngroup ccd lower upper [clobber] output``
Averages a set of images to make a flat field.
Typically flat-fields for HiPERCAM and ULTRA(CAM|SPEC) are taken with a
strongly time-variable twilight sky as the Sun sets or rises. A typical
flat field run may start out bright, or even saturated, but by the end be
only a few thousand counts above bias. Moreover, there are very often
stars visible in the images, so we usually take them while offsetting the
telescope in a spiral pattern. The challenge is to combine these images
while rejecting the stars and saturated frames and giving due weight to
the better exposed images. This moreover has to be done for each CCD which
vary significantly in sensitivity.
'makeflat' does this as follows: given an input list of files (or
optionally a single run), it reads them all in, debiases them
(optionally), and calculates the mean count level in each CCD,
normalises by the mean and writes out the results to temporary
files. For each CCD it then sorts the files by their (original)
mean level, and for those that lie between defined limits it takes
the median of the mean-mormalised frames in groups of defined
size. Thus, say one had 75 OK images, then these would be divided
into 10 groups, the first 9 having 7 frames, the last having
16. The median average of each of these would be taken. In each
case the mean levels would be adjusted to be the same before
taking the average to overcome the problem of taking a median of a
time-variable sky. The assumption is that while the level may
vary, the pattern of the image does not. It is up to the user to
check that this is correct. Each of the medians is adjusted to
have a mean equal to the sum of the means of the input
frames. Finally the normal average of all of these median frames
is taken and the mean level of the final output normalised to
1. The first step, taking the median in groups is designed to
remove the stars assuming that the telescope was spiralled. The
size of the groups ('ngroup' below is a crucial parameter in
whether this works). A good strategy is to run makeflat for a
succession of ever larger 'ngroup' and then to divide the results
into each other to see if stars are visible.
The final step, the average of the medians with adjusted mean
levels, is to ensure that the flats are combined in a way that
reflects the level of signal that they have, i.e. to avoid giving
equal weights to the median of a series of flats with 20,000 counts
per pixel and another series with 1,000 counts per pixel. This
somewhat complex procedure is implemented through a series of
temporary files which are written and read as the script runs, but
deleted at its end. This allows very large numbers to be combined
as long as there is enough memory to load 'ngroup' CCDs
simultaneously, which should usually be fine.
Parameters:
source : str [hidden]
Data source, five options:
| 'hs' : HiPERCAM server
| 'hl' : local HiPERCAM FITS file
| 'us' : ULTRACAM server
| 'ul' : local ULTRACAM .xml/.dat files
| 'hf' : list of HiPERCAM hcm FITS-format files
'hf' is used to look at sets of frames generated by 'grab'
or converted from foreign data formats. The standard
start-off default for ``source`` can be set using the
environment variable HIPERCAM_DEFAULT_SOURCE. e.g. in bash
:code:`export HIPERCAM_DEFAULT_SOURCE="us"` would ensure it
always started with the ULTRACAM server by default. If
unspecified, it defaults to 'hl'.
run : str [if source ends 's' or 'l']
run number to access, e.g. 'run034'
first : int [if source ends 's' or 'l']
exposure number to start from. 1 = first frame ('0' is
not supported).
last : int [if source ends 's' or 'l']
last exposure number must be >= first or 0 for the whole lot.
twait : float [if source ends 's' or 'l'; hidden]
time to wait between attempts to find a new exposure, seconds.
tmax : float [if source ends 's' or 'l'; hidden]
maximum time to wait between attempts to find a new exposure,
seconds.
flist : str [if source ends 'f']
           name of file list. It is assumed that these are bias and dark corrected.
bias : str
Name of bias frame to subtract, 'none' to ignore.
dark : str
Name of dark frame to subtract, 'none' to ignore. Note that
it is assumed all CCDs have the same exposure time when making
a dark correction.
ngroup : int
the number of frames. Probably should be at least 5, preferably
more. Experiment to see its effect.
ccd : str
CCD(s) to process, '0' for all, '1 3' for '1' and '3' only, etc.
Would almost always expect this to be set = '0'.
lower : list of floats
Lower limits to the mean count level for a flat to be included. The
count level is determined after bias subtraction. Should be the
same number as the selected CCDs, and will be assumed to be in the
           same order. Use this to eliminate frames that are of so low a level
that the accuracy of the bias subtraction could be a worry.
Suggested hipercam values: 3000 for each CCD. Enter values separated
by spaces.
upper : list of floats
Upper limits to the mean count level for a flat to be included. The
count level is determined *after* bias subtraction. Should be the
same number as the selected CCDs, and will be assumed to be in the
same order. Use this to eliminate saturated, peppered or non-linear
frames. Suggested hipercam values: 58000, 58000, 58000, 40000 and
40000 for CCDs 1, 2, 3, 4 and 5. Enter values separated by spaces.
ULTRACAM values 49000, 29000, 27000 for CCDs 1, 2 and 3.
clobber : bool [hidden]
clobber any pre-existing output files
output : str
output file. will be set by default to match the input name.
.. Note::
This routine writes the files returned by 'grab' to
automatically generated files, typically in .hipercam/tmp, to
avoid polluting the working directory. These are removed at
the end, but may not be if you ctrl-C. You should check
.hipercam/tmp for redundant files every so often
"""
command, args = cline.script_args(args)
# get the inputs
with Cline("HIPERCAM_ENV", ".hipercam", command, args) as cl:
# register parameters
cl.register("source", Cline.GLOBAL, Cline.HIDE)
cl.register("run", Cline.GLOBAL, Cline.PROMPT)
cl.register("first", Cline.LOCAL, Cline.PROMPT)
cl.register("last", Cline.LOCAL, Cline.PROMPT)
cl.register("twait", Cline.LOCAL, Cline.HIDE)
cl.register("tmax", Cline.LOCAL, Cline.HIDE)
cl.register("flist", Cline.LOCAL, Cline.PROMPT)
cl.register("bias", Cline.LOCAL, Cline.PROMPT)
cl.register("dark", Cline.LOCAL, Cline.PROMPT)
cl.register("ngroup", Cline.LOCAL, Cline.PROMPT)
cl.register("ccd", Cline.LOCAL, Cline.PROMPT)
cl.register("lower", Cline.LOCAL, Cline.PROMPT)
cl.register("upper", Cline.LOCAL, Cline.PROMPT)
cl.register("clobber", Cline.LOCAL, Cline.HIDE)
cl.register("output", Cline.LOCAL, Cline.PROMPT)
# get inputs
default_source = os.environ.get('HIPERCAM_DEFAULT_SOURCE','hl')
source = cl.get_value(
"source",
"data source [hs, hl, us, ul, hf]",
default_source,
lvals=("hs", "hl", "us", "ul", "hf"),
)
# set a flag
server_or_local = source.endswith("s") or source.endswith("l")
if server_or_local:
resource = cl.get_value("run", "run name", "run005")
root = os.path.basename(resource)
cl.set_default('output', cline.Fname(root, hcam.HCAM))
first = cl.get_value("first", "first frame to average", 1, 1)
last = cl.get_value("last", "last frame to average (0 for all)", first, 0)
if last < first and last != 0:
sys.stderr.write("last must be >= first or 0")
sys.exit(1)
twait = cl.get_value(
"twait", "time to wait for a new frame [secs]", 1.0, 0.0
)
tmax = cl.get_value(
"tmax", "maximum time to wait for a new frame [secs]", 10.0, 0.0
)
else:
resource = cl.get_value(
"flist", "file list", cline.Fname("files.lis", hcam.LIST)
)
first = 1
# bias frame (if any)
bias = cl.get_value(
"bias",
"bias frame ['none' to ignore]",
cline.Fname("bias", hcam.HCAM),
ignore="none",
)
# dark frame (if any)
dark = cl.get_value(
"dark",
"dark frame ['none' to ignore]",
cline.Fname("dark", hcam.HCAM),
ignore="none",
)
ngroup = cl.get_value(
"ngroup", "number of frames per median average group", 3, 1
)
ccdinf = spooler.get_ccd_pars(source, resource)
if len(ccdinf) > 1:
ccd = cl.get_value("ccd", "CCD(s) to process [0 for all]", "0")
if ccd == "0":
ccds = list(ccdinf.keys())
else:
ccds = ccd.split()
else:
ccds = list(ccdinf.keys())
        # need to check that the default has the right number of items; if not,
        # override it
lowers = cl.get_default("lower")
if lowers is not None and len(lowers) != len(ccds):
cl.set_default("lower", len(ccds) * (5000,))
lowers = cl.get_value(
"lower",
"lower limits on mean count level for included flats, 1 per CCD",
len(ccds) * (5000,)
)
uppers = cl.get_default("upper")
if uppers is not None and len(uppers) != len(ccds):
cl.set_default("upper", len(ccds) * (50000,))
uppers = cl.get_value(
"upper",
"lower limits on mean count level for included flats, 1 per CCD",
len(ccds) * (50000,)
)
clobber = cl.get_value(
"clobber", "clobber any pre-existing files on output", False
)
output = cl.get_value(
"output",
"output average",
cline.Fname(
"hcam", hcam.HCAM, cline.Fname.NEW if clobber else cline.Fname.NOCLOBBER
)
)
# inputs done with.
if server_or_local or bias is not None or dark is not None:
print("\nCalling 'grab' ...")
args = [None, "prompt", source, "yes", resource]
if server_or_local:
args += [str(first), str(last),str(twait), str(tmax)]
args += [
"no",
"none" if bias is None else bias,
"none" if dark is None else dark,
"none", "none", "f32",
]
resource = hcam.scripts.grab(args)
# at this point 'resource' is a list of files, no matter the input
# method. 'fnames' below will be used to store still more temporaries
fnames = []
with CleanUp(
resource, fnames,
server_or_local or bias is not None or dark is not None
) as cleanup:
# Read all the files to determine mean levels (after bias
# subtraction) save the bias-subtracted, mean-level normalised
# results to temporary files
print("Reading all files in to determine their mean levels")
bframe, dframe = None, None
means = {}
for cnam in ccds:
means[cnam] = {}
# We might have a load of temporaries from grab, but we are about to
# make some more to save the normalised versions.
tdir = utils.temp_dir()
with spooler.HcamListSpool(resource) as spool:
for mccd in spool:
# here we determine the mean levels, store them
# then normalise the CCDs by them and save the files
# to disk
# generate the name to save to automatically
fd, fname = tempfile.mkstemp(suffix=hcam.HCAM, dir=tdir)
for cnam in ccds:
                # it's unlikely that flats would be taken with skips, but
# you never know. Eliminate them from consideration now.
ccd = mccd[cnam]
if ccd.is_data():
cmean = mccd[cnam].mean()
means[cnam][fname] = cmean
mccd[cnam] /= cmean
# write the disk, save the name, close the filehandle
fnames.append(fname)
mccd.write(fname)
os.close(fd)
# a bit of progress info
print(f"Saved processed flat to {fname}")
# now we go through CCD by CCD, using the first as a template
# for the window names in which we will also store the results.
template = hcam.MCCD.read(fnames[0])
for cnam, lower, upper in zip(ccds, lowers, uppers):
tccd = template[cnam]
# get the keys (filenames) and corresponding mean values
mkeys = np.array(list(means[cnam].keys()))
mvals = np.array(list(means[cnam].values()))
# chop down to acceptable ones
ok = (mvals > lower) & (mvals < upper)
mkeys = mkeys[ok]
mvals = mvals[ok]
# some more progress info
print("Found {:d} frames for CCD {:s}".format(len(mkeys), cnam))
if len(mkeys) == 0:
print(
(".. cannot average 0 frames;" " will skip CCD {:s}").format(cnam)
)
continue
elif len(mkeys) < ngroup:
print(
(
"WARNING: fewer than ngroup = {:d} frames"
" found. Output for CCD {:s} could be poor"
).format(ngroup, cnam)
)
nchunk = len(mkeys) // ngroup
if nchunk == 0:
nchunk = 1
# sort by mean value
isort = mvals.argsort()
mvals = mvals[isort]
mkeys = mkeys[isort]
            # wsum is used to sum all the weight factors to allow overall
            # normalisation at the end of the loop
wsum = 0.0
for n in range(nchunk):
# loop through in chunks of ngroup at a time with a
# potentially larger group to sweep up the end ones.
n1 = ngroup * n
n2 = n1 + ngroup
                if n == nchunk - 1:
n2 = len(mkeys)
# load the CCDs of this group
ccdgroup = []
with spooler.HcamListSpool(list(mkeys[n1:n2]), cnam) as spool:
for ccd in spool:
ccdgroup.append(ccd)
# take median of the group to get rid of jumping
# stars. 'weight' used to weight the results when summing the
# results together. this stage is like the 'n' option of
# 'combine' except we have already cut out any junk frames and
# we have normalised the remainder
weight = mvals[n1:n2].sum()
wsum += weight
for wnam, wind in tccd.items():
# go through each window, building a list of all data
# arrays
arrs = [ccd[wnam].data for ccd in ccdgroup]
arr3d = np.stack(arrs)
# at this point, arr3d is a 3D array, with the first
# dimension (axis=0) running over the images. We take the
# median over this axis. The first time through we put
# this straight into the output Window. afterwards we add
# it in (with the appropriate weight)
if n == 0:
wind.data = weight * np.median(arr3d, axis=0)
else:
wind.data += weight * np.median(arr3d, axis=0)
# Normalise the final result to a mean = 1.
tccd /= wsum
# Add some history
tccd.head.add_history(
("result of makeflat on {:d}" " frames, ngroup = {:d}").format(
len(mkeys), ngroup
)
)
# Remove any CCDs not included to avoid impression of having done
# something to them
dcnams = []
for cnam in template.keys():
if cnam not in ccds:
dcnams.append(cnam)
for cnam in dcnams:
del template[cnam]
# write out
template.write(output, clobber)
print("\nFinal result written to {:s}".format(output))
print('makeflat finished')
class CleanUp:
"""
Context manager to handle temporary files
"""
def __init__(self, flist, fnames, temp):
self.flist = flist
self.fnames = fnames
self.temp = temp
def _sigint_handler(self, signal_received, frame):
print("\nmakeflat aborted")
sys.exit(1)
def __enter__(self):
signal.signal(signal.SIGINT, self._sigint_handler)
def __exit__(self, type, value, traceback):
if self.temp:
with open(self.flist) as fp:
for line in fp:
os.remove(line.strip())
os.remove(self.flist)
for fname in self.fnames:
if os.path.exists(fname):
os.remove(fname)
print('temporary files removed')
| true |
ad22cc995f67c82ab97d4e572311f56be8756113
|
Python
|
ladysilverberg/IN1000
|
/Oblig 2/egenoppgave.py
|
UTF-8
| 2,149 | 3.65625 | 4 |
[] |
no_license
|
# Task:
# Make a quiz program that asks the user questions, checks whether the answers
# are correct, and keeps score.
def still_sporsmaal(tekst):
if type(tekst) == str:
print("---------------------------")
print(tekst)
return input()
else:
print("Sporsmaalet er ikke en streng-variabel!")
exit()
def print_svar(svar):
print("Du svarte feil! (Det er nå du går i et hjørne og gråter litt c': )")
print("Riktig svar var: " + svar)
print("-------------------------------")
poeng = 0
print("Velkommen til det fantastiske quizprogrammet!")
if still_sporsmaal("Hvordan skrives 'God morgen' på japansk (roumaji)").lower() == "ohayou":
poeng += 1
print("Du svarte riktig, men dette var jo ikke så vanskelig?")
else:
print_svar("ohayou")
if still_sporsmaal("Hvilken 19-åring ble brent på bålet for hekseri i 1431?").lower() == "jeanne d'arc":
poeng += 1
print("Du svarte riktig! Slightly stolt av deg c':")
else:
print_svar("jeanne d'arc")
if still_sporsmaal("Hvilken spillfigure er kjent for å bruke sverdet 'parallell falchion'?").lower() == 'lucina':
poeng += 1
print("Du svarte riktig!")
else:
print_svar("lucina")
if still_sporsmaal("Gitt at du er intern, hvor mye koster en dobbel latte i Escape?").lower() == '13':
poeng += 1
print("Du svarte riktig!")
else:
print_svar('13')
if still_sporsmaal("Hvor mange er det i kø på kundeservice hos Telenor akkurat nå?").lower == "så mange at du legger på":
poeng += 1
print("Du svarte riktig!")
else:
print_svar("så mange at du legger på")
if still_sporsmaal("Smaker ananas godt?").lower() == "nei! ananas er heresy så til de grader og kan brenne i helvete!":
poeng += 1
print("Ikke bare svarte du riktig, men du har også veldig god matsans!")
else:
print_svar("nei! ananas er heresy så til de grader og kan brenne i helvete!")
print("-------------------------------")
print("Du fikk " + str(poeng) + " poeng!")
if poeng == 0:
print("Dette var jo ikke så bra...")
elif poeng >= 4:
print("Great!")
elif poeng >= 2:
print("Du gjorde vel en fair jobb, I guess?")
| true |
d5dcf19a5cda95c3277cb1f23d93f0fccad685ed
|
Python
|
juntakahashi777/PersonalWebpage
|
/server.py
|
UTF-8
| 849 | 2.5625 | 3 |
[] |
no_license
|
from flask import *
app = Flask(__name__)
import os
import json
@app.route("/delete")
def delete_url():
jsonFile = open("myWebsites.json", "r");
data = json.load(jsonFile);
index = request.args.get('index');
data["links_1"].pop(int(index)-1);
jsonFile = open("myWebsites.json", "w");
json.dump(data, jsonFile);
jsonFile.close();
return redirect(url_for('homepage'))
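# Example request (hypothetical): GET /delete?index=1 removes the first saved
# link and redirects back to the homepage.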
@app.route("/", methods=['GET', 'POST'])
def homepage():
jsonFile = open("myWebsites.json", "r");
data = json.load(jsonFile);
if request.method == 'POST':
new_url = request.form['url'];
entry = {"url": new_url}
data["links_1"].append(entry);
with open("myWebsites.json", mode='w') as jsonFile:
json.dump(data, jsonFile);
jsonFile.close();
return render_template('index.html', links_1=data["links_1"]);
if __name__ == "__main__":
app.run(debug=True)
| true |
6278262bade9f0eb966089c8546e27003676a5e0
|
Python
|
samuelsandoval1/CSSI-2019
|
/Python/make-a-short-story/mystory.py
|
UTF-8
| 382 | 3.28125 | 3 |
[] |
no_license
|
print "MAD LIBS"
noun1 = raw_input("Enter a noun: ")
adjective1 = raw_input("Enter an adjective: ")
noun2 = raw_input("Enter a noun: ")
noun3 = raw_input("Enter a noun: ")
verb1 = raw_input("Enter a verb ending in -ing: ")
print ("The " + noun1 + " hopped over a " + adjective1 + " " + noun2 + ". Then the " +
    noun3 + " decided to stop being so and take up a hobby: " + verb1 + ".")
| true |
6bd16d725bc615d9279eda923b5f5db4c416468e
|
Python
|
BaymaxBei/data-structure
|
/str/find_largest_substr.py
|
UTF-8
| 759 | 3.578125 | 4 |
[] |
no_license
|
'''
Find the longest substring without repeated characters and return its length.
'''
def find_longest_unrepeat_substr(string):
max_len = 0
start_index = 0
num_dict = {}
index_dict = {}
for i, s in enumerate(string):
s_num = num_dict.get(s, 0)
if s_num>0:
s_index = index_dict[s]
for j in range(start_index, s_index):
num_dict[string[j]] = 0
max_len = max(max_len, i-start_index)
start_index = s_index + 1
index_dict[s] = i
else:
num_dict[s] = 1
index_dict[s] = i
max_len = max(max_len, i-start_index+1)
return max_len
if __name__ == '__main__':
string = 'abccaddefagr'
print(find_longest_unrepeat_substr(string))
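# Expected output: 6 (the longest run without repeats is 'defagr')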
| true |
ad4b42d4d5c050b8cba325e17e017bfee49a6cd5
|
Python
|
haroldhyun/Algorithm
|
/Machine Learning/k-means.py
|
UTF-8
| 1,589 | 3.046875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 8 17:45:52 2021
@author: Harold
"""
import numpy as np
def km_assignment_step(data, Mu):
""" Compute K-Means assignment step
Args:
data: a NxD matrix for the data points
Mu: a DxK matrix for the cluster means locations
Returns:
R_new: a NxK matrix of responsibilities
"""
N, D = data.shape # Number of datapoints and dimension of datapoint
K = Mu.shape[1] # number of clusters
r = np.zeros((N, K))
for k in range(K):
# r[:, k] = ...
r[:, k] = np.linalg.norm(data - np.array([Mu[:, k], ] * N), axis=1)**2
# arg_min = ... # argmax/argmin along dimension 1
# axis = 1 -> by rows
arg_min = np.argmin(r, axis = 1)
# R_new = ... # Set to zeros/ones with shape (N, K)
# R_new[..., ...] = 1 # Assign to 1
R_new = np.zeros((N,K))
R_new[np.array(range(N)), arg_min] = 1
return R_new
def km_refitting_step(data, R, Mu):
""" Compute K-Means refitting step.
Args:
data: a NxD matrix for the data points
R: a NxK matrix of responsibilities
Mu: a DxK matrix for the cluster means locations
Returns:
Mu_new: a DxK matrix for the new cluster means locations
"""
N, D = data.shape # Number of datapoints and dimension of datapoint
K = Mu.shape[1] # number of clusters
# axis = 0 will fix the column
Mu_new = np.dot(data.T, R)/np.sum(R, axis = 0)
return Mu_new
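# A minimal usage sketch (hypothetical data; assumes K=3 clusters of 2-D points
# and that no cluster ends up empty, which would divide by zero in the refit):
if __name__ == '__main__':
    np.random.seed(0)
    data = np.random.randn(100, 2)   # N=100 points, D=2
    Mu = np.random.randn(2, 3)       # D x K initial means
    for _ in range(10):              # alternate assignment and refitting steps
        R = km_assignment_step(data, Mu)
        Mu = km_refitting_step(data, R, Mu)
    print(Mu)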
| true |
c802304debff7ecf34d0b84b313898b37c245484
|
Python
|
rchenhyy/whatever
|
/iter/my_iterator.py
|
UTF-8
| 349 | 3.765625 | 4 |
[] |
no_license
|
class MyIterator:
def __init__(self, start, end):
self._next = start
self._end = end
def __iter__(self):
return self
def next(self):
if self._next > self._end:
raise StopIteration()
n = self._next
self._next += 1
return n
for i in MyIterator(0, 10000):
print i
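# Note: this file targets Python 2 (iterator method `next`, print statement).
# A hedged Python 3 equivalent would expose `__next__` instead:
#
# class MyIterator3(MyIterator):
#     def __next__(self):
#         return MyIterator.next(self)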
| true |
9d37237e048025389ab9add585fcd8969b28aba7
|
Python
|
santigo171/learning-python
|
/loops/potency2.py
|
UTF-8
| 435 | 4.0625 | 4 |
[] |
no_license
|
# while loop
def run(number, limit):
potency = 0
result = number ** potency
while result <= limit:
print(str(number) + "^" + str(potency) +
" = " + str(result))
potency = potency + 1
result = number ** potency
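# Worked example (hypothetical call): run(2, 20) prints
#   2^0 = 1, 2^1 = 2, 2^2 = 4, 2^3 = 8, 2^4 = 16
# and then stops, because 2^5 = 32 exceeds the limit.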
if __name__ == "__main__":
number = int(input("Qué número quieres elevar? "))
limit = int(input("A que número limite quieres llegar? "))
run(number, limit)
| true |
60315d32f887dad49689121b2350d25729fbdb48
|
Python
|
elados93/AI_Ex1
|
/BFS.py
|
UTF-8
| 1,170 | 3.53125 | 4 |
[] |
no_license
|
from TilePuzzleLogic import backtracking
class BFS_Search(object):
    """
    BFS_Search is used to search a given graph level by level.
    The search method uses the logic given in the constructor.
    """
def __init__(self, logic):
self._logic = logic
def search(self, init_state):
"""
Search the given state with the current logic in order to find path for the goal.
:param init_state: The state of the puzzle.
:return: Tuple of: path of operators, the number of states we visited, 0.
"""
open_list = []
count = 0
open_list.append(init_state)
while len(open_list):
current = open_list.pop(0)
# count all the states we popped from the queue
count += 1
if self._logic.is_goal_state(current):
return backtracking(current), str(count), '0'
children = self._logic.next_states(current)
for child in children:
open_list.append(child)
# BFS is complete, but if there is no answer raise an exception.
raise Exception('Puzzle is not solvable!')
| true |
34993894bb9026746701ae562257c5e4c9b81030
|
Python
|
reywridyll/Soteria
|
/HaarAnalyser.py
|
UTF-8
| 2,805 | 2.640625 | 3 |
[] |
no_license
|
#- import the necessary packages -#
import numpy as np
import cv2
# Load video
video = cv2.VideoCapture(f'data/distance_test.mp4')
# Load cascade
body_cascade = cv2.CascadeClassifier('haar/cascade.xml')
# Configure blob detection
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 200
params.maxThreshold = 255
# Filter by Area
params.filterByArea = True
params.minArea = 30
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.35
# Filter by Convexity
params.filterByConvexity = False
params.minConvexity = 0.4
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.1
# Initialize blob detection
detector = cv2.SimpleBlobDetector_create(params)
# Set kernel
kernel = np.array([[1,1,1],
[0,0,0],
[1,1,1]], np.uint8)
# Skip early frames for demo purposes
skip = input("Skip first N frames: ")
counter = 0
while(video.isOpened()):
counter += 1
ret, im = video.read()
if counter > int(skip):
# Convert image to grey scale
grey = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# Apply denoising filter
grey_denoised = cv2.fastNlMeansDenoising(grey)
# Detect objects
bodies = body_cascade.detectMultiScale(grey_denoised)
# Check if cascade detected something
if len(bodies) > 0 :
for body in bodies:
x, y, w ,h = body
# Draw a bounding box and label on the image
im = cv2.rectangle(im,(x,y ,w , h),(36,255,12),2)
cv2.putText(im, "Target", (x, y- 10), cv2.FONT_HERSHEY_SIMPLEX , 0.9, (36,255,12), 2)
# Fall back to blob detection if cascade has no detections
else:
# Detect the lines using canny
line_frame = cv2.Canny(grey_denoised, 100,150)
# Fill the lines using morphology
filled_frame = cv2.morphologyEx(line_frame, cv2.MORPH_CLOSE, kernel, iterations=5)
# Apply thresholding to make blobs easier to detect
retval, threshold = cv2.threshold(filled_frame, 230, 255, cv2.THRESH_BINARY_INV)
# Detect the blobs from the processes image.
keypoints = detector.detect(threshold)
im = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Define a key to break out of the loop
key = cv2.waitKey(1)
if key == ord('q'):
break
# Display frame
cv2.imshow('img',im)
else:
print("Frame skipped!")
video.release()
cv2.destroyAllWindows()
| true |
e5ebcef20cb0a046c1f4cfa86276e043b11fd39c
|
Python
|
DineshNadar95/FE-520
|
/Assignment-2/Temp.py
|
UTF-8
| 416 | 3.25 | 3 |
[] |
no_license
|
# def is_palindrome(x,y):
# # add your code here
# print(x)
def count_char(x):
count = 0
dict1 = {}
for i in x:
if i in dict1:
count = dict1[i]
count = count + 1
dict1[i] = count
else:
dict1[i] = 1
return dict1
print(count_char("anagram"))
# print(tempArr)
# print(is_palindrome(121))
| true |
a4cba11c1e63734363deaac1bb1047b4a0007782
|
Python
|
dduong42/ud120-projects
|
/naive_bayes/nb_author_id.py
|
UTF-8
| 844 | 2.5625 | 3 |
[] |
no_license
|
#!/usr/bin/python
"""
this is the code to accompany the Lesson 1 (Naive Bayes) mini-project
use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import os.path
import sys
from time import time
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(BASE, 'tools'))
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
#########################################################
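### A minimal sketch of one possible completion (an assumption, not the original
### author's code); uses sklearn's GaussianNB, as this Naive Bayes lesson suggests:
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

clf = GaussianNB()
t0 = time()
clf.fit(features_train, labels_train)
print("training time: {}s".format(round(time() - t0, 3)))
pred = clf.predict(features_test)
print("accuracy: {}".format(accuracy_score(labels_test, pred)))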
| true |
2745f8d77c81d803424fa5904367e7728d5a7f86
|
Python
|
iCodeIN/algorithms-9
|
/solutions/Dynamic Programming/UniquePathsInGrid.py
|
UTF-8
| 1,142 | 3.25 | 3 |
[] |
no_license
|
## Website:: Interviewbit
## Link:: https://www.interviewbit.com/problems/unique-paths-in-a-grid/
## Topic:: Dynamic Programming
## Sub-topic::
## Difficulty:: Easy
## Approach:: f(i, j) = f(i-1, j) + f(i, j-1)
## Time complexity:: O(N^2)
## Space complexity:: O(N)
## Notes::
## Bookmarked:: No
class Solution:
# @param A : list of list of integers
# @return an integer
def uniquePathsWithObstacles(self, A):
m = len(A)
n = len(A[0])
num_paths = [[0] * n for _ in range(m)]
for ix in range(m):
for jx in range(n):
if A[ix][jx] == 1:
num_paths[ix][jx] = 0
continue
if ix == 0 and jx == 0:
num_paths[ix][jx] = 1
continue
if ix == 0:
num_paths[ix][jx] = num_paths[ix][jx-1]
continue
if jx == 0:
num_paths[ix][jx] = num_paths[ix-1][jx]
continue
num_paths[ix][jx] = num_paths[ix-1][jx] + num_paths[ix][jx-1]
return num_paths[-1][-1]
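# Worked example (hypothetical grid): a 3x3 grid with a single obstacle in the
# centre has exactly two obstacle-free paths:
#   Solution().uniquePathsWithObstacles([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) -> 2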
| true |
c4ceda67c10ab551545ef1ec46954f0375952abc
|
Python
|
usop7/Python_OOP_Projects
|
/Lectures/1031/annotation.py
|
UTF-8
| 123 | 2.71875 | 3 |
[] |
no_license
|
# It's not enforced; it's just for communication between developers.
def add(num1: int, num2: int) -> int:
return num1 + num2
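# A small demonstration (added example): the annotations are stored on the
# function but never checked, so "wrong" argument types still run.
print(add(1, 2))            # 3
print(add.__annotations__)  # {'num1': <class 'int'>, 'num2': <class 'int'>, 'return': <class 'int'>}
print(add("a", "b"))        # 'ab' -- no error; annotations are not enforced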
| true |
7a93e5e9a68014df3d2586a723a189f1f76388a6
|
Python
|
cmquintanilla/escuelas-database
|
/Evaluacion01/functionsCE.py
|
UTF-8
| 4,271 | 3.453125 | 3 |
[
"Apache-2.0"
] |
permissive
|
import mongoDB as MongoDB
def welcome():
print(120 * '*')
print("* Welcome to Evaluacion01, this program was created for learning purposes and it's a CRUD for School Center Management *")
print("* For this scenario we are using MongoDB and ATLAS for the persistence of the Data", 35 * ' ', '*')
print(120 * '*')
print("\n")
def menu():
print("Choose an option please:")
print("MENU")
print("1- Insert a new School Center")
print("2- Visualize a School Center")
print("3- Update a School Center")
print("4- Delete a School Center")
print("5- Exit")
def optionValidation(op):
try:
option = int(op)
return option
except ValueError:
print("This is not a correct option!!!!")
return -1
def insertSchool(DB: MongoDB):
name = input("Type School Identification Name: ")
department = input("Type the Department's Name where is located: ")
municipality = input("Type the Municipalitie's Name where is located: ")
school = {"_id": "", "Nombre": name,
"Departamento": department, "Municipio": municipality}
DB.insert(school)
print("Successfully inserted!")
print("\n")
def visualizeSchools(DB: MongoDB):
print("What do you whant to do?")
print("1-Visualize all the Schools")
print("2-Search School by Name")
print("3-Search School by ID Number")
op = input("Please type your option: ")
if op == "1":
print("Here all the Schools registered...")
print("\n")
myquery = {}
DB.find(myquery)
for x in DB.document:
print("ID: ", x["_id"])
print("Name: ", x["Nombre"])
print("Department: ", x["Departamento"])
print("Municipality: ", x["Municipio"])
print(30 * '*')
elif op == "2":
name = input("Type the name of the School: ")
myquery = {"Nombre": {"$regex": name}}
DB.find(myquery)
# if DB.document
print("Here what it was found...")
print("\n")
for x in DB.document:
print("ID: ", x["_id"])
print("Name: ", x["Nombre"])
print("Department: ", x["Departamento"])
print("Municipality: ", x["Municipio"])
print(30 * '*')
elif op == "3":
ID = input("Type the ID number of the School: ")
myquery = {"_id": ID}
DB.find(myquery)
# if DB.document
print("Here what it was found...")
print("\n")
for x in DB.document:
print("ID: ", x["_id"])
print("Name: ", x["Nombre"])
print("Department: ", x["Departamento"])
print("Municipality: ", x["Municipio"])
else:
print("You typed a non existent option")
print("\n")
def updateSchool(DB: MongoDB):
school = {}
# Asking for the ID
ID = input("Type School ID Number you want to update: ")
if optionValidation(ID) != -1:
myquery = {"_id": ID}
DB.find(myquery)
if DB.document.count() == 0:
print("Sorry there is no School with that ID number!!!")
return
else:
return
    # Asking for the Name
    name = input("Type New School Identification Name: ")
    if name:  # input() never returns None; treat an empty string as "no change"
        school["Nombre"] = name
    # Asking for the Department
    department = input("Type the New Department's Name where is located: ")
    if department:
        school["Departamento"] = department
    # Asking for the Municipality
    municipality = input(
        "Type the New Municipalitie's Name where is located: ")
    if municipality:
        school["Municipio"] = municipality
school = {"$set": school}
DB.update(myquery, school)
print("Successfully updated!")
print("\n")
def deleteSchool(DB: MongoDB):
school = {}
# Asking for the ID
ID = input("Type School ID Number you want to delete: ")
if optionValidation(ID) != -1:
myquery = {"_id": ID}
DB.find(myquery)
if DB.document.count() == 0:
print("Sorry there is no School with that ID number!!!")
return
else:
return
school["_id"] = ID
DB.delete(school)
print("Successfully deleted!")
print("\n")
| true |
134cfc5b2e6e7596235cb917c01a80bd38756a99
|
Python
|
HarshitaSingh97/FetchGoogleTrends
|
/PygTrends.py
|
UTF-8
| 6,095 | 2.5625 | 3 |
[] |
no_license
|
from pytrends.request import TrendReq
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pandas as pd, requests
class fetchtrends:
latlng="0,0"
def __init__(self,keywords,latitude=None,longitude=None,time=None):
self.keywords = keywords#['asthma','air']
if time is None:
now = datetime.now()
three_yrs_ago = now - relativedelta(years=3)
default = three_yrs_ago.strftime("%Y-%m-%d") + " " + now.strftime("%Y-%m-%d")
self.time = default
else:
self.time = time
if latitude is None:
self.latitude = "0"
else :
self.latitude = latitude
if longitude is None:
self.longitude = "0"
else :
self.longitude = longitude
self.latlng=self.latitude+","+self.longitude
#retreive address of given latitude and longitude. InputFormat: 'latitude,longitude' OutputFormat:JSON
def reverse_geocode(self,latlng):
result = {}
print(latlng)
url = 'https://maps.googleapis.com/maps/api/geocode/json?latlng={}'
request = url.format(latlng)
# handling empty response
while(1):
data = requests.get(request).json()
if len(data['results']) > 0:
result = data['results'][0]
break
return result
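    # Note (assumption about the external service): Google's Geocoding API now
    # requires an API key ('&key=YOUR_KEY' on the request URL); without one the
    # loop above may never see a non-empty 'results' list and will spin forever.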
#Parse 'result' to retrieve Country
def parse_country(self,geocode_data):
if (not geocode_data is None) and ('address_components' in geocode_data):
for component in geocode_data['address_components']:
if 'country' in component['types']:
return component['short_name']
return None
#Parse 'result' to retrieve City
def parse_city(self,geocode_data):
if (not geocode_data is None) and ('address_components' in geocode_data):
for component in geocode_data['address_components']:
#if 'location' in component['types']:
#return component['short_name']
#elif 'postal_town' in component['types']:
#return component['short_name']
#elif 'administrative_area_level_2' in component['types']:
if 'administrative_area_level_1' in component['types']:
return component['short_name']
return None
def fetch(self):
gtgeo=self.reverse_geocode(self.latlng)
city=self.parse_city(gtgeo)
country=self.parse_country(gtgeo)
        # guard against None: parse_city/parse_country may find no component
        if city and country and len(city) == 2 and len(country) == 2:
            gtcode = country + "-" + city
        elif country and len(country) == 2:
            gtcode = country
        else:
            gtcode = ""
pytrends = TrendReq(hl='en-US', tz=360)
pytrends.build_payload(self.keywords, cat=0, timeframe=self.time, geo=gtcode, gprop='')
return pytrends.interest_over_time()
| true |
2bd77734dde340cc4d52f1e42b3935e390e94ced
|
Python
|
hidoos/learn-python
|
/practise/small.py
|
UTF-8
| 109 | 3.109375 | 3 |
[] |
no_license
|
def small(*args):
sum = 0
for i in args:
sum += i
return sum
print small(1,2,3,4,5,10)
| true |
9ce2c1cc2089488e1a32286d6385904e3329b2e2
|
Python
|
aarondizele/python
|
/learning.py
|
UTF-8
| 607 | 3.34375 | 3 |
[] |
no_license
|
import sys
# d = {'a': 'Maman', 'b': 'Papa', 'c':'Grand Father', 'd': 'Grand Mother', 'e': 'Son', 'f': 'Daughter'}
# for k in sorted(d.keys()):
# print('Key '+k.upper()+' -> '+d[k])
# print(d.items()[0])
def File(filename):
f = open(filename, 'rU')
for line in f:
a = line.split()
for word in a:
            # assumption: the original `if(word > 1)` compared a string to an
            # int and `word += 1` appended an int to a string (both fail in
            # Python 3); presumably the intent was words longer than one char:
            if len(word) > 1:
                print(word)
# print(line)
# lines = f.readlines()
text = f.read()
print(text)
f.close()
def main():
File(sys.argv[1])
if __name__ == '__main__':
main()
| true |
635440fa976f5ef5bf9060974e87f2920ea48738
|
Python
|
jfarid27/mcmc-NeuralNetworks
|
/titanicKaggle.py
|
UTF-8
| 1,252 | 2.921875 | 3 |
[] |
no_license
|
import procedures.crossValidate as cV
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import procedures.neuralNetworks as NN
##Defaults
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
target = "Survived"
class Titanic():
def dataClean(self):
data = pd.read_csv("./data/kaggle/titanic/train.csv")
data.loc[data["Sex"] == "male", "Sex"] = 0
data.loc[data["Sex"] == "female", "Sex"] = 1
data.loc[pd.isnull(data["Embarked"]), "Embarked"] = 0
data.loc[data["Embarked"] == "S", "Embarked"] = 0
data.loc[data["Embarked"] == "C", "Embarked"] = 1
data.loc[data["Embarked"] == "Q", "Embarked"] = 2
data.loc[pd.isnull(data["Age"]), "Age"] = data["Age"].median()
data.loc[pd.isnull(data["Fare"]), "Fare"] = data["Fare"].median()
return data
def randomForest(self, predictors=predictors, target=target):
alg = RandomForestClassifier(random_state=1, n_estimators=20, min_samples_split=2, min_samples_leaf=1)
cleanData = self.dataClean()
score = cV.kFold().analyze(cleanData, predictors, target, alg)
return score
if (__name__ == "__main__"):
analysis = Titanic()
rF = analysis.randomForest()
print("Random Forest Score:\n")
print rF
print("Random Forest Score:\n")
print rF
| true |
a5af7b264c97b0e0ee14296d5461216bd3288021
|
Python
|
Ritapanda9009/htrc-feature-reader
|
/htrc_features/transformations.py
|
UTF-8
| 1,639 | 2.6875 | 3 |
[] |
no_license
|
import numpy as np
def chunk_to_wem(chunk_tl, model, vocab=None, stop=True, log=True, min_ncount=10):
''' Take a file that has ['token', 'count'] data and convert to a WEM vector'''
if 'token' in chunk_tl.columns and not 'token' in chunk_tl.index.names:
chunk_tl = chunk_tl.set_index('token')[['count']]
elif 'lowercase' in chunk_tl.columns and not 'lowercase' in chunk_tl.index.names:
chunk_tl = chunk_tl.set_index('lowercase')[['count']]
n_dim = 300
placeholder = np.array(n_dim * [None])
tl = chunk_tl.copy() #Avoidable?
tcolname = 'token' if 'token' in tl.index.names else 'lowercase'
tl.index = tl.index.get_level_values(tcolname)
if not vocab:
vocab = set(model.vocab.keys())
if stop:
from spacy.lang.en.stop_words import STOP_WORDS
vocab = vocab.difference(STOP_WORDS)
# Cross-reference the page or volume vocab with the words in the model
doc_vocab = set(tl.index.get_level_values(tcolname))
joint_vocab = list(vocab.intersection(doc_vocab))
if len(joint_vocab) <= min_ncount:
return placeholder
all_vecs = model[joint_vocab]
# The counts will be used as weights for an average
counts = tl.loc[joint_vocab]['count'].values
if log:
counts = np.log(1+counts)
if counts.shape[0] != all_vecs.shape[0]:
raise BaseException("Counts and all_vecs don't align. Like, this means there are duplicated tokens in the data"
" e.g. Passing in a dataframe with counts for multiple pages/chunks")
doc_wem = np.dot(all_vecs.T, counts)
return doc_wem
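# Hypothetical usage sketch (assumes a gensim KeyedVectors `model` exposing the
# pre-4.0 `.vocab` attribute used above, and a token/count DataFrame `tl`):
#
# from gensim.models import KeyedVectors
# model = KeyedVectors.load_word2vec_format('vectors.bin', binary=True)
# vec = chunk_to_wem(tl, model)  # 300-d weighted document vector, or placeholders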
| true |
8d33d0e9b842f8fb5deb6147c080f159dffbf9e5
|
Python
|
mellykath/CodingChallenges
|
/unit_converter.py
|
UTF-8
| 542 | 4.03125 | 4 |
[] |
no_license
|
type_of_unit_being_entered = input("Enter type of unit being converted ie celsius, farenheit")
type_of_unit_being_converted_to = input("Enter type of unit to convert to")
value_of_input = input("Enter value")
if type_of_unit_being_entered == "celsius" and type_of_unit_being_converted_to == "farenheit":
    value_of_output = (float(value_of_input) * 1.8) + 32
elif type_of_unit_being_entered == "farenheit" and type_of_unit_being_converted_to == "celsius":
    value_of_output = (float(value_of_input) - 32) * 0.5556
else:
    # without this branch, value_of_output is undefined for unsupported unit pairs
    raise SystemExit("Unsupported conversion: only celsius <-> farenheit is handled")
print (value_of_output)
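# Sanity check (worked example): 100 celsius -> (100 * 1.8) + 32 = 212 farenheit,
# and 212 farenheit -> (212 - 32) * 0.5556 ~= 100 celsius (0.5556 approximates 5/9).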
| true |
9c365a3a57f2d21e8ff93f487c6c16fccf7c3d9b
|
Python
|
mcneel/rhino-developer-samples
|
/rhinocommon/snippets/from_rhinocommon/py/ex_addlayer.py
|
UTF-8
| 1,460 | 3.109375 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
import Rhino
import scriptcontext
import System.Guid, System.Drawing.Color
def AddLayer():
# Cook up an unused layer name
unused_name = scriptcontext.doc.Layers.GetUnusedLayerName(False)
# Prompt the user to enter a layer name
gs = Rhino.Input.Custom.GetString()
gs.SetCommandPrompt("Name of layer to add")
gs.SetDefaultString(unused_name)
gs.AcceptNothing(True)
gs.Get()
if gs.CommandResult()!=Rhino.Commands.Result.Success:
return gs.CommandResult()
# Was a layer named entered?
layer_name = gs.StringResult().Trim()
if not layer_name:
print "Layer name cannot be blank."
return Rhino.Commands.Result.Cancel
# Is the layer name valid?
if not Rhino.DocObjects.Layer.IsValidName(layer_name):
print layer_name, "is not a valid layer name."
return Rhino.Commands.Result.Cancel
# Does a layer with the same name already exist?
layer_index = scriptcontext.doc.Layers.Find(layer_name, True)
if layer_index>=0:
print "A layer with the name", layer_name, "already exists."
return Rhino.Commands.Result.Cancel
# Add a new layer to the document
layer_index = scriptcontext.doc.Layers.Add(layer_name, System.Drawing.Color.Black)
if layer_index<0:
print "Unable to add", layer_name, "layer."
return Rhino.Commands.Result.Failure
return Rhino.Commands.Result.Success
if __name__=="__main__":
AddLayer()
| true |
24876d25612203b05d6e21e6d893217a27a47718
|
Python
|
Ayu-99/python2
|
/session22(a).py
|
UTF-8
| 1,062 | 3.296875 | 3 |
[] |
no_license
|
# Asynchronous -> Parallel(May Lead to mixed output)
# Synchronous -> When there are multiple threads and they are accessing the same shared object, then we need
# synchronisation
import threading
import time
# Lock Object
lock = threading.Lock()
class Printer:
def printDocuments(self, docName, times):
lock.acquire()
for i in range(1, times+1):
print(">> Printing {} Copy#{}".format(docName, i))
time.sleep(1)
lock.release()
class Desktop(threading.Thread):
def attachPrinter(self, printer):
self.printer = printer
def run(self):
self.printer.printDocuments("LearningPython.pdf", 10)
class Laptop(threading.Thread):
def attachPrinter(self, printer):
self.printer = printer
def run(self):
self.printer.printDocuments("LearningJava.pdf", 10)
printer = Printer()
# printer.printDocuments("LearningPython.pdf", 10)
desktop = Desktop()
desktop.attachPrinter(printer)
desktop.start()
laptop = Laptop()
laptop.attachPrinter(printer)
laptop.start()
| true |
cd6ace2300b75f43b07860fd84070d17944ed0f8
|
Python
|
iChaos26/DP-Study
|
/Observer/Observer.py
|
UTF-8
| 299 | 2.890625 | 3 |
[] |
no_license
|
class Subject:
def __init__(self):
self.__observers = []
def register(self, observer):
self.__observers.append(observer)
def notifyAll(self, *args, **kwargs):
for observer in self.__observers:
observer.notify(self, *args, **kwargs)
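# A minimal usage sketch (hypothetical observer; any object exposing a
# notify(subject, *args, **kwargs) method will do):
class PrintObserver:
    def notify(self, subject, *args, **kwargs):
        print('notified by', subject, 'with', args, kwargs)

if __name__ == '__main__':
    subject = Subject()
    subject.register(PrintObserver())
    subject.notifyAll('state_changed', level=1)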
| true |
9d16b6db58ec2f3c9312a0b4d47f90ec13d7c7f6
|
Python
|
mola1129/atcoder
|
/contest/abc157/D.py
|
UTF-8
| 337 | 2.921875 | 3 |
[
"MIT"
] |
permissive
|
n, m, k = map(int, input().split())
friend = [[] for _ in range(n)]
for _ in range(m):
a, b = map(int, input().split())
friend[a - 1].append(b - 1)
friend[b - 1].append(a - 1)
block = [[] for _ in range(n)]
for _ in range(k):
c, d = map(int, input().split())
block[c - 1].append(d - 1)
block[d - 1].append(c - 1)
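# A hedged sketch of one possible completion (an assumption: this is ABC157 D,
# "Friend Suggestions" -- for each user, count members of their connected
# component who are neither the user, direct friends, nor blocked):
parent = list(range(n))
size = [1] * n

def find(x):
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving
        x = parent[x]
    return x

for a in range(n):
    for b in friend[a]:
        ra, rb = find(a), find(b)
        if ra != rb:
            parent[ra] = rb
            size[rb] += size[ra]

ans = []
for i in range(n):
    c = size[find(i)] - 1 - len(friend[i])
    c -= sum(1 for j in block[i] if find(j) == find(i))
    ans.append(c)
print(*ans)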
| true |
b95eaa738c17ae8de5223cbe5b88f05bb799e22b
|
Python
|
muhammedameen/Inf_Sec_RUG
|
/set1/exercise3.py
|
UTF-8
| 275 | 3.375 | 3 |
[] |
no_license
|
import sys
from string import ascii_lowercase as ALPHABET
def shift(message, offset):
trans = str.maketrans(ALPHABET, ALPHABET[offset:] + ALPHABET[:offset])
return message.lower().translate(trans)
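# Worked example (no input needed): with offset 19, each letter maps to the
# letter 19 positions later (mod 26), so shift("hello", 19) == "axeeh".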
print(shift(input("Input message you would like encrypted:\n"), 19))
| true |
3e9c01dd8348d4d0697c2ff6f4aa4f8636f680ef
|
Python
|
dabingsun/MS-DHCP
|
/Experiments/UNSWNB15/LSTM/LSTM10.py
|
UTF-8
| 7,081 | 2.6875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 08:12:12 2019
@author: dabing
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time as run_time
from sklearn.metrics import confusion_matrix
from loadData import trainFeatures,trainLabels_10,testFeatures,testLabels_10
start_time = run_time.time()
# parameter initialisation
rnn_input = 196
categories_num = 10
batch_size = 10
time_length= 50
rnn_layer = 1
rnn_hidden = 120
lr = 0.01
x_train = trainFeatures
x_test = testFeatures
train_y,test_y = trainLabels_10,testLabels_10
w_out = tf.get_variable("w_out",shape = [rnn_hidden,categories_num],dtype=tf.float32,initializer=tf.zeros_initializer(dtype=tf.float32))
b_out = tf.get_variable("b_out",shape=[categories_num],dtype=tf.float32,initializer=tf.zeros_initializer(dtype=tf.float32))
# custom parameter initialiser: Xavier uniform distribution
def xavier_init(fan_in,fan_out,constant = 1):
low = -constant * np.sqrt(6.0/(fan_in + fan_out))
high = constant * np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in,fan_out),minval=low,maxval=high,dtype = tf.float32)
class RNNModel(object):
def __init__(self,is_train, batch_size, time_steps):
# tf.reset_default_graph()
self.is_train = is_train
self.batch_size = batch_size
self.x =tf.placeholder(dtype = tf.float32,shape=[None,rnn_input],name='x')
self.y_ = tf.placeholder(dtype=tf.float32,shape=[None,categories_num],name="y_")
self.rnn_input = tf.reshape(self.x,shape =(-1,time_steps,rnn_input))
if self.is_train:
self.temp_cell = ([tf.contrib.rnn.DropoutWrapper(self.get_cell(),output_keep_prob=1) for i in range(rnn_layer)])
else:
self.temp_cell = ([self.get_cell() for i in range(rnn_layer)])
self.cell = tf.nn.rnn_cell.MultiRNNCell(self.temp_cell,state_is_tuple=True)
self.is_static = self.cell.zero_state(batch_size,dtype=tf.float32)
self.output,self.static = tf.nn.dynamic_rnn(self.cell,self.rnn_input,initial_state=self.is_static)
        # convert the RNN output into softmax input (3-D to 2-D)
self.rnn_output = tf.reshape(tf.concat(self.output,1),(-1,rnn_hidden))
self.logists = tf.nn.softmax(tf.add(tf.matmul(self.rnn_output,w_out),b_out))
        # define the loss function
self.loss = tf.reduce_mean(-tf.reduce_sum(self.y_ * tf.log(self.logists)+(1-self.y_)*tf.log(1-self.logists),axis=1))
        # define the optimiser and the learning rate
# self.train_op = tf.train.AdamOptimizer(learning_rate=lr ).minimize(self.loss)
self.train_op = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(self.loss)
        # define training accuracy
self.correct_prediction = tf.equal(tf.argmax(self.logists,1),tf.argmax(self.y_,1))
        # tf.cast converts the boolean matches to float32; reduce_mean averages them
self.acc= tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.y_p = tf.argmax(self.logists,1)
def get_cell(self):
return tf.nn.rnn_cell.BasicLSTMCell(num_units=rnn_hidden,activation = tf.nn.sigmoid)
# fetch a random batch of data during training
def get_bolck_data(data, data_y, time_length, batch_size):
end_random_index = (len(data) // time_length) - batch_size
start_index = np.random.randint(0,end_random_index)
return data[start_index*time_length:(start_index+batch_size)*time_length],data_y[start_index*time_length:(start_index+batch_size)*time_length]
# number of passes over the dataset
epochs =100
display = 1
n_samples = int(trainFeatures.shape[0])
# use the data provided by the data-loading module
with tf.Session() as sess:
with tf.variable_scope("model",reuse=None):
train_model = RNNModel(False,batch_size=batch_size, time_steps=time_length)
with tf.variable_scope("model",reuse=True):
test_model = RNNModel(False, batch_size=1, time_steps= x_test.shape[0])
sess.run(tf.global_variables_initializer())
epoch_list = []
train_cost_list = []
train_ac_list = []
test_cost_list = []
test_ac_list = []
cm_list = []
for epoch in range(epochs):
steps = 0
total_cost = 0
total_ac = 0
total_test_ac = 0
total_test_cost = 0
# train_x.shape[0]// (batch_size*time_length//2)
total_batch = int(n_samples / (batch_size*time_length))
for step in range(total_batch):
batch_x,batch_y = get_bolck_data(x_train,train_y,time_length,batch_size)
# x_train_a, y_train_a = get_data(train_x, train_y, time_length, batch_size, step)
batch_y = batch_y.reshape(-1,categories_num)
_, cost, ac = sess.run([train_model.train_op,train_model.loss,train_model.acc],
feed_dict={train_model.x:batch_x, train_model.y_:batch_y})
steps += 1
total_ac += ac
total_cost += cost
        # results on the test set (labelled "Test+" in the plots)
test_y = test_y.reshape(-1,categories_num)
y_t = np.argmax(test_y,1)
total_test_cost,total_test_ac,y_p = sess.run([test_model.loss,test_model.acc,test_model.y_p],
feed_dict={test_model.x:x_test, test_model.y_:test_y})
cm = confusion_matrix(y_t, y_p)
if epoch % display == 0:
print("Epoch",'%04d'%(epoch+1),
"[ train_ac={0:.4f} train_cost={1:.4f} ] [ test_ac={2:.4f} test_cost={3:.4f} ]".format(
total_ac/steps,
total_cost/steps,
total_test_ac,
total_test_cost))
epoch_list.append(epoch)
train_cost_list.append(total_cost/steps)
train_ac_list.append(total_ac/steps)
test_ac_list.append(total_test_ac)
test_cost_list.append(total_test_cost)
cm_list.append(cm)
# create a figure with two rows and one column, and take the first subplot
ax1 = plt.subplot(2,1,1)
plt.plot(epoch_list,train_ac_list,color = 'green',label = "Train_acc")
plt.plot(epoch_list,test_ac_list,color = 'red',label = "Test+_acc")
plt.axis()
plt.legend()
plt.ylabel("accuracy")
plt.xlabel("epochs")
ax1.set_title("accuracy-epochs")
ax2 = plt.subplot(2,1,2)
plt.plot(epoch_list,train_cost_list,color = 'green',label = "Train_cost")
plt.plot(epoch_list,test_cost_list,color = 'red',label = "Test+_cost")
plt.axis()
plt.legend()
plt.ylabel("loss")
plt.xlabel("epochs")
ax2.set_title("loss-epochs")
# plt.savefig("./train_image/SAE_1.png")
plt.tight_layout()
plt.show()
print("maxAcc {}".format(max(test_ac_list)))
print("CM:{0}".format(cm_list[test_ac_list.index(max(test_ac_list))]))
print("lastCM:{0}".format(cm_list[-1]))
| true |
0de3b86e5d107b279cdef04ef23bb769473137b8
|
Python
|
ahmadrezare2/Summer-2021-Pre-Django
|
/Max,Min.py
|
UTF-8
| 256 | 3.75 | 4 |
[] |
no_license
|
numbers = []
x1 = int(input("Enter your first number: "))
x2 = int(input("Enter your second number: "))
x3 = int(input("Enter your third number: "))
numbers.extend([x1, x2, x3])
numbers.sort()
print("max is: ", numbers[-1])
print("min is: ", numbers[0])
| true |
ab77e6bfd0490f15efd6b3180561775914c73941
|
Python
|
lance-shi/TextEditorPython
|
/mainMenu.py
|
UTF-8
| 1,766 | 3.09375 | 3 |
[] |
no_license
|
from tkinter import *
from tkinter import filedialog
class MainMenuPanel:
def __init__(self, root, txtEdit):
        self.root = root
        self.txtEdit = txtEdit
        self.currentFilePath = ""  # initialised here so onSave works before any New/Open
self.mainMenu = Menu()
self.root.config(menu=self.mainMenu)
fileMenu = Menu()
self.mainMenu.add_cascade(label="File", menu=fileMenu)
fileMenu.add_command(label="New", command=self.onNew)
fileMenu.add_command(label="Open", command=self.onOpen)
fileMenu.add_command(label="Save", command=self.onSave)
fileMenu.add_command(label="Save As...", command=self.onSaveAs)
fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=self.root.quit)
def onNew(self):
self.txtEdit.delete(1.0, END)
self.root.title("Simple Text Editor")
self.currentFilePath = ""
def onOpen(self):
filepath = filedialog.askopenfilename(
filetypes = [("Text Files", "*.txt"), ("All Files", "*.*")]
)
if not filepath:
return
self.txtEdit.delete(1.0, END)
with open(filepath, "r") as inputFile:
text = inputFile.read()
self.txtEdit.insert(END, text)
self.root.title(f"Simple Text Editor - {filepath}")
self.currentFilePath = filepath
print(self.currentFilePath)
def onSave(self):
print("Hello")
print(self.currentFilePath)
if self.currentFilePath != "":
with open(self.currentFilePath, "w") as outputFile:
text = self.txtEdit.get(1.0, END)
outputFile.write(text)
else:
            self.onSaveAs()
def onSaveAs(self):
filepath = filedialog.asksaveasfilename(
defaultextension="txt",
filetypes=[("Text Files", "*.txt"), ("All Files", "*.*")]
)
if not filepath:
return
with open(filepath, "w") as outputFile:
text = self.txtEdit.get(1.0, END)
outputFile.write(text)
root.title(f"Simple Text Editor - {filepath}")
self.currentFilePath = filepath
| true |
2db8679736a0ed8948dc374237b31a2a86a5258c
|
Python
|
jpltechlimited/Edukatronic
|
/0007/armToChessBoardInterface.py
|
UTF-8
| 3,989 | 2.828125 | 3 |
[] |
no_license
|
from roboarm import Arm
import time
class ArmToChessBoardInterface:
def __init__(self):
self.arm = Arm()
self.vendor = 0x1267
self.bmRequestType = 0x40
self.bRequest = 6
self.wValue = 0x100
self.wIndex = 0
self.sleep_time = 1
        # plain integer codes; the original trailing commas (e.g. `1,`) made most
        # of these 1-tuples, a likely C-enum habit that breaks `==` against ints
        self.BASE_RIGHT = 1
        self.BASE_LEFT = 2
        self.ELBOW_UP = 3
        self.ELBOW_DOWN = 4
        self.SHOULDER_FRONT = 5
        self.SHOULDER_BACK = 6
        self.WRIST_UP = 7
        self.WRIST_DOWN = 8
def __move_shoulder_down__(self, time_to_move):
if time_to_move != 0:
self.arm.shoulder.down(time_to_move)
time.sleep(self.sleep_time)
def __move_shoulder_up__(self, time_to_move):
if time_to_move != 0:
additional_time = time_to_move / 14
self.arm.shoulder.up(time_to_move + additional_time)
time.sleep(self.sleep_time)
def __move_elbow_down__(self, time_to_move):
if time_to_move != 0:
self.arm.elbow.down(time_to_move)
time.sleep(self.sleep_time)
def __move_elbow_up__(self, time_to_move):
if time_to_move != 0:
additional_time = time_to_move / 10
self.arm.elbow.up(time_to_move + additional_time)
time.sleep(self.sleep_time)
def __move_left__(self, time_to_move):
if time_to_move != 0:
self.arm.base.rotate_clock(time_to_move)
time.sleep(self.sleep_time)
def __move_right__(self, time_to_move):
if time_to_move != 0:
self.arm.base.rotate_counter(time_to_move)
time.sleep(self.sleep_time)
def __move_wrist_up__(self, time_to_move):
if time_to_move != 0:
self.arm.wrist.up(time_to_move)
time.sleep(self.sleep_time)
def __move_wrist_down__(self, time_to_move):
if time_to_move != 0:
self.arm.wrist.down(time_to_move)
time.sleep(self.sleep_time)
def grab(self):
self.arm.grips.close(0.2)
time.sleep(self.sleep_time)
def ungrab(self):
self.arm.grips.open(0.2)
time.sleep(self.sleep_time)
def move(self, move_coordinates):
if move_coordinates[0] == self.BASE_RIGHT:
self.__move_right__(move_coordinates[1])
if move_coordinates[0] == self.BASE_LEFT:
self.__move_left__(move_coordinates[1])
if move_coordinates[2] == self.ELBOW_DOWN:
self.__move_elbow_down__(move_coordinates[3])
if move_coordinates[2] == self.ELBOW_UP:
self.__move_elbow_up__(move_coordinates[3])
if move_coordinates[4] == self.SHOULDER_BACK:
self.__move_shoulder_up__(move_coordinates[5])
if move_coordinates[4] == self.SHOULDER_FRONT:
self.__move_shoulder_down__(move_coordinates[5])
if move_coordinates[6] == self.WRIST_UP:
self.__move_wrist_up__(move_coordinates[7])
if move_coordinates[6] == self.WRIST_DOWN:
self.__move_wrist_down__(move_coordinates[7])
def move_back_to_center(self, move_coordinates):
if move_coordinates[4] == self.SHOULDER_BACK:
self.__move_shoulder_down__(move_coordinates[5])
if move_coordinates[4] == self.SHOULDER_FRONT:
self.__move_shoulder_up__(move_coordinates[5])
if move_coordinates[2] == self.ELBOW_DOWN:
self.__move_elbow_up__(move_coordinates[3])
if move_coordinates[2] == self.ELBOW_UP:
self.__move_elbow_down__(move_coordinates[3])
if move_coordinates[0] == self.BASE_RIGHT:
self.__move_left__(move_coordinates[1])
if move_coordinates[0] == self.BASE_LEFT:
self.__move_right__(move_coordinates[1])
if move_coordinates[6] == self.WRIST_UP:
self.__move_wrist_down__(move_coordinates[7])
if move_coordinates[6] == self.WRIST_DOWN:
self.__move_wrist_up__(move_coordinates[7])
| true |
ad759c35865c55c62274e4b79e6a874220ef1835
|
Python
|
Payalkumari25/GUI
|
/tempCodeRunnerFile.py
|
UTF-8
| 142 | 2.75 | 3 |
[] |
no_license
|
top.title("2nd window")
img = ImageTk.PhotoImage(Image.open("D:/Tkinter/images/pink.png"))
my_label = Label(top, image=img)  # keep a reference; pack() returns None, so don't chain it
my_label.pack()
| true |
6362e79bc00f6be4b6fb17a62bfd4b22d3bbc988
|
Python
|
hbzhang/computervisionclass
|
/imageprocessing/elipse.py
|
UTF-8
| 1,290 | 2.9375 | 3 |
[] |
no_license
|
from skimage import data, color, img_as_ubyte
from skimage.feature import canny
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter
import matplotlib.pyplot as plt
from skimage import io
# Load picture, convert to grayscale and detect edges
#image_rgb = io.imread('cup.png')
image_rgb = data.coffee()
#image_rgb = data.coffee()#[0:220, 160:420]
image_gray = color.rgb2gray(image_rgb)
edges = canny(image_gray, sigma=2.0,
low_threshold=0.55, high_threshold=0.8)
# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
result = hough_ellipse(edges, accuracy=20, threshold=50,
min_size=100, max_size=120)
result.sort(order='accumulator')
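# Hedged guard (a defensive sketch): hough_ellipse may return an empty array when
# no candidate passes the accuracy/threshold/size constraints, and result[-1]
# below would then raise an IndexError.
if len(result) == 0:
    raise SystemExit("no ellipse found; try relaxing threshold or size bounds")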
# Estimated parameters for the ellipse
best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]
# Draw the ellipse on the original image
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[cy, cx] = (0, 0, 255)
# Draw the edge (white) and the resulting ellipse (red)
edges = color.gray2rgb(img_as_ubyte(edges))
edges[cy, cx] = (250, 0, 0)
plt.figure()
plt.imshow(edges)
plt.show()
| true |
bba1d012d7a2c543eb01c7f75fc5c1dc47615234
|
Python
|
didwns7347/algotest
|
/알고리즘문제/1699번 제곱수의 합.py
|
UTF-8
| 220 | 3.03125 | 3 |
[] |
no_license
|
import sys

# BOJ 1699: dp[i] = minimum number of perfect squares summing to i.
n = int(sys.stdin.readline().strip())
dp = [0] * (n + 1)
for i in range(1, n + 1):
    dp[i] = i  # worst case: i copies of 1^2
    j = 1
    while j * j <= i:
        if dp[i] > dp[i - j * j] + 1:
            dp[i] = dp[i - j * j] + 1
        j += 1
print(dp[n])
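# Worked example (hypothetical input): n = 12 gives dp[12] = 3, since 12 = 2^2 + 2^2 + 2^2.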
| true |
5a6dd998b6f92ca2ca9b557a208d78a6b7d0f4b3
|
Python
|
TKfong/CMPE200-Enigma
|
/enigma.py
|
UTF-8
| 6,843 | 3.015625 | 3 |
[] |
no_license
|
#! /usr/bin/env python3
import sys
from easygui import *
class Enigma:
# Constructor/Initialize Enigma machine
def __init__(self, set1, set2, set3):
self.numcycles = 0
self.rotors = []
# Settings for the machine
# We locked in only 3 rotors
self.rotorsettings = [("III", set3),
("II", set2),
("I", set1)]
# We arbitrarily chose Reflector B
self.reflectorsetting = "B"
self.plugboardsetting = []
# Create the plugboard
self.plugboard = Plugboard(self.plugboardsetting)
# Create each of the rotors
for i in range(len(self.rotorsettings)):
self.rotors.append(Rotor(self.rotorsettings[i]))
# Create reflector
self.reflector = Reflector(self.reflectorsetting)
# Simple print function of setup information
def print_setup(self):
print()
# Print rotor sequence
print("Rotor sequence: (right to left)")
for r in self.rotors:
print(r.setting, "\t", r.sequence)
print()
# Print Reflector's sequence
print("Reflector sequence:")
print(self.reflector.setting, "\t", self.reflector.sequence, "\n")
# Print Plugboard settings; if any
print("Plugboard settings:")
print(self.plugboard.mapping, "\n")
# Reset the machine's rotors to default state:
def reset(self):
self.numcycles = 0
        # Iterate through the rotors
for r in self.rotors:
r.reset()
# Encrypt a single character
def encrypt(self, c):
# Force all messages to upper case
        # Note: there is a bug when trying to pass messages in lower case
c = c.upper()
# Check if it is a letter
if (not c.isalpha()):
return c
        # Rotate every time there is an input
self.rotors[0].rotate()
# Double step
if self.rotors[1].base[0] in self.rotors[1].notch:
self.rotors[1].rotate()
# Normal stepping
for i in range(len(self.rotors) - 1):
if(self.rotors[i].turnover):
self.rotors[i].turnover = False
self.rotors[i + 1].rotate()
# Passthrough the plugboard forward
index = self.plugboard.forward(c)
# Move through each of the rotors from III->II->I
for r in self.rotors:
index = r.forward(index)
# Pass through reflector B
index = self.reflector.forward(index)
        # Move back through rotors in reverse: I->II->III
for r in reversed(self.rotors):
index = r.reverse(index)
# Passthrough the plugboard reverse
c = self.plugboard.reverse(index)
return c
class Rotor:
"""
Setting Wiring Notch Window Turnover
Base ABCDEFGHIJKLMNOPQRSTUVWXYZ
I EKMFLGDQVZNTOWYHXUSPAIBRCJ Y Q R
II AJDKSIRUXBLHWTMCQGZNPYFVOE M E F
III BDFHJLCPRTXVZNYEIWGAKMUSQO D V W
Inverted Wiring
Base ABCDEFGHIJKLMNOPQRSTUVWXYZ
I UWYGADFPVZBECKMTHXSLRINQOJ
II AJPCZWRLFBDKOTYUQGENHXMIVS
III TAGBPCSDQEUFVNZHYIXJWLRKOM
"""
# Constructor to initialize Rotor settings
def __init__(self, settings):
self.setting = settings[0]
self.ringoffset = settings[1]
self.base = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.settings = {
"I": ["EKMFLGDQVZNTOWYHXUSPAIBRCJ", ["R"], ["Q"]],
"II": ["AJDKSIRUXBLHWTMCQGZNPYFVOE", ["F"], ["E"]],
"III": ["BDFHJLCPRTXVZNYEIWGAKMUSQO", ["W"], ["V"]]}
self.turnovers = self.settings[self.setting][1]
self.notch = self.settings[self.setting][2]
self.sequence = None
self.turnover = False
self.reset()
# Reset the rotors to default setting
def reset(self):
self.base = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.sequence = self.settings[self.setting][0]
for _ in range(self.ringoffset):
self.rotate()
# Move right to left through the rotor
def forward(self, index):
return self.base.index(self.sequence[index])
# Move left to right back through the rotor
def reverse(self, index):
return self.sequence.index(self.base[index])
# Rotate the rotor once
def rotate(self):
self.base = self.base[1:] + self.base[:1]
self.sequence = self.sequence[1:] + self.sequence[:1]
if(self.base[0] in self.turnovers):
self.turnover = True
class Reflector:
"""
Setting Wiring
Base ABCDEFGHIJKLMNOPQRSTUVWXYZ
B YRUHQSLDPXNGOKMIEBFZCWVJAT
"""
# Constructor for Reflector
def __init__(self, setting):
self.setting = setting
self.base = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.settings = {"B": "YRUHQSLDPXNGOKMIEBFZCWVJAT"}
self.sequence = self.sequence_settings()
# Setup initial sequence for Reflector
def sequence_settings(self):
return self.settings[self.setting]
# Input into reflector
def forward(self, index):
return self.sequence.index(self.base[index])
class Plugboard:
# Constructor for Plugboard
def __init__(self, mapping):
mapping = [("A", "B"), ("C", "D")]
self.base = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.mapping = {}
for m in self.base:
self.mapping[m] = m
for m in mapping:
self.mapping[m[0]] = m[1]
self.mapping[m[1]] = m[0]
# Forward route through the plugboard
def forward(self, c):
return self.base.index(self.mapping[c])
# Backward route through the plugboard
def reverse(self, index):
return self.mapping[self.base[index]]
def main():
# Create GUI
msg = "Enter machine settings"
title = "Enigma Machine"
fieldNames = ["Rotor1","Rotor2","Rotor3","Input"]
fieldValues = [] # we start with blanks for the values
fieldValues = multenterbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
                errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "": break # no problems found
fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
# Initialize an Enigma machine
# Read inputs from the gui - fieldValues[]
machine = Enigma(int(fieldValues[0]),int(fieldValues[1]),int(fieldValues[2]))
# Output
ciphertext = ""
try:
# Read input when running program
# Input is now from gui
plaintext = fieldValues[3]
# Print the machine's setup
machine.print_setup()
# Print the initial message
print("Plaintext", "\t", plaintext)
# Loop through each character of message
# and feed into the machine's encrypt program
for character in plaintext:
ciphertext += machine.encrypt(character)
# Print out the encrypted message
print("Ciphertext", "\t", ciphertext)
# Reset and Decode same message
machine.reset()
plaintext = ""
for character in ciphertext:
plaintext += machine.encrypt(character)
print("Plaintext", "\t", plaintext, "\n")
except IndexError:
for plaintext in sys.stdin:
for character in plaintext:
sys.stdout.write(machine.encrypt(character))
if __name__ == '__main__':
main()
| true |
bb7dfbf9dcfb19894ec4946a9e5bcffa6276b5db
|
Python
|
tasosval/flask-simpleldap
|
/tests/conftest.py
|
UTF-8
| 933 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
import pytest
from flask import Flask
from flask_simpleldap import LDAP
from config import BaseConfig
@pytest.fixture(scope='class')
def app(request):
'''A Flask application object with an automatically pushed context
It is also configured by the BaseConfig object
The scope for this is class, so we don't create an expensive object for simple tests
'''
app = Flask(__name__)
app.config.from_object(BaseConfig)
# Push the context so it can be used by the tests
ctx = app.app_context()
ctx.push()
# Pop the context when we are finished
def teardown():
ctx.pop()
request.addfinalizer(teardown)
return app
@pytest.fixture(scope='class')
def ldap(app):
'''The flask_simpleldap object with the proper initialization ready to be used
The scope for this is class, so we don't create an expensive object for simple tests
'''
ldap = LDAP(app)
return ldap
| true |
cec3f58d2993869bae6db6df12c527b9c175efa8
|
Python
|
Deeshant2234/comm
|
/coding/codes/conv/Prev_stage.py
|
UTF-8
| 1,262 | 2.71875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 7 08:51:42 2017
@author: hemanth
"""
#Starts from the current decoded state, takes input as minimum distance to
#reach that state and previous state And returns previous state and decoded
#information bit corresponding to that state.
import numpy as np
def prev_stage(curr_state,distance_prev,metric):
if(curr_state==0):
if(distance_prev[0]+metric[0] <= distance_prev[2]+metric[4]):
prev_state=0
decoded_bit=0
else:
prev_state=2
decoded_bit=0
elif(curr_state==1):
if(distance_prev[0]+metric[1] <= distance_prev[2]+metric[5]):
prev_state=0
decoded_bit=1
else:
prev_state=2
decoded_bit=1
elif(curr_state==2):
if(distance_prev[1]+metric[2] <= distance_prev[3]+metric[6]):
prev_state=1
decoded_bit=0
else:
prev_state=3
decoded_bit=0
elif(curr_state==3):
if(distance_prev[1]+metric[3] <= distance_prev[3]+metric[7]):
prev_state=1
decoded_bit=1
else:
prev_state=3
decoded_bit=1
return prev_state,decoded_bit
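# Minimal usage sketch with hypothetical metrics: four accumulated path metrics
# (one per previous state) and eight branch metrics for the trellis transitions.
if __name__ == "__main__":
    distance_prev = [0, 2, 1, 3]
    metric = [1, 0, 2, 1, 0, 2, 1, 0]
    print(prev_stage(0, distance_prev, metric))  # -> (0, 0)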
| true |
b9d23de8f6ff2827787afd433cb958c8b2fd0145
|
Python
|
RunSi/South_Asia_Programmability
|
/funcs.py
|
UTF-8
| 103 | 2.96875 | 3 |
[] |
no_license
|
def add(num1, num2):
result = num1 + num2
return result
def say_hello():
print("Hello!")
| true |
b345226d47ad4faeaff87aa532c0d8da3d81da82
|
Python
|
FastStonewkx/SpiderPython
|
/Code/requests_repo/RegularExp/sub2.py
|
UTF-8
| 273 | 2.875 | 3 |
[] |
no_license
|
import re
# Read the sample page and print the text of each <li> item (linked or not).
with open("example.html", 'r', encoding='UTF-8') as html:
    file_text = html.read()
    result = re.findall(r'<li.*?>\s*?(<a.*?>)?(\w+)(</a>)?\s*?</li>', file_text, re.S)
    for res in result:
        print(res[1])
| true |
e90d8b71761be3642fa653865e432b521bc8dec7
|
Python
|
Ben-Baert/Exercism
|
/python/sublist/sublist.py
|
UTF-8
| 513 | 3.390625 | 3 |
[] |
no_license
|
SUBLIST = 1
SUPERLIST = 2
EQUAL = 3
UNEQUAL = 4
def is_sublist(lst1, lst2):
if not lst1 or any(lst1 == lst2[i:i+len(lst1)] for i in range(len(lst2))):
return True
return False
def is_superlist(lst1, lst2):
return is_sublist(lst2, lst1)
def check_lists(lst1, lst2):
if lst1 == lst2:
return EQUAL
if len(lst1) <= len(lst2) and is_sublist(lst1, lst2):
return SUBLIST
if len(lst1) > len(lst2) and is_superlist(lst1, lst2):
return SUPERLIST
return UNEQUAL
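# Minimal usage sketch (hypothetical lists):
if __name__ == "__main__":
    assert check_lists([1, 2], [1, 2]) == EQUAL
    assert check_lists([1, 2], [0, 1, 2, 3]) == SUBLIST
    assert check_lists([0, 1, 2, 3], [1, 2]) == SUPERLIST
    assert check_lists([1, 2], [3, 4]) == UNEQUAL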
| true |
770339eb3c8822bea339733ce6713319cf3f32a0
|
Python
|
18F/snap-api-prototype
|
/snap_financial_factors/deductions/medical_expenses_deduction.py
|
UTF-8
| 3,914 | 3.140625 | 3 |
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
from snap_financial_factors.deductions.deduction_result import DeductionResult
class MedicalExpensesDeduction:
'''
Calculates medical expenses deduction for households that include a member
who is over 60 years old, blind, or disabled.
'''
def __init__(self,
household_includes_elderly_or_disabled: bool,
medical_expenses_for_elderly_or_disabled: int,
standard_medical_deduction: bool,
standard_medical_deduction_amount: int) -> None:
self.household_includes_elderly_or_disabled = household_includes_elderly_or_disabled
self.medical_expenses_for_elderly_or_disabled = medical_expenses_for_elderly_or_disabled
self.standard_medical_deduction = standard_medical_deduction
self.standard_medical_deduction_amount = standard_medical_deduction_amount
def calculate(self) -> DeductionResult:
explanation = [
"Next, deduct monthly medical expenses for elderly or disabled household members beyond $35. "
]
if not self.household_includes_elderly_or_disabled:
explanation.append(
"In this case, there are no elderly or disabled members of " +
"the household, so the deduction does not apply. "
)
return DeductionResult(
result=0,
explanation=explanation
)
if self.medical_expenses_for_elderly_or_disabled == 0:
explanation.append(
"In this case, there are no monthly medical expenses to deduct. "
)
return DeductionResult(
result=0,
explanation=explanation
)
if 35 >= self.medical_expenses_for_elderly_or_disabled:
explanation.append(
"In this case, medical expenses are below the $35 monthly threshold for deduction. "
)
return DeductionResult(
result=0,
explanation=explanation
)
if self.standard_medical_deduction is True:
standard_medical_deduction_amount = self.standard_medical_deduction_amount
medical_expenses = self.medical_expenses_for_elderly_or_disabled
expenses_above_standard = medical_expenses > (standard_medical_deduction_amount + 35)
if expenses_above_standard:
medical_expenses_deduction = medical_expenses - 35
explanation.append(
f"Medical expenses are greater than the Standard Medical Deduction amount of ${standard_medical_deduction_amount}. " +
f"In this case, the full medical expense amount less $35 can be deducted, which comes to ${medical_expenses_deduction}. "
)
return DeductionResult(
result=medical_expenses_deduction,
explanation=explanation
)
else:
explanation.append(
f"This state has a Standard Medical Deduction amount of ${standard_medical_deduction_amount}. "
)
return DeductionResult(
result=standard_medical_deduction_amount,
explanation=explanation
)
medical_expenses_deduction = self.medical_expenses_for_elderly_or_disabled - 35
explanation.append(
"The medical expenses deduction is equal to monthly medical expenses " +
"beyond $35."
)
explanation.append('')
explanation.append(
f"${self.medical_expenses_for_elderly_or_disabled} - $35 = " +
f"${medical_expenses_deduction} medical expenses deduction"
)
return DeductionResult(
result=medical_expenses_deduction,
explanation=explanation
)
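# Minimal usage sketch (hypothetical figures; assumes DeductionResult exposes
# `result` and `explanation` attributes, as the calculate() paths above imply):
if __name__ == "__main__":
    deduction = MedicalExpensesDeduction(
        household_includes_elderly_or_disabled=True,
        medical_expenses_for_elderly_or_disabled=135,
        standard_medical_deduction=False,
        standard_medical_deduction_amount=0,
    )
    outcome = deduction.calculate()
    print(outcome.result)  # -> 100, i.e. $135 - $35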
| true |
b277f65bd0fcf7f0351caf35b52e560833c9dbbe
|
Python
|
SalimRR/-
|
/5_2.py
|
UTF-8
| 455 | 4.09375 | 4 |
[] |
no_license
|
a = int()
b = int()
def maximal(a, b):
    """Enter integer values for a and b"""
    help(maximal)
    a = int(input('Enter an integer for a: '))
    b = int(input('Enter an integer for b: '))
    if a > b:
        print('max = ', a)
    elif b > a:
        print('max = ', b)
    else:
        print('a must not be equal to b')
maximal(a, b)
| true |
40661169d7affd5e9b8d790362f6c8265e21ff75
|
Python
|
Nyapy/TIL
|
/04_algorithm/18day/0906/(2819)격자판의숫자이어 붙이기.py
|
UTF-8
| 770 | 3 | 3 |
[] |
no_license
|
# DFS over the 4x4 grid: build every 7-digit number by concatenating adjacent
# digits; visit[n] stores the test-case id that last produced n, so each number
# is counted once per test case.
def dfs(x, y, k, n):
    global cnt
    if k == 7:
        if visit[n] != tc:
            cnt += 1
            visit[n] = tc
        return
    for i in range(4):
        nx = x + dx[i]
        ny = y + dy[i]
        if nx < 0 or nx >= 4 or ny < 0 or ny >= 4: continue
        dfs(nx, ny, k + 1, n * 10 + data[nx][ny])
import sys
sys.stdin = open("(2819)격자판의숫자이어 붙이기_input.txt")
T = int(input())
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
visit = [0] * 10000000
for tc in range(1, T+1):
data = [[0 for _ in range(4)] for _ in range(4)]
cnt = 0
for i in range(4):
data[i] = list(map(int, input().split()))
for i in range(4):
for j in range(4):
dfs(i, j, 1, data[i][j])
print("#{} {}".format(tc, cnt))
| true |
5eee1c88d77e2bf4db286328b4c2a5f53a420e7b
|
Python
|
BalajiShivakumar/Python
|
/FGame.py
|
UTF-8
| 72 | 3.421875 | 3 |
[] |
no_license
|
numbers = [5, 2, 5, 2, 2]
for output in numbers:
print("x" * output)
| true |
869ed0e9b729a8676890b80b95990c49765829b0
|
Python
|
wufanyou/WRL-Agriculture-Vision
|
/utils/losses/hybird.py
|
UTF-8
| 3,067 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
import torch.nn as nn
from torch import Tensor
from typing import Optional
from .lovasz_loss import CustomizeLovaszLoss, LovaszLoss
from .binary_cross_entropy import (
MaskBinaryCrossEntropyIgnoreIndex,
MaskBinaryCrossEntropy,
)
from .dice_loss import CustomizeDiceLoss
from .jaccard import CustomiseJaccardLoss
from .focal_loss import CustomizeFocalLoss
__all__ = ["Hybird", "HybirdV3", "HybirdV4"]
class Hybird(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(Hybird, self).__init__()
self.BCE = MaskBinaryCrossEntropyIgnoreIndex(weight=weight)
self.lovasz_loss = CustomizeLovaszLoss()
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
N, C, W, H = pred.shape
mask = mask[:, None].expand([N, C, W, H])
target[mask == 0] = 255
loss = self.BCE(pred, target) + self.l1 * self.lovasz_loss(pred, target)
loss /= 1 + self.l1
return loss
class HybirdV3(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(HybirdV3, self).__init__()
self.lovasz_loss = LovaszLoss(mode="multiclass", ignore_index=255)
self.ce = nn.CrossEntropyLoss(ignore_index=255)
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
target = target.argmax(1)
target[mask == 0] = 255
loss = self.ce(pred, target) + self.l1 * self.lovasz_loss(pred, target)
loss /= 1 + self.l1
return loss
class HybirdV4(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(HybirdV4, self).__init__()
self.bce = MaskBinaryCrossEntropy(weight=weight)
self.jaccard = CustomiseJaccardLoss(**kwargs)
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
loss = self.bce(pred, target, mask) + self.l1 * self.jaccard(pred, target, mask)
loss /= 1 + self.l1
return loss
class HybirdV5(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(HybirdV5, self).__init__()
self.bce = MaskBinaryCrossEntropy(weight=weight)
self.dice = CustomizeDiceLoss(**kwargs)
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
loss = self.bce(pred, target, mask) + self.l1 * self.dice(pred, target, mask)
loss /= 1 + self.l1
return loss
class HybirdV6(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(HybirdV6, self).__init__()
self.focal = CustomizeFocalLoss(**kwargs)
self.jaccard = CustomiseJaccardLoss(**kwargs)
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
loss = self.focal(pred, target, mask) + self.l1 * self.jaccard(
pred, target, mask
)
loss /= 1 + self.l1
return loss
| true |
8f17fdbeef89f5a5c60dbf0f0f0014f097aecb56
|
Python
|
mayra1228/LearnSpider
|
/Demo9.py
|
UTF-8
| 738 | 2.921875 | 3 |
[] |
no_license
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# @Time : 2018/9/26 5:21 PM
# @Author : mayra.zhao
# @File : Demo9.py
import re
# Character-class matching
# a = re.match(r'[0-9]',"123456")
# print a.group()
#from typing import Match
# . matches any single character except \n
# a = re.match(r'...','123')
# print a.group()
a = re.match(r'[0-8]?[0-9]','95')
print a.group()
b = re.match(r'[A-Z][a-z]*', "XasssssX123")
print b.group()
c = re.match(r'\d{6,12}@qq\.com','2545620930@qq.com')
print c.group()
d = re.match(r'[0-9][a-z]*','9abcdef')
print d.group()
e = re.match(r'[0-9][a-z]*?','9abcdef')
print e.group()
f = re.match(r'[0-9][a-z]+?','9abcdef')
print f.group()
content = "nick\nnjenny\nsuo"
a = re.search(r'^s.*', content, re.M )
print a.group()
| true |
f09e455e36b78e4a15c3fe5797fe651f3fc95fce
|
Python
|
vippermanu/new_mal_domain_profile
|
/fig_sys_4_22now/fig_sys_4_22 -now/models/detect_results.py
|
UTF-8
| 11,655 | 2.640625 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from Base import Base
class QueryDetectResut(Base):
"""
    Query class for domain-maliciousness detection results from third-party interfaces
"""
def __init__(self):
Base.__init__(self)
@staticmethod
def extract_maltype(single_result,default_type):
"""
        Extract the security category and assemble it into dict form:
        {detect_result: detection result, detect_type: security level, detect_time: detection time}
        :param single_result: {_rs: detection result, _it: detection time}
        :param default_type: detection tool
        :return: {detect_result: detection result, detect_type: security level, detect_time: detection time}
"""
if default_type == 'tencent':
single_result['detect_time'] = str(single_result['tm_it'])
del single_result['tm_it']
result = single_result.get('tm_rs')
single_result['detect_result'] = result
del single_result['tm_rs']
if result.find('危险')!=-1:
single_result['detect_type'] = "危险"
elif result.find('未知')!=-1:
single_result['detect_type'] = "未知"
elif result.find('安全')!=-1:
single_result['detect_type'] = "安全"
else:
single_result['detect_result'] = "检测结果未知"
single_result['detect_type'] = "待检测"
print "新类型,重新区分/还未检测"
elif default_type == 'baidu':
single_result['detect_time'] = str(single_result['bd_it'])
del single_result['bd_it']
result = single_result.get('bd_rs')
del single_result['bd_rs']
single_result['detect_result'] = result
if result is None:
single_result['detect_result'] = "检测结果未知"
single_result['detect_type'] = "待检测"
else:
single_result['detect_type'] = result
elif default_type == 'sanliuling':
single_result['detect_time'] = str(single_result['sl_it'])
del single_result['sl_it']
result = single_result.get('sl_rs')
del single_result['sl_rs']
single_result['detect_result'] = result
if result is None:
single_result['detect_result'] = "检测结果未知"
single_result['detect_type'] = "待检测"
elif result in ["高危",'严重','警告']:
single_result['detect_type'] = "危险"
elif result=='安全':
single_result['detect_type'] = "安全"
elif result.find('未知')!=-1:
single_result['detect_type'] = "未知"
single_result['detect_result'] = "安全性得分未知"
else:
single_result['detect_result'] = "检测结果未知"
single_result['detect_type'] = "待检测"
print "新类型,重新区分/还未检测"
elif default_type == 'jinshan':
single_result['detect_time'] = str(single_result['js_it'])
del single_result['js_it']
result = single_result.get('js_rs')
del single_result['js_rs']
single_result['detect_result'] = result
if result is not None and result in ["危险","安全","未知"]:
single_result['detect_type'] = result
else:
single_result['detect_result'] = "检测结果未知"
single_result['detect_type'] = "待检测"
elif default_type == 'macfree':
single_result['detect_time'] = str(single_result['mf_it'])
del single_result['mf_it']
result = single_result.get('mf_rs')
del single_result['mf_rs']
if result == "High Risk":
single_result['detect_result']="高度风险"
single_result['detect_type'] = "危险"
elif result == "Medium Risk":
single_result['detect_result'] = "中度风险"
single_result['detect_type'] = "危险"
elif result == "Minimal Risk":
single_result['detect_result'] = "低度风险"
single_result['detect_type'] = "安全"
elif result == "Univerified":
single_result['detect_result'] = "风险未知"
single_result['detect_type'] = "未知"
else:
single_result['detect_type'] = "待检测"
single_result['detect_result'] = "检测结果未知"
print "新类型,重新区分/还未检测"
elif default_type == 'virustotal':
single_result['detect_time'] = str(single_result['vt_it'])
del single_result['vt_it']
result = single_result.get('vt_rs')
del single_result['vt_rs']
if result is None:
single_result['detect_result'] = "检测结果未知"
single_result['detect_type'] = "待检测"
print "新类型,重新区分/还未检测"
else:
print result
result = eval(result)
single_result['detect_result'] = str(result['malicious rate'])
if str(result['malicious rate'])[0]!="0":
single_result['detect_type'] = "危险"
elif str(result['unrated_rate'])[0]!="0":
single_result['detect_type'] = "未知"
else:
single_result['detect_type'] = "安全"
else:
single_result = dict(
detect_result = '',
detect_type = '',
detect_time = ''
)
print "输入类型错误..."
return single_result
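    # Hypothetical example: extract_maltype({'tm_rs': u'安全', 'tm_it': '2018-01-01'}, 'tencent')
    # returns {'detect_result': u'安全', 'detect_type': u'安全', 'detect_time': '2018-01-01'}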
@staticmethod
def convert_shape(result,default_type):
"""
        Assemble the results into dict form.
        :param result: {_rs: detection result, _it: detection time}
        :param default_type: all/tencent/baidu/sanliuling/jinshan/macfree/virustotal
        :return: for 'all' the result is {
                'tencent': {detect_result: detection result, detect_type: security level, detect_time: detection time},
                'baidu': {detect_result: detection result, detect_type: security level, detect_time: detection time},
                ...
                'virustotal': {detect_result: detection result, detect_type: security level, detect_time: detection time}
                }
"""
if not isinstance(result,dict):
print "非字典形式,无法转换"
return result
else:
if default_type != 'all':
converted_result = QueryDetectResut.extract_maltype(result,default_type)
else:
tm_result = QueryDetectResut.extract_maltype(dict(
tm_rs =result['tm_rs'],
tm_it=result['tm_it']
),'tencent')
bd_result = QueryDetectResut.extract_maltype(dict(
bd_rs =result['bd_rs'],
bd_it=result['bd_it']
),'baidu')
sl_result = QueryDetectResut.extract_maltype(dict(
sl_rs =result['sl_rs'],
sl_it=result['sl_it']
),'sanliuling')
js_result = QueryDetectResut.extract_maltype(dict(
js_rs =result['js_rs'],
js_it=result['js_it']
),'jinshan')
mf_result = QueryDetectResut.extract_maltype(dict(
mf_rs =result['mf_rs'],
mf_it=result['mf_it']
),'macfree')
vt_result = QueryDetectResut.extract_maltype(dict(
vt_rs =result['vt_rs'],
vt_it=result['vt_it']
),'virustotal')
converted_result = {
'tencent':tm_result,
'baidu':bd_result,
'sanliuling':sl_result,
'jinshan':js_result,
'macfree':mf_result,
'virustotal':vt_result
}
return converted_result
def get_detect_results(self,domain,default_type='all'):
"""
        Get the detection-result dict for the given tool:
        {detect_result: detection result, detect_type: security level, detect_time: detection time}
        :param domain: domain to look up
        :param default_type: defaults to all; may be tencent/baidu/sanliuling/jinshan/macfree/virustotal
        :return: in the default case, {
                'tencent': {detect_result: detection result, detect_type: security level, detect_time: detection time},
                'baidu': {detect_result: detection result, detect_type: security level, detect_time: detection time},
                ...
                'virustotal': {detect_result: detection result, detect_type: security level, detect_time: detection time}
                }
"""
if default_type == 'tencent':
sql = "select TencentManager_result as tm_rs,tm_insert_time as tm_it " \
" from detect_results where domain=%s"
elif default_type == 'baidu':
sql = "select BaiduDefender_result as bd_rs,bd_insert_time as bd_it " \
" from detect_results where domain=%s"
elif default_type == 'sanliuling':
sql = "select sanliuling_result as sl_rs,sl_insert_time as sl_it " \
" from detect_results where domain=%s"
elif default_type == 'jinshan':
sql = "select jinshan_result as js_rs,js_insert_time as js_it " \
" from detect_results where domain=%s"
elif default_type == 'macfree':
sql = "select macfree_result as mf_rs,mf_insert_time as mf_it " \
" from detect_results where domain=%s"
elif default_type == 'virustotal':
sql = "select virustotal_result as vt_rs,vt_insert_time as vt_it " \
" from detect_results where domain=%s"
else:#default
sql = "select TencentManager_result as tm_rs,tm_insert_time as tm_it," \
"BaiduDefender_result as bd_rs,bd_insert_time as bd_it," \
"sanliuling_result as sl_rs,sl_insert_time as sl_it," \
"jinshan_result as js_rs,js_insert_time as js_it," \
"macfree_result as mf_rs,mf_insert_time as mf_it," \
"virustotal_result as vt_rs,vt_insert_time as vt_it " \
" from detect_results where domain=%s"
result = self.mysql_db.get(sql,domain)
for key in result:
if not result[key] and key != 'vt_rs' and key != 'vt_it':
result[key] = '---'
converted_result = QueryDetectResut.convert_shape(result,default_type)
return converted_result
if __name__ == "__main__":
qr = QueryDetectResut()
# print qr.get_detect_results('0-360c.com','tencent')
# print qr.get_detect_results('vns36066.com','virustotal')
# print qr.get_detect_results('0-360c.com', 'baidu')
# print qr.get_detect_results('0-360c.com', 'sanliuling')
# print qr.get_detect_results('0-360c.com', 'jinshan')
# print qr.get_detect_results('0-360c.com', 'macfree')
# print qr.get_detect_results('0-360c.com', 'virustotal')
# res = qr.get_detect_results('000000.in')
# for key,value in res.iteritems():
# print key
# for k,v in value.iteritems():
# print k
# print v
print qr.get_detect_results('000000.in')
# print qr.get_detect_results('0371tk.com')
| true |
608d17df7bae73e3f6d84734bedb86552b31209e
|
Python
|
cameronpepe/cs-3600-projects
|
/Project+4b/Project 4b/cpepe3_project4b/Testing.py
|
UTF-8
| 3,633 | 2.703125 | 3 |
[] |
no_license
|
from NeuralNetUtil import buildExamplesFromCarData,buildExamplesFromPenData, buildExamplesFromXorData
from NeuralNet import buildNeuralNet
import cPickle
from math import pow, sqrt
import sys
import getopt
def average(argList):
return sum(argList)/float(len(argList))
def stDeviation(argList):
mean = average(argList)
diffSq = [pow((val-mean),2) for val in argList]
return sqrt(sum(diffSq)/len(argList))
penData = buildExamplesFromPenData()
def testPenData(hiddenLayers = [24]):
return buildNeuralNet(penData,maxItr = 200, hiddenLayerList = hiddenLayers)
carData = buildExamplesFromCarData()
def testCarData(hiddenLayers = [16]):
return buildNeuralNet(carData,maxItr = 200,hiddenLayerList = hiddenLayers)
xorData = buildExamplesFromXorData()
def testXorData(hiddenLayers = [1]):
return buildNeuralNet(xorData, maxItr = 200, hiddenLayerList= hiddenLayers)
def q5():
print 'Question 5'
penAccuracy = []
carAccuracy = []
for i in range(5):
penAccuracy.append(testPenData()[1])
carAccuracy.append(testCarData()[1])
penStats = {'max': max(penAccuracy), 'avg': average(penAccuracy), 'stDev': stDeviation(penAccuracy)}
carStats = {'max': max(carAccuracy), 'avg': average(carAccuracy), 'stDev': stDeviation(carAccuracy)}
print 'pen statistics:'
print penStats
print 'car statistics:'
print carStats
def q6():
print 'Question 6'
penStats = {}
carStats = {}
for percepNum in xrange(0, 41, 5):
penAccuracy = []
carAccuracy = []
for i in range(5):
percep = [percepNum]
penAccuracy.append(testPenData(percep)[1])
carAccuracy.append(testCarData(percep)[1])
percepNumString = str(percepNum)
penStats[percepNumString] = {'max': max(penAccuracy), 'avg': average(penAccuracy), 'stDev': stDeviation(penAccuracy)}
carStats[percepNumString] = {'max': max(carAccuracy), 'avg': average(carAccuracy), 'stDev': stDeviation(carAccuracy)}
print 'pen statistics, ', percepNum, ' perceptrons'
print penStats[percepNumString]
print 'car statistics, ', percepNum, ' perceptrons'
print carStats[percepNumString]
print 'final pen statistics:'
print penStats
print 'final car statistics'
print carStats
def q7():
print 'question 7'
stats = {}
hlNum = 0
while True:
accuracy = []
for i in range(5):
hl = [hlNum]
accuracy.append(testXorData(hl)[1])
hlNumString = str(hlNum)
stats[hlNumString] = {'max': max(accuracy), 'avg': average(accuracy), 'stDev': stDeviation(accuracy)}
print 'stats, ', hlNumString, ' hidden layer perceptrons'
print stats[hlNumString]
if average(accuracy) == 1.0:
break
hlNum += 1
print 'final stats'
print stats
def main():
args = sys.argv
questions = ['q5','q6','q7']
if len(args)==1:
print '\nPlease input question number (5, 6, or 7). For example: -q q5'
else:
opts, args = getopt.getopt(args[1:],"q",["q="])
for opt, arg in opts:
if opt=='-q' or opt=='--q':
if args[0] in questions:
if args[0] == 'q5':
q5()
elif args[0] == 'q6':
q6()
else:
q7()
else:
print 'can only test q5 and q6 and q7'
else:
print '\nPlease input question number (5, 6, or 7). For example: -q q5'
if __name__=='__main__':
main()
| true |
809a56b96fda9c78d9b91fb1e931543c67a88b69
|
Python
|
vimarshc/SVGedit
|
/myarea.py
|
UTF-8
| 2,051 | 3.09375 | 3 |
[] |
no_license
|
def curvy(l):
    # Parse an SVG-style path string into a list of cubic segments.
    q = l.split()
    segments = abs(q)  # `abs` is this module's own parser below, not the builtin
    return segments
def areacal(l):
    # Sum the signed area contributions of every cubic segment in the path.
    q = l.split()
    segments = abs(q)
    ar = 0
    for x in segments:
        # x = [x0, y0, 'C', cx1, cy1, cx2, cy2, x3, y3]; index 2 is the 'C' marker
        ar = ar + area(x[0],x[1],x[3],x[4],x[5],x[6],x[7],x[8])
    return ar*(-1)
def area(x0,y0,x1,y1,x2,y2,x3,y3):
    # Closed-form signed area for a single cubic Bezier span.
    area = (3.0/10.0)*y1*x0 - (3.0/20.0)*y1*x2 - (3.0/20.0)*y1*x3 - (3.0/10.0)*y0*x1 - (3.0/20.0)*y0*x2 - (1.0/20.0)*y0*x3 + (3.0/20.0)*y2*x0 + (3.0/20.0)*y2*x1 - (3.0/10.0)*y2*x3 + (1.0/20.0)*y3*x0 + (3.0/20.0)*y3*x1 + (3.0/10.0)*y3*x2
    return area
def abs(strlist):
megalist = []
for i in range(len(strlist)):
if strlist[i] == 'c':
chis(strlist[i-2], strlist[i-1], i+1, strlist,megalist)
break
elif strlist[i] == 'l':
line(strlist[i-2], strlist[i-1],i+1,strlist,megalist)
break
return megalist
def chis(x,y,index, array,megalist):
chota=[int(x),int(y),
'C',int(x)+int(array[index]),int(y)+int(array[index+1]),
int(x)+int(array[index+2]),int(y)+int(array[index+3]),
int(x)+int(array[index+4]),int(y)+int(array[index+5])]
megalist.append(chota)
if index + 6 == len(array):
return
if array[index + 6].isdigit():
chis(int(x)+int(array[index+4]),int(y)+int(array[index+5]),index+6,array,megalist)
if array[index + 6] == 'l':
line(int(x)+int(array[index+4]),int(y)+int(array[index+5]),index+7,array,megalist)
def line(x,y,index,array,megalist):
chota=[int(x),int(y),
'C',(int(x)+int(array[index]))/2, (int(y)+int(array[index+1]))/2,
(2*int(x)+int(array[index]))/3, (2*int(y)+int(array[index+1]))/3,
(int(x)+int(array[index])),(int(y)+int(array[index+1]))]
megalist.append(chota)
if index+2 == len(array):
return
if array[index+2].isdigit():
line((int(x)+int(array[index])),(int(y)+int(array[index+1])), index+2, array,megalist)
if array[index+2] == 'c':
chis((int(x)+int(array[index])),(int(y)+int(array[index+1])),index+3, array,megalist)
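# Minimal usage sketch (hypothetical SVG-style relative path data: a start point
# followed by one relative cubic 'c' segment):
if __name__ == "__main__":
    print(areacal("100 100 c 10 0 20 10 30 10"))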
| true |
709a5540b99e26e7fb1f3a4e0859aad01ac592b9
|
Python
|
nhardy/etude-01
|
/main.py
|
UTF-8
| 5,601 | 3.84375 | 4 |
[] |
no_license
|
#!/usr/bin/env python3
"""
Etude 1: Ants on a Plane
Authors: Kimberley Louw, Nathan Hardy
Simulation of creatures related to Langton's Ant.
"""
import re
import sys
# Mathematical convention of directional step to coordinate
_DIRECTIONS = {
'N': (0, 1),
'E': (1, 0),
'S': (0, -1),
'W': (-1, 0),
}
# Ordered direction list
_DIRS = ['N', 'E', 'S', 'W']
# Regular expressions
_DNA_STRAND = re.compile(r'(\S) ([NESW]{4}) (\S{4})', re.I)
_IS_NUMERIC = re.compile(r'^\d+$')
class Strand:
"""
DNA Strand class used for abstraction
"""
def __init__(self, initial: str, directions: str, states: str):
self.initial = initial
self._in_to_out_direction = {
direction: directions[index] for index, direction in enumerate(_DIRS)
}
self._in_to_out_state = {
direction: states[index] for index, direction in enumerate(_DIRS)
}
@classmethod
def from_raw(cls, raw: str):
"""
Returns a new Strand object, given its raw string representation
"""
match = _DNA_STRAND.match(raw)
initial = match.group(1)
directions = match.group(2).upper()
states = match.group(3)
return cls(initial, directions, states)
def out_direction(self, in_direction: str) -> str:
"""
Given an in_direction, returns the corresponding out direction
for the current strand
"""
return self._in_to_out_direction[in_direction]
def out_state(self, in_direction: str) -> str:
"""
Given an in_direction, returns the corresponding out state
for the current strand
"""
return self._in_to_out_state[in_direction]
def __str__(self) -> str:
return '{} {} {}'.format(
self.initial,
''.join(map(self.out_direction, _DIRS)),
''.join(map(self.out_state, _DIRS)),
)
class Ant:
"""
Class for the Ant
"""
def __init__(self, plane, strands: list):
self._plane = plane
# Create a map of key-value pairs;
# Key: Initial state that the strand applies to
# Value: Strand (including rules)
self._strands = {
strand.initial: strand for strand in strands
}
self.x = 0 # Ant x position
self.y = 0 # Ant y position
# Assuming the Ant's initial step was to the North
self._previous = 'N'
def move(self):
"""
Moves the Ant once, according to its internal rules
"""
# Get current state
state = self._plane.get_state(self.x, self.y)
# Get relevant strand
strand = self._strands[state]
# Set Plane state for Ant's current position to the new
# value, as determined by the strand
self._plane.set_state(
self.x,
self.y,
strand.out_state(self._previous),
)
# Get the appropriate direction from the strand
# based on the previous step direction
direction = strand.out_direction(self._previous)
# Get the x and y delta for the new direction
dx, dy = _DIRECTIONS[direction]
# move ant
self.x += dx
self.y += dy
self._previous = direction # update previous directional step
class Plane:
"""
Infinite Plane class, helpful for abstracting away default cell state
"""
def __init__(self, default: str):
self.default = default
# Initialise cells as a dictionary of coordinate tuples to states
# This way, the plane can extend infinitely without having to worry
# about allocating space in a list
self._cells = {
(0, 0): self.default,
}
def get_state(self, x: int, y: int) -> str:
"""
Gets the current state for the point (x, y)
"""
if (x, y) in self._cells:
return self._cells[(x, y)]
return self.default
def set_state(self, x: int, y: int, state: str) -> str:
"""
Sets the state at (x, y)
"""
self._cells[(x, y)] = state
class Scenario:
"""
Scenario class initialises the Ant, and provides a __str__ method
which prints the Scenario in the format described in the etude
"""
def __init__(self, strands: list, steps: int):
default_state = strands[0].initial
self.strands = strands
self.ant = Ant(Plane(default_state), strands)
self.steps = steps
for _ in range(steps):
self.ant.move()
def result(self) -> str:
"""
Method returns the final position of the Ant, in the
format specified by the etude
"""
return '# {} {}'.format(self.ant.x, self.ant.y)
def __str__(self) -> str:
return '\n'.join([
*list(map(str, self.strands)),
str(self.steps),
self.result(),
])
def main():
"""
Main program method
"""
scenarios = []
strands = []
for unstripped_line in sys.stdin.readlines():
line = unstripped_line.strip()
if line == '': # ignore blank lines
continue
if line.startswith('#'): # ignore comments
continue
if _IS_NUMERIC.match(line): # number of steps ending scenario
steps = int(line)
scenarios.append(Scenario(strands, steps))
strands = []
else: # DNA strand
strands.append(Strand.from_raw(line))
print('\n\n'.join(map(str, scenarios)))
if __name__ == '__main__':
main()
| true |
9a165730486ed2037ee753eb62431c4586769c14
|
Python
|
jubaer145/jina
|
/jina/jaml.py
|
UTF-8
| 8,560 | 2.9375 | 3 |
[
"Apache-2.0"
] |
permissive
|
import collections
import os
import re
from types import SimpleNamespace
from typing import Dict, Any
import yaml
from yaml import MappingNode
from yaml.composer import Composer
from yaml.constructor import ConstructorError, FullConstructor
from yaml.parser import Parser
from yaml.reader import Reader
from yaml.resolver import Resolver
from yaml.scanner import Scanner
__all__ = ['JAML']
class JAML:
"""A Jina style YAML loader and dumper, a wrapper on PyYAML.
To use it:
.. highlight:: python
.. code-block:: python
from jina.jaml import JAML
JAML.load(...)
JAML.dump(...)
class DummyClass:
pass
JAML.register(DummyClass)
You can use expressions to programmatically set variables in YAML files and access contexts.
An expression can be any combination of literal values, references to a context, or functions.
You can combine literals, context references, and functions using operators.
You need to use specific syntax to tell Jina to evaluate an expression rather than treat it as a string.
.. highlight:: yaml
.. code-block:: yaml
${{ <expression> }}
To evaluate (i.e. substitute the value to the real value) the expression when loading, use :meth:`load(substitute=True)`.
To substitute the value based on a dict,
.. highlight:: python
.. code-block:: python
obk = JAML.load(fp, substitute=True,
context={'context_var': 3.14,
'context_var2': 'hello-world'})
"""
@staticmethod
def load(stream,
substitute: bool = False,
context: Dict[str, Any] = None):
"""Parse the first YAML document in a stream and produce the corresponding Python object.
:param substitute: substitute environment, internal reference and context variables.
:param context: context replacement variables in a dict, the value of the dict is the replacement.
"""
r = yaml.load(stream, Loader=JinaLoader)
if substitute:
r = JAML.expand_dict(r, context)
return r
@staticmethod
def load_no_tags(stream, **kwargs):
"""Load yaml object but ignore all customized tags, e.g. !Executor, !Driver, !Flow
"""
safe_yml = '\n'.join(v if not re.match(r'^[\s-]*?!\b', v) else v.replace('!', '__tag: ') for v in stream)
return JAML.load(safe_yml, **kwargs)
@staticmethod
def expand_dict(d: Dict, context: Dict = None, resolve_cycle_ref=True) -> Dict[str, Any]:
from .helper import parse_arg
expand_map = SimpleNamespace()
context_map = SimpleNamespace()
env_map = SimpleNamespace()
pat = re.compile(r'\${{\s*([\w\[\].]+)\s*}}')
def _scan(sub_d, p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict):
p.__dict__[k] = SimpleNamespace()
_scan(v, p.__dict__[k])
elif isinstance(v, list):
p.__dict__[k] = list()
_scan(v, p.__dict__[k])
else:
p.__dict__[k] = v
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict):
p.append(SimpleNamespace())
_scan(v, p[idx])
elif isinstance(v, list):
p.append(list())
_scan(v, p[idx])
else:
p.append(v)
def _replace(sub_d, p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict) or isinstance(v, list):
_replace(v, p.__dict__[k])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[k] = _sub(v, p)
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict) or isinstance(v, list):
_replace(v, p[idx])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[idx] = _sub(v, p)
def _sub(v, p):
v = re.sub(pat, '{\\1}', v)
if resolve_cycle_ref:
try:
# "root" context is now the global namespace
# "this" context is now the current node namespace
v = v.format(root=expand_map, this=p, ENV=env_map)
except KeyError:
pass
try:
v = v.format_map(context)
except KeyError:
pass
if isinstance(v, str):
v = parse_arg(v)
return v
_scan(d, expand_map)
_scan(dict(os.environ), env_map)
_replace(d, expand_map)
return d
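    # Hedged usage sketch: with context={'var': 3.14}, a string value '${{var}}'
    # in d is rewritten by _sub to '{var}', falls through the root/this/ENV
    # format pass, and is then resolved via format_map(context) and parse_arg to 3.14.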
@staticmethod
def dump(data, stream=None, **kwargs):
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
"""
return yaml.dump(data, stream=stream, default_flow_style=False, **kwargs)
@staticmethod
def register(cls):
"""register a class for dumping loading
- if it has attribute yaml_tag use that to register, else use class name
- if it has methods to_yaml/from_yaml use those to dump/load else dump attributes
as mapping
"""
tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
try:
yaml.add_representer(cls, cls.to_yaml)
except AttributeError:
def t_y(representer, data):
return representer.represent_yaml_object(
tag, data, cls, flow_style=representer.default_flow_style
)
yaml.add_representer(cls, t_y)
try:
yaml.add_constructor(tag, cls.from_yaml, JinaLoader)
except AttributeError:
def f_y(constructor, node):
return constructor.construct_yaml_object(node, cls)
yaml.add_constructor(tag, f_y, JinaLoader)
return cls
class JinaConstructor(FullConstructor):
"""Convert List into tuple when doing hashing"""
def get_hashable_key(self, key):
try:
hash(key)
        except TypeError:
if isinstance(key, list):
for i in range(len(key)):
if not isinstance(key[i], collections.abc.Hashable):
key[i] = self.get_hashable_key(key[i])
key = tuple(key)
return key
raise ValueError(f'unhashable key: {key}')
return key
def construct_mapping(self, node, deep=True):
if isinstance(node, MappingNode):
self.flatten_mapping(node)
return self._construct_mapping(node, deep=deep)
def _construct_mapping(self, node, deep=True):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id,
node.start_mark)
mapping = {}
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=True)
if not isinstance(key, collections.abc.Hashable):
try:
key = self.get_hashable_key(key)
except Exception as exc:
raise ConstructorError('while constructing a mapping', node.start_mark,
'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
class JinaResolver(Resolver):
"""Remove `on|On|ON` as bool resolver"""
pass
# remove on|On|ON resolver
JinaResolver.yaml_implicit_resolvers.pop('o')
JinaResolver.yaml_implicit_resolvers.pop('O')
class JinaLoader(Reader, Scanner, Parser, Composer, JinaConstructor, JinaResolver):
def __init__(self, stream):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
JinaConstructor.__init__(self)
JinaResolver.__init__(self)
| true |
40be1aaee875dc1389c6435d6152d6e4f6a16333
|
Python
|
AlPus108/Python_lessons
|
/Generators/3_generator_functions.py
|
UTF-8
| 21,728 | 3.640625 | 4 |
[
"MIT"
] |
permissive
|
# ГЕНЕРАТОРЫ
# Генераторы, это итераторы. При помощи генераторов мы можем перебирать какой-то iyerable
# Не каждый итератор является генератором.
# Но, все генераторы являются итераторами.
# Генераторы могут быть созданы при помощи ф-и generator
# Так же, генераторы могут быть созданы при помощи generator expressions (генераторы выражений)
# Ничего общего с генераторами последовательностей здесь нет.
# Генераторы, это отдельная сущность.
# Необходимость использования генераторов возникает всегда, когда вы осуществляете перебор большого число объектов.
# В этой ситуации (при большом числе объектов) возникает необходимость хранить большое количество информации в памяти,
# что конечно же неудобно, будь то приложение на сервере, будь то статическое приложение на локальном компьютере.
# Забивается кэш и это сказывается на быстродействии процессов. Иногда бывают и такие объекты, которые просто
# не помещаются в память. На помощь в данной ситуации приходят генераторы.
# Например, нам необходимо хранить миллион записей и среди этого миллиона найти что-то.
# Это можно сделать по разному. Мы можем загрузить все эти записи в память, затем бегать по кэшу и искать
# нужные нам значения. Но, это в случае, если позволяет память.
# Но, чаще, для такой процедуры испльзуют генераторы. Это метод просмотра большого объема информации без использования
# памяти компьютера. Генератор каждый раз вынимает один элемент из того множества, в котором осуществляется перебор.
# Здесь разберем, как создавать генераторы при помощи ф-й генераторов
# Мы уже разбирали ф-и, которые могут возвращать какое-то значание.
def my_function(x):
return x
my_function(4) # 4 - получаем значение, возвращаемое этой ф-ей
# ф-и генераторы тоже возвращают значения, но они могут возвращать значения из последовательности несколько раз.
# И возвращают они это значение не при помощи ключевого слова return, а при помощи слова yield (йилд)
# Слово yield в английском языке имеет много значений: уступать, выработать. То есть - вырабатываются какие-то значения.
# И вырабатываться они могут несколько раз.
# В случае с обычной ф-ий мы получаем возвращаемое значение. В случае с функцией-генераторм, мы возвращаем генератор.
# Генератор, в свою очередь, является итератором.
# Это немного запутано, поэтому, лучше разобрать на практике.
# Создадим нашу первую функцию-генератор
# Пример генератора
def coundown(num):
print('String')
while num > 0:
yield num
num -= 1
# Здесь ключевое слово yield - это операция вычисления следующего значения генератора.
# В одни момент времени оно вычсисляет одно значение и передает его.
def count_up_to(x): # 'считать до..'
count = 1 # создаем переменную с начальным значением
while count <= x:
yield count
count += 1
# Запускаем ф-ю
print(count_up_to(3))
# <generator object count_up_to at 0x02E9EAE0>
# Мы получаем generator object - объект класса generator
# Эта ф-я произвела генератор.
# Когда мы используем в ф-и ключевое слово yield, автоматически мы получаем из этой ф-и генератор.
# С этим генератором мы можем поместить значение, которое вырабатывает эта ф-я, в переменную.
counter = count_up_to(3)
# Nак как это генератор, который в свою очередь является итератором, у него есть метод next()
# И этот метод мы можем назначить переменной.
# print(counter.__next__()) # 1
# # если повторим
# print(counter.__next__()) # 2
# print(counter.__next__()) # 3
# print(counter.__next__()) # StopIteration
# На последенм шаге получаем ошибку StopIteration
# То же самое, по-мимо метода next(), мы можем проделать при помощи ф-и next.
print(next(counter)) # 1
print(next(counter)) # 2
print(next(counter)) # 3
# Что же тут происходит?
# def count_up_to(x): # 'считать до..'
# count = 1 # создаем переменную с начальным значением
# while count <= x:
# yield count
# count += 1
# Эта фунция-генератор count_up_to, которая генерирует сам генератор,
# который мы потом помещаем в counter = count_up_to(3), он запускается.
# После слова yield вырабатывается текущее значение, которое помещено в count.
# После этого ф-я как-бы становится на паузу, засыпает.
# После этого значение возвращается из этой ф-и, count прибавляется 1, count += 1. Ф-я остановлена.
# И, при последующем запуске в count уже находится не 1, а 2. То есть, запоминается предыдущее значение.
# Оно не обнуляется, как в случае с обычной ф-ей. И, каждый раз, мы получаем новое значение до тех пор,
# пока не будет получена ошибка StopIteration
# То есть, при помощи ф-и генератора, мы можем легко получать пследовательность iterable и использовать ее в цикле.
for number in count_up_to(10): # вызываем ее в цикле
print(number)
# получаем вывод от 1 до 10
# То есть, нам не нужно имплементировать для этого, как мы делали в кастомном классе MyRange (пакет 2_custom_iterable)
# методы next и iter.
# Здесь при помощи вот такой ф-и генератора мы можем создавать такую же функциональность.
# Фишка в том, что при помощи ф-и генератора, когда мы вырабатываем значение с помощью ключевого слова yield,
# мы не обнуляем все наши действия. И, в следующий раз, при вызове метода next, будет использовано значение не с начала,
# а с того значения, на котором была остановка.
# В цикле for остановка происходит также на выбросе ошибки StopIteration, но там эта ошибка обрабатывается
# при помощи конструкции try - except, поэтому мы ее не замечаем
# и программа не останавливается и выполянет следующий код.
# Резюме по созданной нами ф-и count_up_to()
# Здесь мы также можем работать не только с числами, но и с любыми последовательностями и производить любые манипуляции.
# Эта ф-я, логика которой может быть какой угодно до ключевого слова yield, которое вырабатываем какое-то значение,
# которое будет помещено в переменную. Затем ф-я останавливается/засыпает и сохраняет свое состояние -
# запоминает, где она остановилась. И, при следующем вызове, будет продолжена работа с запомненного значения.
# То есть, будет выработан следующий элемент последовательности. Каждый раз вырабатывается только один элемент из
# последовательности. Мы, конечно, можем сгенерировать все элементы последовательности при помощи list
print(list(count_up_to(7))) # [1, 2, 3, 4, 5, 6, 7]
# получим последовательность в форме списка
# Важно почувствать разницу между генераторами списков и просто генераторами.
# ---------------------------- Генератор списков -----------------------------------
n = 100
simple_list = [x ** 3 for x in range(n)] # вычисляем кубическое значение для каждого элемента последовательности
print(simple_list) # [0, 1, 8, 27, 64]
print(type(simple_list)) # <class 'list'>
# Выведем элементы данного списка построчно
for i in simple_list:
print(i)
# 0
# 1
# 8
# 27
# 64
# -------------------------------- Неявный генератор ----------------------------
# Неявные генераторы создаются с помощью круглых скобок
simple_generator = (x ** 3 for x in range(n))
print(simple_generator) # <generator object <genexpr> at 0x00DCCA38>
print(type(simple_generator)) # <class 'generator'>
for i in simple_generator:
print(i)
# 0
# 1
# 8
# 27
# 64
# Казалось бы, результат одинаковый.
# Но, давайте проверим, какую память занимают оба типа.
# Для этого выведем модуль sys
import sys
print(f'Занятая память при генераторе списков: {sys.getsizeof(simple_list)}')
print(f'Занятая память при неявном генераторе {sys.getsizeof(simple_generator)}')
# При малых значениях передаваемой переменной 'n', разница не ощутима, но если увеличить вычисляемые значения,
# то разница будет значительной.
# При значении n = 100:
# Занятая память при генераторе списков: 452
# Занятая память при неявном генераторе 56
# У списка используемая память растет согласно количеству используемых элементов в листе.
# У генератора ничего не меняеттся, как было 56 при малых значениях, так и остатется 56 при увеличении значения.
# Потому что генератор каждый раз работает только с одним элементом.
# Ему нет необходимости хранить все эти данные в памяти.
# Лист же хранит в памяти все элементы последовательности.
# Вопрос: почему в случае неявного генератора всегда 56 байт, а не меньше?
# Ведь для хранения одного числа требуется гараздо меньше.
# Ответ: в неявном генераторе записан еще алгоритм выработки следующего элемента последовательности.
# Почему генератор называется неявным?
# Это генератор, но очень простой. В нем не используются никакие особые свойства генераторов.
# --------------------------------- Явные генераторы -------------------------------------------
# Для начала напишем явный генератор, который в предыдущем коде создан неявно
# Генераторы определяются через функции.
def generator_example_1(num): # на вход подаем то количество элементов, которое хотим создать.
for i in range(num):
yield i ** 3 # вычисляем куб каждого элемента последовательности
# Дальше, чтобы этот генератор инизиализивать, нам нужно его вызвать и использвать ключевое слово next
gen = generator_example_1(10)
print(next(gen)) # 0
print(next(gen)) # 1
print(next(gen)) # 8
print(next(gen)) # 27
# Кажыдый раз, когда мы вызываем next, происходит вычисленипе следующего элемента в генераторе,
# также, как в неявном генераторе и следующий элемент поступает на выход.
# Это удобне еще и тем, что мы можем генератор вызывать их любого места программы,
# и он каждый раз выдаст следующий элемент.
# Часто это в коде нужно - такое наравномерное использование массивов.
# Когда нужно вот такое неравномерное использование, тогда генераторы сильно помогают.
# Более сложный пример явного генератора (то есть генератор реализованный через код без ф-и yield)
# Создадим список из машин и цветов их раскраски, и померяем объем памяти и время выполнения,
# которое необходимо для обхода полученного списка.
# import the necessary modules
print('Using a plain list-building function')
import time
import os
import random  # for building the random lists
import psutil  # a module for measuring how much memory the process occupies
# List of paint colors
colors = ['White', 'Black', 'Green']
# List of car brands
brands = ['Volvo', 'BMW', 'Audi']
# A function that builds the list of brand-color-id entries
def cars(num):
    cars_list = []
    for i in range(num):
        car = {'color': random.choice(colors),
               # pick a random color from the list and store it under the 'color' key
               'brand': random.choice(brands),
               # pick a random brand from the list and store it under the 'brand' key
               'id': i}
        cars_list.append(car)  # append the created dictionary to the list
    return cars_list
# Before running the function, measure the memory in use.
# For that we use the psutil library
proc = psutil.Process(os.getpid())  # get a handle to the current process and store it in proc
print('Memory used before calling the function: ' + str(proc.memory_info().rss/1000000))
# This reports the memory in use before the call, via the memory_info() method.
# The rss attribute holds the process's current resident memory.
# Dividing rss by 1000000 makes it readable (megabytes).
# Record the start time of the call
start = time.process_time()
# Now call our function.
# For cars_list to be materialized in memory, the function must actually be called.
cars_list = cars(1000000)  # pass the number of objects to create and store the result
# Record the end time of the call
stop = time.process_time()
# Measure the memory again after the call
proc = psutil.Process(os.getpid())  # re-read the current memory usage
# Print the results:
print('Memory used after calling the function: ' + str(proc.memory_info().rss/1000000))
print('It took {} seconds'.format(stop - start))
# Memory used before calling the function: 12.816384
# Memory used after calling the function: 164.179968
# It took 2.6875 seconds
# Now let's do the same thing, but with an explicit generator.
# Before running this part, comment out the list-building version above (the previous code).
print('\nUsing an explicit generator')
def cars_gen(num):
    # no list is needed here
    for i in range(num):
        car = {'color': random.choice(colors),
               # pick a random color from the list and store it under the 'color' key
               'brand': random.choice(brands),
               # pick a random brand from the list and store it under the 'brand' key
               'id': i}
        # instead of return, use the yield keyword inside the loop
        yield car  # hand back one object at a time
# The generator is defined; now create the generator object
proc = psutil.Process(os.getpid())
print('Memory used before calling the function: ' + str(proc.memory_info().rss/1000000))
start = time.process_time()
cars_generator = cars_gen(1000000)
stop = time.process_time()
proc = psutil.Process(os.getpid())
print('Memory used after calling the function: ' + str(proc.memory_info().rss/1000000))
print('It took {} seconds'.format(stop - start))
# Memory used before calling the function: 12.77952
# Memory used after calling the function: 12.808192
# It took 0.0 seconds
# The result: memory usage barely changed after the call, and it took no measurable time,
# because creating the generator object does not build any cars yet.
# By requesting a million cars element by element, we take a huge load off the computer's resources.
# This is exactly why explicit generators are used instead of building full lists up front.
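# A closing sketch (illustrative): only the elements we actually pull from the generator get built.
for car in cars_generator:
    if car['id'] >= 2:
        break  # stop early; the remaining ~million cars are never created
    print(car)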
| true |
4d4a379d90ff7d9a0664384ba11068dff0bbff8f
|
Python
|
dgrzan/MarketAnalysis
|
/Project2/project2.py
|
UTF-8
| 2,452 | 3.046875 | 3 |
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
import random
from matplotlib.backends.backend_pdf import PdfPages
from scipy import stats
import math
from scipy.special import factorial
from scipy.optimize import curve_fit
def poisson(k,lamb):
return (lamb**k/factorial(k)) * np.exp(-lamb)
if __name__ == "__main__":
with PdfPages("Project2_David_Grzan.pdf") as pdf:
        #makes gaussian histogram via the central limit theorem:
        #the sum of n2 uniform(-1,1) draws has variance n2/3, so dividing by sqrt(n2/3) normalizes it to unit variance
        n = 100000
        n2 = 100
        garray = []
        a = 0
        for j in range(0,n):
            for i in range(0,n2):
                a+=random.uniform(-1,1)
            garray.append(a*(float(n2/3))**(-1/2))
            a = 0
plt.figure()
plt.title("Gaussian Distribution")
plt.hist(garray,100,density=0,range=(-4,4),facecolor="g")
pdf.savefig()
plt.close()
#makes normalized gaussian histogram with fit
m, s = stats.norm.fit(garray)
line = stats.norm.pdf(np.linspace(-4,4,100),m,s)
fig = plt.figure()
plt.plot(np.linspace(-4,4,100),line)
plt.title("Normalized Gaussian Distribution")
plt.hist(garray,100,density=1,range=(-4,4),facecolor="r")
plt.text(0.75,0.4,"Mean: {:.2f}, Sigma: {:.2f}".format(m,s),size=10)
pdf.savefig()
plt.close()
        #makes poisson histogram using Knuth's algorithm: keep multiplying uniform(0,1)
        #draws until the running product drops below e**(-lamb); the number of factors
        #needed, minus one, is Poisson(lamb) distributed
e = math.e
parray = []
lamb = 5
k = 0
p = 1.0
L = e**(-lamb)
for i in range(0,100000):
while p>L:
k = k+1
p = p*random.uniform(0,1)
parray.append(k-1)
p = 1.0
k = 0
plt.figure()
plt.title("Poisson Distribution")
plt.hist(parray,20,density=0,range=(-0.5,19.5),facecolor="g")
pdf.savefig()
plt.close()
#makes normalized poisson histogram with fit
plt.figure()
plt.title("Normalized Poisson Distribution")
entries, bin_edges, patches = plt.hist(parray,20,density=1,range=(-0.5,19.5),facecolor="r")
print(bin_edges)
bin_middles = 0.5*(bin_edges[1:] + bin_edges[:-1])
parameters, cov_matrix = curve_fit(poisson, bin_middles, entries)
xaxis = np.linspace(0,20,1000)
plt.plot(xaxis, poisson(xaxis, *parameters))
print(parameters)
plt.text(10,0.125,"Lambda: {:.2f}".format(*parameters),size=10)
pdf.savefig()
plt.close()
| true |
b4c63c94e828515b809719fb8f2f263f5685375c
|
Python
|
stang84/Debug
|
/jupyter-image-display-error/rename.py
|
UTF-8
| 386 | 3.1875 | 3 |
[] |
no_license
|
import os
import glob
ext = input("Enter file extension type: ")
while not (ext and ext[0].isalpha()):
    ext = input("Enter file extension type after the '.': ")
target = ext  # remember the casing the user asked for
cap = ext[0].isupper()
n = len(ext)
# flip the case so we match files whose extension is in the opposite case
if cap:
    ext = ext.lower()
else:
    ext = ext.upper()
files = glob.glob('*.'+ ext)
for file in files:
    print(file, 'converted to', file[:-n]+target)
    os.rename(file, file[:-n]+target)
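# Illustrative behavior (assuming such files exist in the working directory):
# entering "txt" renames report.TXT -> report.txt; entering "TXT" renames report.txt -> report.TXT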
| true |
b4e93311b5d410c4625e4374d3963bd363ad90c0
|
Python
|
pbuzzo/Craigslist-Scraper
|
/scraper.py
|
UTF-8
| 5,535 | 2.703125 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Given at least one webpage,
search for certain price points and products availability routinely.
"""
from bs4 import BeautifulSoup
import requests
# import schedule
import time
from datetime import datetime
import logging
import argparse
import sys
import signal
__author__ = "Patrick Buzzo"
stdout_fileno = sys.stdout
logger = logging.getLogger(__file__)
url = "https://chicago.craigslist.org/d/software-qa-dba-etc/search/sof"
craigspage = requests.get(url)
logger.info('Downloading page %s...' % url)
logger.info('Checking for posts...')
craigspage.raise_for_status()
soup = BeautifulSoup(craigspage.content, 'html.parser')
exit_flag = False
keyword_list = [
'developer',
'PHP',
'tech'
]
def signal_handler(sig, frame):
"""
When SIGINT or SIGTERM is provided, this function will
alter global exit flag variable to cause an exit
"""
logger.info('Signal Received By Program: ' + signal.Signals(sig).name)
global exit_flag
    if sig in (signal.SIGINT, signal.SIGTERM):
logger.info('Shutting Down Craigslist Program...')
# Will cause termination of program, when changed to true,
# the program terminates
exit_flag = True
def searcher():
with open('craigslist.txt', 'a+') as medFile:
medFile.write(
f"<----------- Results From: {datetime.now()} START ----------->\n\n"
)
liTag = soup.find_all("li", {"class": "result-row"})
for div in liTag:
pTags = div.find_all("p", {"class": "result-info"})
for pars in pTags:
aTags = pars.find_all("a", {"class": "result-title"})
for anchor in aTags:
for word in keyword_list:
lowered = word.lower()
lower_anchor = anchor.text.lower()
if lowered in lower_anchor:
medFile.write("Title: " + anchor.text + " URL: " + anchor.get('href'))
medFile.write('\n')
medFile.write(
f"\n<----------- Results From: {datetime.now()} FINISH ----------->"
)
line_count = 0
lines_seen = set() # holds lines already seen
with open("output_file.txt", "w+") as output_file:
for each_line in open("craigslist.txt", "r"):
if each_line not in lines_seen: # check if line is not duplicate
if each_line[0] != "<":
if each_line[0] != "\n":
output_file.write(each_line)
lines_seen.add(each_line)
with open("output_file.txt", "r") as output_file:
count = 0
for line in output_file:
count += 1
if count == 0:
line_count = count
elif line_count < count:
logger.info(f"New Found Jobs!\nCurrent Findings In Queue: {count}")
line_count = count
elif line_count == count:
logger.info(f"No New Jobs Found!\nCurrent Findings In Queue: {count}")
logger.info('Rechecking for posts...')
def create_parser():
"""
Create parser to parse command line arguments supplied by user
"""
parser = argparse.ArgumentParser()
parser.add_argument('--keys', type=str, nargs="+", default="manager",
help='Keywords to watch for')
parser.add_argument('--interval', type=float, default=10.0,
help='Number of seconds between scraping Craigslist')
return parser
def emailer():
"""
Create function to routinely search through
    job results and email user if new ones are found since last check.
"""
def main():
# connect logger with console output, ref Piero walkthrough
logging.basicConfig(
filename='dirwatcher.log',
level=logging.DEBUG,
format='%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s [%(threadName)-12s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
)
logger.setLevel(logging.DEBUG)
# Timestamp
app_start_time = datetime.now()
# Start banner
logger.info(
'\n'
'---------------------------------------------------------------\n'
' Running {0}\n'
' Started on {1}\n'
'---------------------------------------------------------------\n'
.format(__file__, app_start_time.isoformat())
)
# Parse command-line arguments to be used
parser = create_parser()
args = parser.parse_args()
# Watch for SIGINT or SIGTERM during running of program
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
while not exit_flag:
try:
searcher()
except OSError:
logger.error('This directory does not exist')
time.sleep(5)
except Exception as e:
logger.error('Unhandled exception: {}'.format(e))
# execute watch_dirs() every [args.interval] seconds
time.sleep(args.interval)
# Convert uptime of program to be displayed in closing banner
uptime = datetime.now() - app_start_time
# Closing banner
logger.info(
'\n'
'---------------------------------------------------------------\n'
' Stopped {0}\n'
' Uptime on {1}\n'
'---------------------------------------------------------------\n'
.format(__file__, str(uptime))
)
if __name__ == '__main__':
main()
| true |
bd75b49f0b1f036a1a975dece9b37d906a71de45
|
Python
|
DiamondHusky/CrawlerDemo
|
/02DataStorage/mongo_demo/demo1.py
|
UTF-8
| 994 | 2.796875 | 3 |
[] |
no_license
|
import pymongo
# Get a client object connected to MongoDB
client = pymongo.MongoClient("127.0.0.1",port=27017)
# Get the database (it is fine if the zhihu database does not exist yet)
# db = client.wade
db = client['admin']
db.authenticate('sa', 'sa')  # note: newer PyMongo versions drop authenticate(); credentials are passed to MongoClient instead
# Get the collection in the database (qa may also not exist yet)
collection = db.qa
# Insert data
# collection.insert({"username":"靓仔"})
collection.insert_many([
{
"username":"aaa",
"age":20
},
{
"username":"aaa",
"age":19
}
])
# Query data
# cursor = collection.find()
# for x in cursor:
#     print(x)
# 2. Get a single document from the collection
# result = collection.find_one({"age":18})
# print(result)
# Update data
# collection.update_one({"username":"aaa"},{"$set":{"username":"ccc"}})
# Update many: change every "aaa" document to "hhh"
# collection.update_many({"username":"aaa"},{"$set":{"username":"hhh"}})
# Delete data
# collection.delete_one({"username":"Kobe"})
collection.delete_many({"username":"aaa"})
| true |
a971b0b7d1f1c8dae84b7e3b34dbe9f6053a4be7
|
Python
|
Rutrle/The-Self-Taught-Programmer-by-Althoff
|
/chapter_6/ch_6_challenge_5.py
|
UTF-8
| 352 | 3.78125 | 4 |
[] |
no_license
|
given_list = ["The", "fox", "jumped", "over", "the", "fence", "."]
created_string = " ".join(given_list)
#not needed, but a general fix for any stray space before a period
while " ." in created_string:
    i = created_string.index(" .")
    created_string = created_string[:i] + created_string[i+1:]
print(created_string)
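# An equivalent one-liner would be:
# created_string = created_string.replace(" .", ".")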
| true |
b4d3267a45ab0c1cb43266580168ee404df56fb7
|
Python
|
WweiL/LeetCode
|
/93-restore_IP_addr.py
|
UTF-8
| 693 | 2.765625 | 3 |
[] |
no_license
|
class Solution:
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
if len(s) > 12:
return []
ans = []
self.addr("", ans, 4, s, 0)
return ans
    def addr(self, tmp, ans, numBlock, s, i):
        n = len(s)
        if numBlock == 0 and i == n:  # used all four blocks and consumed the whole string
            ans.append(tmp[:-1])  # drop the trailing '.'
        else:
            if i < n and s[i] == '0':
                # a block may be a lone '0', but no multi-digit block may start with '0'
                self.addr(tmp+'0.', ans, numBlock-1, s, i+1)
            for j in range(1, 4):  # try blocks of length 1-3
                if i+j <= n and int(s[i:i+j]) <= 255 and s[i] != '0':
                    self.addr(tmp+s[i:i+j]+".", ans, numBlock-1, s, i+j)
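# Illustrative usage (not part of the original file):
# print(Solution().restoreIpAddresses("25525511135"))  # ['255.255.11.135', '255.255.111.35']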
| true |
938f870aaba3f9f61388a3c337501e6111d42868
|
Python
|
laurenriddle/The-Family-Dictionary-practice-python-bk1-ch5
|
/family_dict.py
|
UTF-8
| 718 | 4.875 | 5 |
[] |
no_license
|
# Define a dictionary that contains information about several members of your family.
my_family = {
"sister": {
"name": "Krista",
"age": 42
},
"mother": {
"name": "Cathie",
"age": 70
},
"father": {
"name": "James",
"age": 75
}
}
# Using a dictionary comprehension, produce output that looks like the following example: Krista is my sister and is 42 years old.
# Note: the braces below hold no key:value pairs, so this is actually a *set* comprehension collecting the sentences.
dict_2 = {f'{my_family[member]["name"]} is my {member} and is {my_family[member]["age"]} years old' for member in my_family}
print(dict_2)
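# A sketch of a true dictionary comprehension over the same data
# (keys are the relations, values are the sentences):
dict_3 = {member: f'{my_family[member]["name"]} is my {member} and is {my_family[member]["age"]} years old'
          for member in my_family}
print(dict_3)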
# regular for loop
for member in my_family:
print(f'{my_family[member]["name"]} is my {member} and is {my_family[member]["age"]} years old')
| true |
09c5c60793a285b26c7c353f8887ab88f36f1de2
|
Python
|
zinderud/ysa
|
/sklearn/breast-cancer.py
|
UTF-8
| 787 | 2.703125 | 3 |
[
"Apache-2.0"
] |
permissive
|
import numpy as np
from sklearn.preprocessing import Imputer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import pandas as pd
veri = pd.read_csv("data.data")
veri = veri.replace('?', -222)  # with inplace set, replace() returns None; keep the assignment form instead
veri=veri.drop(['id'], axis=1)
y = veri.benormal
x = veri.drop(['benormal'], axis=1)
imp = Imputer(missing_values=-222, strategy="mean",axis=0)
x = imp.fit_transform(x)
tahmin = KNeighborsClassifier(n_neighbors=4, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='euclidean', metric_params=None, n_jobs=1)
tahmin.fit(x,y)
ytahmin = tahmin.predict(x)
basari = accuracy_score(y, ytahmin, normalize=True, sample_weight=None)
print("Yüzde",basari*100," oranında:" )
print(tahmin.predict([1,1,1,1,1,1,1,1,1,1]))
| true |
92f4e9a1718e129e581de6542cd2275286f5fe33
|
Python
|
uhmppi1/RL_Edu1
|
/lecture/lab6/cartPoleNN.py
|
UTF-8
| 3,383 | 2.625 | 3 |
[] |
no_license
|
import numpy as np
import tensorflow as tf
import gym
import matplotlib.pyplot as plt
env = gym.make('CartPole-v0')
input_size = env.observation_space.shape[0]
layer1_size = 10
layer2_size = 10
output_size = env.action_space.n
#learning_rate = 1e-1 # counts per episode: 9.5085
learning_rate = 1e-2 # counts per episode: 95.3945
X = tf.placeholder(shape=[1,input_size],dtype=tf.float32)
#W1 = tf.Variable(tf.random_uniform([input_size,layer1_size],-1,1))
W1 = tf.get_variable("W1",shape=[input_size, layer1_size], initializer=tf.contrib.layers.xavier_initializer())
Z1 = tf.nn.sigmoid(tf.matmul(X,W1))
#Z1 = tf.nn.tanh(tf.matmul(X,W1))
#W2 = tf.Variable(tf.random_uniform([layer1_size, output_size],-1,1))
W2 = tf.get_variable("W2",shape=[layer1_size, layer2_size], initializer=tf.contrib.layers.xavier_initializer())
Z2 = tf.nn.sigmoid(tf.matmul(Z1,W2))
#Z2 = tf.nn.tanh(tf.matmul(Z1,W2))
#W2 = tf.Variable(tf.random_uniform([layer1_size, output_size],-1,1))
W3 = tf.get_variable("W3",shape=[layer2_size, output_size], initializer=tf.contrib.layers.xavier_initializer())
Qpred = tf.matmul(Z2, W3)
Y = tf.placeholder(shape=[1,output_size],dtype=tf.float32)
loss = tf.reduce_sum(tf.square(Y - Qpred))
train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
#train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
dis = .9
num_episodes = 2000
rList = []
subRewardList = []
avgRewardList = []
init = tf.global_variables_initializer()
#with tf.Session() as sess:
sess = tf.Session()
sess.run(init)
for i in range(num_episodes):
e = 1. / (i/10 + 1)
s = env.reset()
rAll = 0
done = False
while not done:
x = np.reshape(s, [1, input_size])
Qs = sess.run(Qpred, feed_dict={X:x})
if np.random.rand(1) < e:
a = env.action_space.sample()
else :
a = np.argmax(Qs)
s1, reward, done, _ = env.step(a)
if done :
Qs[0,a] = -100
else :
x1 = np.reshape(s1,[1,input_size])
Qs1 = sess.run(Qpred, feed_dict={X:x1})
Qs[0,a] = reward + dis * np.max(Qs1)
sess.run(train, feed_dict={X:x, Y:Qs})
rAll += reward
s = s1
subRewardList.append(rAll)
rList.append(rAll)
if (i+1) % 100 == 0 :
avg = sum(subRewardList)/100
avgRewardList.append(avg)
print(i+1, ': avg =', avg)
subRewardList = []
#layer2: learning rate = 0.01, sigmoid, counts per episode: 36.436
#layer2: learning rate = 0.01, tanh, counts per episode: ?
#layer3: learning rate = 0.01, sigmoid, counts per episode: 38.1825
#layer3: learning rate = 0.01, tanh counts per episode: 84.9365
#layer3: learning rate = 0.001, counts per episode: 96.272
print('counts per episode:', str(sum(rList) / num_episodes ) )
plt.ylim(0,300)
plt.bar(range(len(avgRewardList)), avgRewardList, color='blue', bottom=0)
plt.show()
'''
count = 0
while count < 100:
s = env.reset()
count += 1
done = False
rAll = 0
while not done:
env.render()
x = np.reshape(s,[1,input_size])
Qs = sess.run(Qpred, feed_dict={X:x})
a = np.argmax(Qs)
s1, reward, done, _ = env.step(a)
rAll += reward
if done:
break;
else:
s = s1
print('Test:',count, ':', rAll)
'''
| true |
870fddb371579f6cf5821541c33880df41be5327
|
Python
|
melodyyyang/practice
|
/app/id_generator.py
|
UTF-8
| 134 | 2.578125 | 3 |
[] |
no_license
|
class IdGenerator():
def __init__(self):
self._id = 0
def next(self):
self._id += 1
return self._id
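# Illustrative usage (not part of the original file):
# gen = IdGenerator()
# gen.next()  # -> 1
# gen.next()  # -> 2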
| true |
b1dae475a34de880906c27ae5ef4b44b9a4b4f28
|
Python
|
Rogerwlk/Information-Retrieval-Search-Engine
|
/P2/P2/query_static.py
|
UTF-8
| 11,137 | 2.84375 | 3 |
[
"MIT"
] |
permissive
|
import re, html, nltk, os, argparse, time
from w3lib.html import replace_entities
from nltk.stem.porter import PorterStemmer
from math import log
from math import sqrt
from collections import Counter
# global variable
idx_table = {}
docu_table = {}
stop_words = set()
def dotAcronym(match):
temp = match.group()
temp = temp.replace('.', '')
return temp
def digitAlpha(match):
num, word = match.group(1), match.group(2)
temp = num + word
if len(word) >= 3 and not word in stop_words:
temp += ' ' + word
return temp
def alphaDigit(match):
word, num = match.group(1), match.group(2)
temp = word + num
if len(word) >= 3 and not word in stop_words:
temp += ' ' + word
return temp
def prefixReplace(match):
prefix, stem = match.group(1), match.group(2)
temp = prefix + stem
if not stem in stop_words:
temp += ' ' + stem
return temp
def hyphenReplace(match):
temp = match.group()
li = temp.split('-')
temp = temp.replace('-', '')
for item in li:
if not item in stop_words:
temp += ' ' + item
return temp
def validDate(month, day, year):
if month <= 0 or month > 12 or year > 2018 or year < 0 or day <= 0:
return False
if (month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12) and day > 31:
return False
if month == 2 and day > 29:
return False
if (month == 4 or month == 6 or month == 9 or month == 11) and day > 30:
return False
return True
def dateReplace(match):
date = match.group()
# date convert to number
if date[0] >= '0' and date[0] <= '9':
d = re.split(r'[-/]', date)
month, day, year = int(d[0]), int(d[1]), int(d[2])
else:
d = date.split()
t_m, t_d, year = d[0], d[1], int(d[2])
# convert jan(uary) feb(urary) into number 1, 2 ...
if t_m[0] == 'j':
if t_m[1] == 'a':
month = 1
elif t_m[2] == 'n':
month = 6
else:
month = 7
elif t_m[0] == 'f':
month = 2
elif t_m[0] == 'm':
if t_m[2] == 'r':
month = 3
else:
month = 5
elif t_m[0] == 'a':
if t_m[1] == 'p':
month = 4
else:
month = 8
elif t_m[0] == 's':
month = 9
elif t_m[0] == 'o':
month = 10
elif t_m[0] == 'n':
month = 11
else:
month = 12
# convert '10' or '1' into number
if t_d[1] >= '0' and t_d[1] <= '9':
day = 10 * int(t_d[0]) + int(t_d[1])
else:
day = int(t_d[0])
# end of date conversion into number
if not validDate(month, day, year): # check date
return '' # remove this date
if year < 100:
if year <= 18:
year += 2000
else:
year += 1900
s_month = str(month)
if month < 10:
s_month = '0' + s_month
s_day = str(day)
if day < 10:
s_day = '0' + s_day
return s_month+'/'+s_day+'/'+str(year)
def queryPreprocessing(query, args):
# regular expressions
p_tag_comment = re.compile(r'(<.*?>|<!--.*-->)')
p_alpha_digit = re.compile(r'\b([a-z]+)-([0-9]+)\b', re.I)
p_digit_alpha = re.compile(r'\b([0-9]+)-([a-z]+)\b', re.I)
p_dot_acronym = re.compile(r'\b([a-z]+\.)+[a-z]+(\.|\b)', re.I)
p_date = re.compile(r"""\b
([0-9]{1,2}/[0-9]{1,2}/[0-9]{2,4})
|([0-9]{1,2}-[0-9]{1,2}-[0-9]{2,4})
|(((Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[.]?
|January|February|March|April|May|June|July|August
|September|October|November|December)
\ [0-9]{1,2}(st|nd|rd|th)?,\ [0-9]{2,4})
\b""", re.VERBOSE | re.I)
p_docno = re.compile(r'(?:<DOCNO>\s*)(.+)(?:\s*</DOCNO>)', re.I)
p_num1 = re.compile(r',([0-9]{3})')
p_num2 = re.compile(r'\b(\d+)[.]0+\b')
p_file_extension = re.compile(r'([^\\\/:*?\"<>|\s]+)\.(aif|cda|mid|midi|mp3|mpa|ogg|wav|wma|wpl|7z|arj|deb|pkg|rar|rpm|tar\.gz|z|zip|bin|dmg|iso|toast|vcd|csv|dat|db|dbf|log|mdb|sav|sql|tar|xml|apk|bat|bin|cgi|pl|com|exe|gadget|jar|py|wsf|fnt|fon|otf|ttf|ai|bmp|gif|ico|jpeg|jpg|png|ps|psd|svg|tif|tiff|asp|aspx|cer|cfm|css|htm|html|js|jsp|part|php|rss|xhtml|key|odp|pps|ppt|pptx|class|cpp|cs|h|java|sh|swift|vb|ods|xlr|xls|xlsx|bak|cab|cfg|cpl|cur|dll|dmp|drv|icns|ico|ini|lnk|msi|sys|tmp|3g2|3gp|avi|flv|h264|m4v|mkv|mov|mp4|mpg|mpeg|rm|swf|vob|wmv|doc|docx|odt|pdf|rtf|tex|txt|wks|wps|wpd)', re.I)
p_prefix = re.compile(r'\b(a|an|ante|anti|auto|circum|co|com|con|contra|contro|de|dis|en|em|ex|extra|fore|hetero|homo|homeo|hyper|il|im|in|ir|inter|intra|intro|macro|micro|mid|mis|mono|non|omni|over|post|pre|pro|re|semi|sub|super|sym|syn|trans|tri|un|under|uni)-([a-z])+\b', re.I)
p_hyphen = re.compile(r'\b(\w+-)+\w+\b')
# create a porter stemmer
stemmer = PorterStemmer()
# convert all character references (e.g. >, >, &x3e;) to unicode
query = replace_entities(query)
query = html.unescape(query)
# some queries have '/', need to be handled specifically
query = query.replace('/', ' / ')
# convert to lower case
query = query.lower()
# expand file extension
query = p_file_extension.sub(r'\g<1>\g<2> \g<2>', query)
# ph.D. -> phd
query = p_dot_acronym.sub(dotAcronym, query)
# convert date to mm/dd/yyyy format or remove it if invalid
query = p_date.sub(dateReplace, query)
# digit format
query = p_num1.sub(r'\g<1>', query) # remove ',' in 1,000
query = p_num2.sub(r'\g<1>', query) # remove '.00' in 1.00
# expand digit-alpha format
query = p_digit_alpha.sub(digitAlpha, query)
# expand alpha-digit format
query = p_alpha_digit.sub(alphaDigit, query)
# expand stem with hyphen prefix
query = p_prefix.sub(prefixReplace, query)
# expand hyphenated word
query = p_hyphen.sub(hyphenReplace, query)
# tokenize query
query = nltk.word_tokenize(query)
# apply Porter Stemmer
if args.index_type == 'stem':
query = [stemmer.stem(word) for word in query]
# remove term not in idx_table (value will be 0 for all retrieval)
query = [x for x in query if x in idx_table]
return query
def df(term):
return len(idx_table[term])
def idf(term):
return log(len(docu_table) / df(term), 10)
def cosineSimilarity(query, docno, query_counter):
c_dividend = 0 # dividend in formula
c_divisor_d = 0 # document divisor in formula
c_divisor_w = 0 # query divisor in formula
for term in query:
w = query_counter[term] * idf(term)
d = docu_table[docno][term] * idf(term)
c_dividend += w * d
c_divisor_w += w ** 2
for term in docu_table[docno]:
d = docu_table[docno][term] * idf(term)
c_divisor_d += d ** 2
return c_dividend / sqrt(c_divisor_d * c_divisor_w)
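# The function below follows the standard Okapi BM25 per-term score (a sketch, matching the code):
#   idf(term) * ((k1+1)*tf / (tf + k1*(1 - b + b*D/avgdl))) * ((k2+1)*qtf / (k2 + qtf))
# where tf is the term's frequency in the document, qtf its frequency in the query,
# D the document length, and avgdl the average document length over the collection.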
def bm25Similarity(query, docno, query_counter, totaldl):
k1 = 1.2
k2 = 500
b = 0.75
N = len(docu_table)
D = sum(docu_table[docno].values())
avgdl = totaldl / len(docu_table)
res = 0
for term in query:
n = len(idx_table[term])
tf = idx_table[term][docno]
p1 = (k1 + 1) * tf / (tf + k1 * (1 - b + b * D / avgdl))
p2 = (k2 + 1) * query_counter[term] / (k2 + query_counter[term])
res += idf(term) * p1 * p2
return res
def lmSimilarity(query, docno, query_counter, totaldl):
D = sum(docu_table[docno].values())
avgdl = totaldl / len(docu_table)
res = 0
for term in query:
tf = idx_table[term][docno]
tfC = sum(idx_table[term].values())
res += log(((tf + avgdl * tfC / totaldl) / (D + avgdl)), 10)
return res
def relevanceRanking(query, args, totaldl):
result = Counter()
# find relevant doc containing at least one query term
relevant_doc = set()
for term in query:
for docno in idx_table[term]:
relevant_doc.add(docno)
# build query counter to calculate tf in query
query_counter = Counter()
for term in query:
query_counter[term] += 1
# calculate relevance
for docno in relevant_doc:
if args.retrieval_model == 'cosine':
result[docno] = cosineSimilarity(query, docno, query_counter)
elif args.retrieval_model == 'bm25':
result[docno] = bm25Similarity(query, docno, query_counter, totaldl)
elif args.retrieval_model == 'lm':
result[docno] = lmSimilarity(query, docno, query_counter, totaldl)
return result.most_common(100)
def parseSTPI(line, idx):
parts = line.split()
# term, docno, term frequency, term positions
return [(parts[0], parts[1]), int(parts[2]), parts[3:], idx]
def parsePI(line, idx):
parts = line.split()
if parts[2].startswith('FR'):
return [(parts[0]+' '+parts[1], parts[2]), int(parts[3]), idx]
else:
return [(parts[0]+' '+parts[1]+' '+parts[2], parts[3]), int(parts[4]), idx]
def parseCommand():
parser = argparse.ArgumentParser(description='Static query processing. Runs the specified retrieval model on the specified index type. Accepts 5 arguments.')
parser.add_argument('index_path', help='The path of index files.')
parser.add_argument('query_file', help='The path and name of query file.')
parser.add_argument('retrieval_model', choices=['cosine', 'bm25', 'lm'], help='retrieval model: cosine / bm25 / lm')
parser.add_argument('index_type', choices=['single', 'stem'], help='[single / stem] for single term index / stem index')
parser.add_argument('output_path', help='The path of output file (Retrieval result).')
args = parser.parse_args()
if args.index_path[0] == '/':
args.index_path = '.' + args.index_path
if args.index_path[-1] != '/':
args.index_path += '/'
if args.output_path[0] == '/':
args.output_path = '.' + args.output_path
if args.output_path[-1] != '/':
args.output_path += '/'
return args
def loadIndexTables(args):
# load stop word set
f = open('stops.txt', 'r')
for line in f:
stop_words.add(line.strip())
f.close()
totaldl = 0
# load index table
if args.index_type == 'single':
f = open(args.index_path+'single_term_idx.txt', 'r')
elif args.index_type == 'stem':
f = open(args.index_path+'stem_idx.txt', 'r')
for line in f:
parts = line.split()
if not parts[0] in idx_table:
idx_table[parts[0]] = Counter()
if not parts[1] in docu_table:
docu_table[parts[1]] = Counter()
# term, docno, term frequency
idx_table[parts[0]][parts[1]] = int(parts[2])
docu_table[parts[1]][parts[0]] = int(parts[2])
totaldl += int(parts[2])
f.close()
return totaldl
if __name__ == '__main__':
t1 = time.time()
args = parseCommand()
totaldl = loadIndexTables(args)
# open input query file and output retrieval file
input_file = open(args.query_file, 'r')
# create output directory if not exists
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
output_file = open(args.output_path+args.retrieval_model+'-'+args.index_type+'.txt', 'w')
    line = input_file.readline()
    while line:
        if line.startswith('<num>'):
            num = line.split()[-1]
        elif line.startswith('<title>'):
            query = ' '.join(line.split()[2:])
            line = input_file.readline().strip()
            if line:
                query += ' ' + line
            query = queryPreprocessing(query, args)
            result = relevanceRanking(query, args, totaldl)
            rank = 0  # the rank counter restarts for each query
            for (docno, weight) in result:
                output_file.write(num+' 0 '+docno+' '+str(rank)+' '+str(weight)+' '+args.retrieval_model+'\n')
                rank += 1
        line = input_file.readline()
input_file.close()
output_file.close()
t3 = time.time()
print('Total time:', round((t3 - t1), 2), 's')
| true |
36ea02dc6997960bd94e33ada00d24606ab8d8d6
|
Python
|
UCMHSProgramming16-17/final-project-jbrual
|
/Pokemon.py
|
UTF-8
| 2,372 | 3.515625 | 4 |
[] |
no_license
|
# Q: What are the stats of a given type of Pokemon?
# Import the appropriate modules.
import csv, requests, operator, pandas as pd, numpy as np
from bokeh.plotting import figure, output_file, save
from bokeh.charts import Bar, output_file, save
from bokeh.palettes import Accent4 as pal
# Reach the API by building its URL.
# Use user input to ask for a certain type.
base_url = "http://pokeapi.co/api/v2/type/"
type_poke = input("Select a type. ").strip().lower()  # PokeAPI type names are lowercase
url = base_url + type_poke
r = requests.get(url)
# Use .json format and use the appropriate dict. key.
results = r.json()
type_list = results["pokemon"]
# Create a .csv file for export data.
csvfile = open("Stats.csv", "w")
csvwriter = csv.writer(csvfile, delimiter = ",")
# Establish header row.
csvwriter.writerow(["Pokemon", "HP", "Atk", "Def", "SAtk", "SDef", "Spd"])
# Create empty lists to store Pokemon stats.
name_pokemon = []
hp_pokemon = []
atk_pokemon = []
def_pokemon = []
satk_pokemon = []
sdef_pokemon = []
speed_pokemon = []
# For each Pokemon, determine its name and stats.
# Append them to their respective lists.
for pokemon in type_list:
# Search the json for the name of the Pokemon and its stats.
name = pokemon["pokemon"]["name"]
url = pokemon["pokemon"]["url"]
rspeed = requests.get(url)
results = rspeed.json()
hp = results["stats"][5]["base_stat"]
atk = results["stats"][4]["base_stat"]
defense = results["stats"][3]["base_stat"]
satk = results["stats"][2]["base_stat"]
sdef = results["stats"][1]["base_stat"]
speed = results["stats"][0]["base_stat"]
# Append the Pokemon's stats to each respective list.
name_pokemon.append(name)
hp_pokemon.append(hp)
atk_pokemon.append(atk)
def_pokemon.append(defense)
satk_pokemon.append(satk)
sdef_pokemon.append(sdef)
speed_pokemon.append(speed)
# Zip the rows to write each Pokemon's stats.
rows = zip(name_pokemon,hp_pokemon,atk_pokemon,def_pokemon,satk_pokemon,sdef_pokemon,speed_pokemon)
for row in rows:
csvwriter.writerow(row)
# Close the .csv file.
csvfile.close()
# Create the dataframe from the .csv file. Store bar graph in a variable form.
df = pd.read_csv("Stats.csv")
graph1 = Bar(df, 'Pokemon', values='Spd', legend=False, title= type_poke + "-type Pokemon Stats")
# Generate the .html file for the Bokeh graph.
output_file("Stats.html")
save(graph1)
| true |
21b57f4ce2736ce33da95beac78acebf28ed26fa
|
Python
|
abackes19/portfolio
|
/wallfollowing.py
|
UTF-8
| 989 | 2.796875 | 3 |
[] |
no_license
|
# welcome to wall following !
# using analog and digital sensors, it will follow a wall
import setup
import RoboPiLib as RPL
import time
now = time.time()
future = now
motorL = 0
motorR = 2
right = 19
front = 16
left = 17
analogL = 1
rgo = 2000
lgo = 1000
# ^ setup
RPL.servoWrite(motorR, rgo)
RPL.servoWrite(motorL, lgo) # turn on both motors going straight
while True:
RPL.servoWrite(motorR, rgo)
RPL.servoWrite(motorL, lgo)
    while RPL.analogRead(analogL) >= 400:  # middle range, keep going straight
        RPL.servoWrite(motorR, rgo)
        RPL.servoWrite(motorL, lgo)
    while RPL.analogRead(analogL) < 400:  # no longer in the middle range
        if RPL.digitalRead(left) == 0:  # the digital sensor also fires, so the wall is close
            RPL.servoWrite(motorR, 0)  # turn away from the wall
            RPL.servoWrite(motorL, lgo)
        if RPL.digitalRead(left) == 1:  # the digital sensor does not fire, so the wall is far
            RPL.servoWrite(motorR, rgo)  # turn towards the wall
            RPL.servoWrite(motorL, 0)
| true |
45c38c233238ed44fc9f248636756686e65cc0bb
|
Python
|
ilya144/tweets_webservice
|
/application/maind.py
|
UTF-8
| 1,125 | 2.8125 | 3 |
[] |
no_license
|
from idbclient import Database
from iparser import TweetParser
from flask import Flask, render_template
from threading import Thread
import time
app = Flask(__name__)
class ParserThread(Thread):
def __init__(self, parser_tweet: TweetParser):
Thread.__init__(self)
self.parser_tweet = parser_tweet
self.tweets = []
def run(self):
while True:
self.tweets = self.parser_tweet.parse_tweets()
db.add_tweets(self.tweets)
            PARSE_DELAY = 300  # delay between tweet-parsing passes, in seconds
time.sleep(PARSE_DELAY)
@app.route("/")
def respone():
    return render_template('index.html', tweets = db.get_last_url(10))  # load the latest tweet URLs and render the page
if __name__ == "__main__":
with open("../followed.txt") as f:
string = f.read()
accounts = string.split("\n")
db = Database()
parser_tweet = TweetParser(accounts)
parser_thread = ParserThread(parser_tweet)
parser_thread.start()
HOST = 'localhost'
PORT = '5000'
app.run(host= HOST, port= PORT)
| true |