| blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 40–40 | stringclasses 1 value | stringlengths 5–133 | stringlengths 2–333 | stringclasses 30 values | int64 18–5.47M | float64 2.52–5.81 | int64 3–5 | listlengths 0–67 | stringclasses 2 values | stringlengths 12–5.47M | bool 1 class |
| 3076497768f90c283a8d61d7ce27f530b441f318 | Python | Chappie733/MLPack | /networks/activations.py | UTF-8 | 3,552 | 3.09375 | 3 | ["MIT"] | permissive |
import numpy as np
from numbers import Number
ACTIVATIONS = {}
def parser(func):
def wrapper(x, deriv=False, **kwargs):
if not isinstance(deriv, bool):
raise TypeError("Expected the parameter \'deriv\' to be a boolean, but received {type} instead!".format(type=type(deriv)))
elif not isinstance(x, np.ndarray) and not isinstance(x, Number):
raise TypeError("Expected the parameter \'x\' to be a numpy array or a number, but received {type} instead!".format(type=type(x)))
return func(x, deriv, kwargs)
wrapper.__name__ = func.__name__
ACTIVATIONS[wrapper.__name__.lower()] = wrapper
return wrapper
@parser
def linear(x, deriv=False, *args):
return x if not deriv else np.ones(x.shape)
@parser
def sigmoid(x, deriv=False, *args):
return 1/(1+np.exp(-x)) if not deriv else sigmoid(x)*(1-sigmoid(x))
@parser
def tanh(x, deriv=False, *args):
return np.tanh(x) if not deriv else 1-np.tanh(x)**2
@parser
def ReLu(x, deriv=False, *args):
return (np.abs(x)+x)/2 if not deriv else (np.sign(x)+1)/2
@parser
def ELU(x, deriv=False, *args):
alpha = 1 if 'alpha' not in args[0] else args[0]['alpha']
return np.where(x>0, x, alpha*(np.exp(x)-1)) if not deriv else np.where(x>0, 1, alpha*np.exp(x))
@parser
def LeakyReLu(x, deriv=False, *args):
alpha = 1 if 'alpha' not in args[0] else args[0]['alpha']
return np.where(x>0, x, alpha*x) if not deriv else np.where(x>0, 1, alpha)
@parser
def atan(x, deriv=False, *args):
return np.arctan(x) if not deriv else 1/(x**2+1)
@parser
def BentIdentity(x, deriv=False, *args):
return x+(np.sqrt(x**2+1)-1)/2 if not deriv else x/(2*np.sqrt(x**2+1))+1
@parser
def BipolarSigmoid(x, deriv=False, *args):
return (1-np.exp(-x))/(1+np.exp(x)) if not deriv else (2+np.exp(-x)-np.exp(x))/((1+np.exp(x))**2)
@parser
def gaussian(x, deriv=False, *args):
return np.exp(-x**2) if not deriv else -2*x*np.exp(-x**2)
@parser
def hardtanh(x, deriv=False, *args):
return np.where(np.abs(x)>1, np.sign(x), x) if not deriv else np.where(np.abs(x)>1, 0, 1)
@parser
def InverseSqrt(x, deriv=False, *args):
alpha = 0.1 if 'alpha' not in args[0] else args[0]['alpha']
return x/(np.sqrt(1+alpha*x**2)) if not deriv else 1/(1+alpha*x**2)**(3/2)
@parser
def LeCunTanh(x, deriv=False, *args):
alpha = 1.7159 if 'alpha' not in args[0] else args[0]['alpha']
return alpha*np.tanh(2*x/3) if not deriv else 2*alpha/(3*np.cosh(2*x/3)**2)
@parser
def LogLog(x, deriv=False, *args):
    return 1-np.exp(-np.exp(x)) if not deriv else np.exp(x)*(1-LogLog(x))  # d/dx (1-e^(-e^x)) = e^x * e^(-e^x)
@parser
def LogSigmoid(x, deriv=False, *args):
return np.log(sigmoid(x)) if not deriv else 1-sigmoid(x)
@parser
def SELU(x, deriv=False, *args):
alpha = 1.67326 if 'alpha' not in args[0] else args[0]['alpha']
beta = 1.0507 if 'beta' not in args[0] else args[0]['beta']
    return beta*np.where(x>0, x, alpha*(np.exp(x)-1)) if not deriv else beta*np.where(x>0, 1, alpha*np.exp(x))
@parser
def sinc(x, deriv=False, *args):
return np.where(x!=0, np.sin(x)/x, 1) if not deriv else np.where(x!=0, np.cos(x)/x-np.sin(x)/(x**2), 0)
@parser
def swish(x, deriv=False, *args):
return x/(1+np.exp(-x)) if not deriv else np.exp(x)*(x+np.exp(x)+1)/(np.exp(x)+1)**2
@parser
def softsign(x, deriv=False, *args):
return x/(1+np.abs(x)) if not deriv else 1/(1+np.abs(x))**2
@parser
def softplus(x, deriv=False, *args):
return np.log(1+np.exp(x)) if not deriv else np.exp(x)/(1+np.exp(x))
@parser
def softmax(x, *args):
return np.exp(x)/np.sum(np.exp(x))
| true |
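A minimal usage sketch for the activation registry defined above (not part of the original file; it assumes the module is importable as `activations`). The `ACTIVATIONS` dict is keyed by the lowercased function name, and extra keyword arguments such as `alpha` are forwarded to the parametrized activations:

```python
import numpy as np
import activations  # hypothetical import of the file above

x = np.array([-1.0, 0.0, 2.0])
print(activations.sigmoid(x))              # forward values
print(activations.sigmoid(x, deriv=True))  # elementwise derivative
print(activations.ACTIVATIONS['relu'](x))  # lookup by lowercased name
print(activations.ELU(x, alpha=0.5))       # kwargs reach the wrapped function
```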
| 47818f1cb70c838624badc2eb2a76477ab0dedad | Python | jrivo/prisma-client-py | /tests/test_raw_queries.py | UTF-8 | 4,905 | 2.671875 | 3 | ["Apache-2.0"] | permissive |
import pytest
from prisma import errors, Client
from prisma.models import Post, User
from prisma.partials import PostOnlyPublished
@pytest.mark.asyncio
async def test_query_raw(client: Client) -> None:
"""Standard usage, erroneous query and correct queries"""
with pytest.raises(errors.RawQueryError):
query = '''
SELECT *
FROM bad_table;
'''
await client.query_raw(query)
post = await client.post.create(
{
'title': 'My post title!',
'published': False,
}
)
query = '''
SELECT COUNT(*) as count
FROM Post
'''
results = await client.query_raw(query)
assert len(results) == 1
assert isinstance(results[0]['count'], int)
query = '''
SELECT *
FROM Post
WHERE id = $1
'''
results = await client.query_raw(query, post.id)
assert len(results) == 1
assert results[0]['id'] == post.id
assert results[0]['published'] is False
@pytest.mark.asyncio
async def test_query_raw_model(client: Client) -> None:
"""Transforms resuls to a BaseModel when given"""
post = await client.post.create(
{
'title': 'My post title!',
'published': False,
}
)
query = '''
SELECT *
FROM Post
WHERE id = $1
'''
posts = await client.query_raw(query, post.id, model=Post)
assert len(posts) == 1
found = posts[0]
assert isinstance(found, Post)
assert found == post
assert found.id == post.id
@pytest.mark.asyncio
async def test_query_raw_partial_model(client: Client) -> None:
"""Transforms results to a partial model"""
posts = [
await client.post.create({'title': 'foo', 'published': False}),
await client.post.create({'title': 'foo', 'published': True}),
await client.post.create({'title': 'foo', 'published': True}),
await client.post.create({'title': 'foo', 'published': False}),
]
query = '''
SELECT id, published
FROM Post
WHERE published = 0
'''
results = await client.query_raw(query, model=PostOnlyPublished)
assert len(results) == 2
assert set(p.id for p in results) == set(
p.id for p in posts if p.published is False
)
assert not hasattr(results[0], 'title')
assert results[0].published is False
assert results[1].published is False
@pytest.mark.asyncio
async def test_query_raw_no_result(client: Client) -> None:
"""No result returns empty list"""
query = '''
SELECT *
FROM Post
WHERE id = 'sdldsd'
'''
results = await client.query_raw(query)
assert len(results) == 0
results = await client.query_raw(query, model=Post)
assert len(results) == 0
@pytest.mark.asyncio
async def test_query_raw_incorrect_params(client: Client) -> None:
"""Passings too many parameters raises an error"""
query = '''
SELECT COUNT(*) as total
FROM Post
'''
results = await client.query_raw(query)
assert len(results) == 1
assert results[0]['total'] == 0
with pytest.raises(errors.RawQueryError):
await client.query_raw(query, 1)
@pytest.mark.asyncio
async def test_execute_raw(client: Client) -> None:
"""Basic usage"""
post = await client.post.create(
{
'title': 'My post title.',
'published': False,
}
)
assert isinstance(post.id, str)
query = '''
UPDATE Post
SET title = 'My edited title'
WHERE id = $1
'''
count = await client.execute_raw(query, post.id)
assert count == 1
found = await client.post.find_unique(where={'id': post.id})
assert found is not None
assert found.id == post.id
assert found.title == 'My edited title'
@pytest.mark.asyncio
async def test_execute_raw_no_result(client: Client) -> None:
"""No result returns 0"""
query = '''
UPDATE Post
SET title = 'updated title'
WHERE id = 'sdldsd'
'''
count = await client.execute_raw(query)
assert count == 0
@pytest.mark.asyncio
async def test_query_first(client: Client) -> None:
"""Standard usage"""
user = await client.user.create({'name': 'Robert'})
query = '''
SELECT *
FROM User
WHERE User.id = ?
'''
found = await client.query_first(query, user.id)
assert found == {'id': user.id, 'name': 'Robert'}
@pytest.mark.asyncio
async def test_query_first_model(client: Client) -> None:
"""Transforms result to a BaseModel if given"""
user = await client.user.create({'name': 'Robert'})
query = '''
SELECT *
FROM User
WHERE User.id = ?
'''
found = await client.query_first(query, user.id, model=User)
assert found is not None
assert found.id == user.id
assert found.name == 'Robert'
| true |
| 1354e9a676840826f45e30c33b8a791b405ef1db | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2984/60697/281306.py | UTF-8 | 263 | 3.390625 | 3 | [] | no_license |
str1=input()
str2=input()
if(len(str1)!=len(str2)):
print("1")
else:
if(str1==str2):
print("2")
flag=True
else:
a=str1.upper()
b=str2.upper()
if(a==b):
print("3")
else:
print("4")
| true |
| 7673c437f6e3a1f1a86ce860832be7a531852b6f | Python | medashiva/pybasics | /swapping.py | UTF-8 | 192 | 3.796875 | 4 | [] | no_license |
x=input("enter the first number")
y=input("enter the second number")
z=x
x=y
y=z
print('The value of x after swapping: {}'.format(x))
print('The value of y after swapping: {}'.format(y))
| true |
| 76cdf6cb7d568077963313c61079e0b24f5c0caa | Python | animformed/problem-sets-mit-ocw-6 | /pylab_examples.py | UTF-8 | 1,213 | 3.984375 | 4 | ["Giftware"] | permissive |
from pylab import *
import random
plot([1, 2, 3, 4]) # when not instructed explicitly, plot assumes x axis from 0 as 0, 1, 2, 3. These four values in list are y values
plot([5, 6, 7, 8])
plot([1, 2, 3, 4],[1, 4, 9, 16]) # with x and y axis values (x, y)
figure() # create a new figure, instead of putting the plot in the same figure
plot([1, 2, 3, 4],[1, 4, 9, 16],'ro') # plot with red circle markers
axis([0, 6, 0, 20]) # specifying the length of display axes; x from 0 to 6, y from 0 to 20
title('Earnings') # title of the figure
xlabel('Days') # x and y axis labels
ylabel('Dollars')
figure()
xAxis = array([1, 2, 3, 4]) # create an array. Can use standard operations on array objects
print xAxis
test = arange(1, 5) # gives you an array of ints, not a list
print test
print test == xAxis
yAxis = xAxis**3 # not possible if xAxis was a list.
plot(xAxis, yAxis, 'ro')
title('x and y')
axis([0, 5, 0, 70])
figure()
vals = []
dieVals = [1, 2, 3, 4, 5, 6]
for i in range(10000):
vals.append(random.choice(dieVals) + random.choice(dieVals))
hist(vals, bins = 11)
show()
| true |
| 6d31bfe38b55d59f104c883a7ce32eec67700600 | Python | RelaxedDong/python_base | /面向对象-2/demo-1.py | UTF-8 | 667 | 3.6875 | 4 | [] | no_license |
# class Person(object):
# def __init__(self,name,age):
# self.name = name
# self.age = age
#
#
# def say(self):
# p = self.__class__('tanyajuan',20)
# print(p.name,p.age)
# print('my name is %s,age is %d'%(self.name,self.age))
#
# p = Person('donghao',20)
#
# p.say()
# print(p.__class__)
class Person(object):
def __init__(self,money):
self.__money = money
def setmoney(self,money):
if money<0:
money = 0
self.__money = money
def getmoney(self):
return self.__money
p = Person(200)
p.setmoney(-1)
p._Person__money = 200
print(p.getmoney())
print(p.__dict__)
| true |
| 442ac389f12da4bafbd3fa4a2fef14e0054cb1b3 | Python | markberreth/DataCamp | /Manipulating Time Series Data.py | UTF-8 | 5,382 | 3.703125 | 4 | [] | no_license |
'''
Starting new course for time series analysis
'''
# Create the range of dates here
seven_days = pd.date_range(start='2017-1-1', periods=7, freq='D')
# Iterate over the dates and print the number and name of the weekday
for day in seven_days:
print(day.dayofweek, day.weekday_name)
# Inspect data
print(data.info())
# Convert the date column to datetime64
data.date = pd.to_datetime(data.date)
# Set date column as index
data.set_index('date', inplace=True)
# Inspect data
print(data.info())
# Plot data
data.plot(subplots=True)
plt.show()
# Create dataframe prices here
prices = pd.DataFrame([])
# Select data for each year and concatenate with prices here
for year in ['2013', '2014', '2015']:
price_per_year = yahoo.loc[year, ['price']].reset_index(drop=True)
price_per_year.rename(columns={'price': year}, inplace=True)
prices = pd.concat([prices, price_per_year], axis=1)
# Plot prices
prices.plot()
plt.show()
# Inspect data
print(co.info())
# Set the frequency to calendar daily
co = co.asfreq('D')
# Plot the data
co.plot(subplots=True)
plt.show()
# Set frequency to monthly
co = co.asfreq('M')
# Plot the data
co.plot(subplots=True)
plt.show()
# Import data here
google = pd.read_csv('google.csv', parse_dates=['Date'], index_col='Date')
# Set data frequency to business daily
google = google.asfreq('B')
# Create 'lagged' and 'shifted'
google['lagged'] = google['Close'].shift(periods=-90)
google['shifted'] = google['Close'].shift(periods=90)
# Plot the google price series
google.plot(subplots=True)
plt.show()
# Created shifted_30 here
yahoo['shifted_30'] = yahoo['price'].shift(periods=30)
# Subtract shifted_30 from price
yahoo['change_30'] = yahoo['price'].sub(yahoo['shifted_30'])
# Get the 30-day price difference
yahoo['diff_30'] = yahoo['price'].diff(periods=30)
# Inspect the last five rows of price
print(yahoo['price'].tail(5))
# Show the value_counts of the difference between change_30 and diff_30
print(yahoo['diff_30'].sub(yahoo['change_30']).value_counts())
# Create daily_return
google['daily_return'] = google['Close'].pct_change(periods=1) * 100
# Create monthly_return
google['monthly_return'] = google['Close'].pct_change(periods=30) * 100
# Create annual_return
google['annual_return'] = google['Close'].pct_change(periods=360) * 100
# Plot the result
google.plot(subplots=True)
plt.show()
# Import data here
prices = pd.read_csv('asset_classes.csv', parse_dates=['DATE'], index_col=['DATE'])
# Inspect prices here
print(prices.info())
# Select first prices
first_prices = prices.iloc[0]
# Create normalized
normalized = prices.div(first_prices) * 100
# Plot normalized
normalized.plot()
plt.show()
# Import stock prices and index here
stocks = pd.read_csv('nyse.csv', parse_dates=['date'], index_col='date')
dow_jones = pd.read_csv('dow_jones.csv', parse_dates=['date'], index_col='date')
# Concatenate data and inspect result here
data = pd.concat([stocks, dow_jones], axis=1)
print(data.info())
# Normalize and plot your data here
data.div(data.iloc[0]).mul(100).plot()
plt.show()
# Create tickers
tickers = ['MSFT', 'AAPL']
# Import stock data here
stocks = pd.read_csv('msft_aapl.csv', parse_dates=['date'], index_col='date')
# Import index here
sp500 = pd.read_csv('sp500.csv', parse_dates=['date'], index_col='date')
# Concatenate stocks and index here
data = pd.concat([stocks, sp500], axis=1).dropna()
# Normalize data
normalized = data.div(data.iloc[0]).mul(100)
# Subtract the normalized index from the normalized stock prices, and plot the result
normalized[tickers].sub(normalized['SP500'], axis=0).plot()
plt.show()
# Set start and end dates
start = '2016-1-1'
end = '2016-2-29'
# Create monthly_dates here
monthly_dates = pd.date_range(start=start, end=end, freq='M')
# Create and print monthly here
monthly = pd.Series(data=[1, 2], index=monthly_dates)
print(monthly)
# Create weekly_dates here
weekly_dates = pd.date_range(start=start, end=end, freq='W')
# Print monthly, reindexed using weekly_dates
print(monthly.reindex(weekly_dates))
print(monthly.reindex(weekly_dates, method='bfill'))
print(monthly.reindex(weekly_dates, method='ffill'))
# Import data here
data = pd.read_csv('unemployment.csv', parse_dates=['date'], index_col='date')
# Show first five rows of weekly series
print(data.asfreq('W').head())
# Show first five rows of weekly series with bfill option
print(data.asfreq('W', method='bfill').head())
# Create weekly series with ffill option and show first five rows
weekly_ffill = data.asfreq('W', method='ffill')
print(weekly_ffill.head())
# Plot weekly_fill starting 2015 here
weekly_ffill.loc['2015':].plot()
plt.show()
# Inspect data here
print(monthly.info())
# Create weekly dates
weekly_dates = pd.date_range(start=monthly.index.min(), end=monthly.index.max(), freq='w')
# Reindex monthly to weekly data
weekly = monthly.reindex(weekly_dates)
# Create ffill and interpolated columns
weekly['ffill'] = weekly.UNRATE.ffill()
weekly['interpolated'] = weekly.UNRATE.interpolate()
# Plot weekly
weekly.plot()
plt.show()
# Import & inspect data here
data = pd.read_csv('debt_unemployment.csv', index_col='date', parse_dates=['date'])
print(data.info())
# Interpolate and inspect here
interpolated = data.interpolate()
print(interpolated.info())
# Plot interpolated data here
interpolated.plot(secondary_y='Unemployment')
plt.show()
| true |
| 67d80e9f6e08c777b363ce0c82c10aabe9387b7d | Python | ITIS-Python/practice-sobolev-2020 | /09_decorators.py | UTF-8 | 731 | 4.1875 | 4 | [] | no_license |
# def do_ten(func):
# for i in range(10):
# func()
# def hello_world():
# print('hello world')
# do_ten(hello_world)
###############################
# def do_ten(func):
# def wrapper():
# for i in range(10):
# func()
# return wrapper
# @do_ten
# def hello_world():
# print('hello world')
# hello_world()
###################################
def do_ten(word):
def wrapper1(func):
def wrapper2():
for i in range(10):
func(word)
return wrapper2
return wrapper1
@do_ten('word')
def hello_world(word='world'):
print(f'hello {word}')
hello_world()
# Page to help
# https://realpython.com/primer-on-python-decorators/
| true |
| f99f2563a4fc7ac7021fd99f8fa39329c569ba10 | Python | rgreenblatt/Equality-Scoring | /calculator.py | UTF-8 | 1,538 | 3.515625 | 4 | [] | no_license |
#cite http://planspace.org/2013/06/21/how-to-calculate-gini-coefficient-from-raw-data-in-python/
import copy
def gini(list_of_values):
sorted_list = sorted(list_of_values)
height, area = 0, 0
for value in sorted_list:
height += value
area += height - value / 2.
fair_area = height * len(list_of_values) / 2.
return (fair_area - area) / fair_area
#determines how effective a solution is
def valueFunction(giniArray):
i = 0
l = copy.copy(len(giniArray))
value = 0
for gini in giniArray:
value += gini*(1-i/l)
i+=1
return value
#cTeach = {Art:1, Bio:4, Chem:3, English:5, French:1, German:1, Spanish:1, Math:6, Music:1, Phys:3, Social: 5}
#nTeach = {Art:0, Bio:0, Chem:1, English:0, French:.5, German:0, Spanish:.5, Math:2, Music:1, Phys:1, Social: 0}
def evaluate(nTeach, dataset):
#cTeach = {'Art':1, 'Bio':4, 'Chem':3, 'English':5, 'French':1, 'German':1, 'Spanish':1, 'Math':6, 'Music':1, 'Phys':3, 'Social': 5}
cTeach = [1, 4, 3, 5, 2, 1, 6, 1, 3, 5]
tTeach = []
i = 0
for i in range(len(cTeach)):
tTeach.append(cTeach[i] + nTeach[i])
giniByYear = []
#print tTeach
for students in dataset:
sPerT = []
for index in range(len(students)):
sPerT.append(float(students[index])/tTeach[index])
giniByYear.append(gini(sPerT))
#print giniByYear
#with open('Exports/2001to13.csv', 'wb') as output:
# writer = csv.writer(output)
# writer.writerow("Year", "Gini")
# c = 0
# for value in giniByYear:
# writer.writerow([2018+c, giniByRow[c]])
# c = c + 1
return valueFunction(giniByYear)
| true |
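A quick worked check of the `gini` helper above (a sketch, assuming the file is importable as `calculator`): a perfectly equal distribution scores 0, while concentrating everything in one of four entries scores 0.75.

```python
from calculator import gini  # hypothetical import of the file above

print(gini([1, 1, 1, 1]))  # 0.0  -> perfect equality
print(gini([0, 0, 0, 4]))  # 0.75 -> one entry holds everything
```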
| 34479cc8fedb7ce84af9ca5b95d5648d7e87f75c | Python | alexissitu/alexissitu.github.io | /Testfolder/chatbox.py | UTF-8 | 1,959 | 3.6875 | 4 | [] | no_license |
def intro():
print()
print("Hi, welcome to chatbox!")
print("Please talk to me!")
print()
def is_valid_input(answer, listOfResponses):
#if answer is in list of listOfResponses
#return True
#else
#returnFalse
    # a direct membership test is enough here
    return answer in listOfResponses
# def process_input(answer, name): #parameter
# if answer == "Hello" or answer == "Hi" or answer == "Hey":
# say_greeting(answer, name)
# else:
# say_bye()
# #print("hello, thank you for talking to me")
def process_input(answer,user):
greetings = ["Hey", "Hi","Hello"]
goodbyes = ["goodbye", "bye", "see ya"]
if is_valid_input(answer, greetings):
say_greeting(answer,user)
elif is_valid_input(answer, goodbyes):
say_bye()
if answer in greetings:
say_greeting(answer, user)
elif answer == "Bye":
        say_bye()
def say_greeting(answer, name):
print()
print ("Oh hi! ")
print("Nice to meet you, %s!" % name)
print()
mood = input ("How are you doing? ")
    if mood == "good":
        say_goodresponse(mood)
    else:
        say_badresponse(mood)
    answerAge()
def reply():
if mood == "good":
say_goodresponse(mood)
if mood == "bad":
say_badresponse(mood)
def say_badresponse(mood):
print()
print("I'm sorry to hear that.")
print()
def say_goodresponse(mood):
print()
print ("That's great to hear! ")
print()
def say_bye():
print()
print("see ya! ")
print()
def answerAge():
age = input("How old are you? ")
ageResponse(age)
def ageResponse(age):
    if age.isdigit():  # input() returns a string, so check for digits
print ("nice! goodbye.")
def main():
intro()
while True:
user = input("What's your name? ")
answer = input("What will you say? ")
process_input(answer, user)
if __name__ == "__main__":
main()
| true |
| c25357582d9f339a219e0c11749ad7c28befd77a | Python | kiddays/Covid19-WWIE | /extract_abstracts.py | UTF-8 | 2,347 | 2.703125 | 3 | [] | no_license |
import glob, json, jsonlines
import random
from nltk import word_tokenize, sent_tokenize
def jsonl_file_create(glob_list):
x = 0
y = 0
with jsonlines.open('100abstracts.jsonl', mode='a') as writer:
for file in glob_list:
if y == 100:
break
with open(file, encoding='utf8') as f:
data = json.load(f)
file_name = data['paper_id']
full_abstract = ""
for thing in data['abstract']:
full_abstract += thing['text'] + ' '
if full_abstract:
if len(full_abstract) > 100:
y += 1
# print(full_abstract, '\n')
# writer.write({"id": x, "abstract": full_abstract})
writer.write({"text": full_abstract})
x += 1
def conLL_file_create(glob_list):
x = 1
y = 0
with open('train_multiBIONER_subset.txt', 'a', encoding='utf8') as writer:
for file in glob_list[:300]:
if y == 100:
break
with open(file, encoding='utf8') as f:
data = json.load(f)
file_name = data['paper_id']
full_abstract = ""
for thing in data['abstract']:
full_abstract += thing['text'] + ' '
if full_abstract:
if len(full_abstract) > 100:
y += 1
print("abstract: ", y)
toked_sents = sent_tokenize(full_abstract)
toked_sents = [word_tokenize(sent) for sent in toked_sents]
# print(toked_sents)
for sent in toked_sents:
print("sent:", x)
x += 1
for toke in sent:
writer.write(toke + '\n')
writer.write('.....\n')
def main():
glob_list = glob.glob("./comm_use_subset/pdf_json/*.json") # common use pdf_json files directory
random.shuffle(glob_list)
jsonl_file_create(glob_list)
conLL_file_create(glob_list)
if __name__ == "__main__":
main()
| true |
| 9821190b068f8a3414fe0ded3f739bdc1f7f30e4 | Python | ivycheung7/Python_Scripts | /funScripts.py | UTF-8 | 3,087 | 3.53125 | 4 | [] | no_license |
#!/usr/bin/env python
#Python 3.5.2
from bs4 import BeautifulSoup
import requests
from PIL import Image, ImageDraw, ImageFont
import random, string
from time import strftime, gmtime
import datetime, time, sys
"Script returns and displays the links to each top trending projects from GitHub."
def displayGithubTrendingProjects():
print(strftime("%A, %b %d, %Y %I:%M%p", gmtime()))
html = requests.get("https://github.com/trending")
soup = BeautifulSoup(html.text, "html.parser")
urlList = []
for header in soup.find_all('h3'):
for url in header.find_all('a'):
print(str('https://github.com' + url.get('href')))
urlList.append(str('https://github.com' + url.get('href')))
return urlList
"Creates a rip-off of Gmail icons. Alignments need work"
def createLetterIcon():
isColorBright = False
name = input("Enter your name\n")
firstCharInName = name[0].upper()
r = random.randrange(0,255,1)
g = random.randrange(0,255,1)
a = random.randrange(0,255,1)
    if r > 160 and g > 160 and a > 160:  # all channels bright -> dark text will be used below
isColorBright = True
font = ImageFont.truetype( "TitilliumWeb-Regular.ttf", 140)
icon = Image.new('RGBA', (256,256), (r,g,a,0))
canvas = ImageDraw.Draw(icon)
if isColorBright == False:
canvas.text((85,20), firstCharInName,(255,255,255,0), font)
else:
canvas.text((85,20), firstCharInName,(0,0,0,0), font)
canvas = ImageDraw.Draw(icon)
icon.show()
"Creates/Overwrite today's file to write attendance information in."
def checkIn():
print("Type in 0 to stop taking attendance.")
totalCount = 0;
with open("attendance-" + str(datetime.date.today()) + ".csv", 'w') as attendance:
attendance.write("Day, Date,Time,Full Name\n")
while(True):
name = input("Enter your full name.(" + str(totalCount) + ") currently in attendance.\n");
if (name == "0"):
break;
totalCount += 1
attendance.write(str(strftime("%A, %Y-%m-%d,%I:%M%p", gmtime()) + "," + name + '\n'));
attendance.close();
readFile()
'Reads and prints the data collected line-by-line'
def readFile():
print('Opening file: "attendance-' + str(datetime.date.today()) + '.csv"\n')
with open("attendance-" + str(datetime.date.today()) + ".csv", 'r+') as attendance:
for line in attendance:
print(line)
attendance.close()
'Welcome to my one stop shop! For... currently three Python scripts'
def menu():
print("Hi there!")
while(True):
selection = input("What script would you like to run?\n1. Attendance CSV \n2. Create Gmail Icon \n3. Display Github Trending Projects \n0. I want to leave\n")
if(selection == "0"):
print("You disappoint me")
break;
elif(selection == "1"):
checkIn()
elif(selection == "2"):
createLetterIcon()
elif(selection == "3"):
displayGithubTrendingProjects()
else:
print("Why are you so difficult?")
menu()
| true |
| 6b39a18d0c47da945020770a7e6689e7fb2f279b | Python | eschanet/leetcode | /merge-two-sorted-lists/merge-two-sorted-lists.py | UTF-8 | 967 | 3.53125 | 4 | [] | no_license |
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
result_list = []
while l1 and l2:
if l1.val <= l2.val:
result_list.append(l1.val)
l1 = l1.next
else:
result_list.append(l2.val)
l2 = l2.next
l = l1 if l1 else l2
while l:
result_list.append(l.val)
l = l.next
if not result_list: return ListNode().next # this feels dirty
result_listnode = ListNode(result_list[0])
tail = result_listnode
i = 1
while i < len(result_list):
tail.next = ListNode(result_list[i])
tail = tail.next
i += 1
return result_listnode
| true |
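A small driver for the `Solution` above (a sketch; the judge normally builds the lists): construct two sorted linked lists, merge them, and read the merged values back out.

```python
def build(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

merged = Solution().mergeTwoLists(build([1, 2, 4]), build([1, 3, 4]))
out = []
while merged:
    out.append(merged.val)
    merged = merged.next
print(out)  # [1, 1, 2, 3, 4, 4]
```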
| 228f10e6d2edc2accba41f9f3de4c85fa1c8531a | Python | erjantj/hackerrank | /special-palindrome-again.py | UTF-8 | 669 | 3.171875 | 3 | [] | no_license |
def substrCount(n, s):
s = s+'*'
arr = []
row = ['', 0]
polindromes = 0
for i in range(n+1):
c = s[i]
if c == row[0]:
row[1] += 1
if c != row[0]:
if row[0]:
arr.append(row)
row = [c, 1]
for row in arr:
polindromes += (row[1]*(row[1]+1))//2
for i in range(1,len(arr)-1):
if arr[i-1][0] == arr[i+1][0] and arr[i][1] == 1:
polindromes += min(arr[i-1][1],arr[i+1][1])
return polindromes
with open('input.txt') as f:
content = f.readlines()
n = int(content[0])
s = content[1].strip()
print(substrCount(n,s))
| true |
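A tiny sanity check for `substrCount` above (a sketch, called directly and independently of the input.txt block): in a run of identical characters every substring qualifies, so 'aaaa' gives 4*5/2 = 10, and 'asasd' adds the two single-centre palindromes 'asa' and 'sas' to its five length-1 substrings.

```python
print(substrCount(4, 'aaaa'))   # 10
print(substrCount(5, 'asasd'))  # 7
```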
| 598bcd3c45efedae2e402d4305e11a81e6fd7eb2 | Python | wesreisz/intro_python | /PythonApplication1/11_WorkingWithFiles/_11_WorkingWithFiles.py | UTF-8 | 678 | 3.28125 | 3 | [] | no_license |
TITLE = 'Party List to File'
print("-"*40)
print("{:^40}\n".format(TITLE))
print("-"*40)
#constants
FILENAME = "guestlist.csv"
MODE = "w"
OUTPUT_PATTERN="%s,%d\n"
#member variables
names = []
ages = []
while True :
name = input("Input guest name [blank to stop]: ")
if (len(name)<=0) :
break
else :
age = input("Input guest age: ")
names.append(str(name))
ages.append(int(age))
myfile = open(FILENAME, mode=MODE)
print("\nGuest list:")
print("-"*40)
for position in range(len(names)):
print(names[position])
myfile.write(OUTPUT_PATTERN % (names[position],ages[position]))
myfile.close()
print("\n%s written" %(FILENAME))
| true |
| 7a6741654fabd57ea286ab990fd2f64e489d1b07 | Python | kiteB/Network_Programming | /hw2/5.py | UTF-8 | 343 | 4.03125 | 4 | [] | no_license |
# Using a for loop, build the following lists:
# - a list of the numbers 0 through 49
# - a list of the squares of the numbers 0 through 49
import sys
numbers = []
squared_numbers = []
for i in range(50):
numbers.append(i)
squared_numbers.append(i**2)
print(numbers)
print(squared_numbers)
| true |
| 8921104ed97c620eb5c3c4277c5f317548f0f87d | Python | hubert-kompanowski/Natural-Selection-Simulation | /evolution/meal.py | UTF-8 | 417 | 2.90625 | 3 | ["MIT"] | permissive |
from random import randrange
import pygame
from colors import *
class Meal:
def __init__(self, _screen, map, id_):
self.exist = True
(self.x, self.y) = (randrange(map[0], map[1]), randrange(map[0], map[1]))
self.screen = _screen
self.draw()
self.id = id_
def draw(self):
if self.exist:
pygame.draw.circle(self.screen, GREEN, (self.x, self.y), 5)
| true |
| 7c467281e3f3e898d5828fe421f916f890075f6a | Python | Deepakgarg2309/All_Program_helper | /Python/primeOrComposite.py | UTF-8 | 418 | 4.1875 | 4 | ["MIT"] | permissive |
userEntry = int(input("Enter a number: "))
if userEntry > 1:
for i in range(2, userEntry):
if userEntry % i == 0:
print(userEntry, "is a Composite Number.")
break
else:
print(userEntry, "is a Prime Number.")
elif userEntry == 0 or userEntry == 1:
print(userEntry, "is neither a Prime Number nor a Composite number.")
else:
print(userEntry, "is a Prime Number.")
| true |
| 7120811a1b51c4450c6f6dfd5e10084c38da571d | Python | Santhosh02K/ENLIST-Task-1 | /BEST-ENLIST-ASSIGNMENT.py | UTF-8 | 1,127 | 4.5 | 4 | [] | no_license |
# strings
#how to print a value:
print("30 days 30 hour Challenge")
print('30 days 30 hour Challenge')
#Assigning string to variables
Hours = "thirty"
print(Hours)
#indexing using strings
Days = "Thirty days"
print(Days[0])
#How to print the particular character from certain text?
Challenge = "i will win"
print(Challenge[7:10])
#print the length of character
Challenge = "i will win"
print(len(Challenge))
#Convert string into lowercase
Challenge = "i will win"
print(Challenge.lower())
#string concatenation
a = "30 Days"
b = "30 hours"
c = a+b
print(c)
#Adding space in between string of concatenation
a = "30 days"
b = "30 hour challenge"
c = a + " " + b
print(c)
#casefold() - usage
text = "Thirty Days And Thirty Hours"
x = text.casefold()
print(x)
#capitalize
text = "thirty days and thirty hours"
x = text.capitalize()
print(x)
#find
text = "Thirty days and thirty hours"
x = text.find("t")
print(x)
#isalpha
text = "Thirty Days And Thirty Hours"
x = text.isalpha()
print(x)
#isalnum
text = "Thirty Days And Thirty Hours"
x = text.isalnum()
print(x)
| true |
| d70302192240c7f4f87f80bb29b75d9fccf8dc05 | Python | ashokkumarramajayam/cp1404-assignment1 | /Country.py | UTF-8 | 305 | 2.9375 | 3 | [] | no_license |
__author__ = 'Ashok_kumar'
class Country:
def __init__(self, name, code, symbol):
        self.name = name
        self.code = code
        self.symbol = symbol
    def __str__(self):
        return self.name + " " + self.code + " " + self.symbol
    def currency(self, amount):
        return self.symbol + str(amount)
| true |
| 26efa6f5ad20468ca9406818ffcbc12d45a24dc4 | Python | RoseReyes/python | /score-grades.py | UTF-8 | 651 | 3.859375 | 4 | [] | no_license |
def scoreGrades():
import random
random_num = 0
for index in range(10):
random_num = random.randint(60,100)
        if 60 <= random_num <= 69:
            print("Score:",random_num,";","Your grade is","-",'D')
        elif 70 <= random_num <= 79:
            print("Score:",random_num,";","Your grade is","-",'C')
        elif 80 <= random_num <= 89:
            print("Score:",random_num,";","Your grade is","-",'B')
        elif 90 <= random_num <= 100:
            print("Score:",random_num,";","Your grade is","-",'A')
print("End of the program. Bye!")
scoreGrades()
| true |
| f27b55ea9f52b1de012321dc772fa95f880134d3 | Python | takuwaaan/Atcoder_Study | /ABC/ABC93_C.py | UTF-8 | 161 | 2.71875 | 3 | [] | no_license |
L = list(map(int, input().split()))
L.sort()
d1 = L[-1] - L[-2]
d2 = L[-1] - d1 - L[0]
if d2 % 2 == 0:
print(d1 + d2 // 2)
else:
print(d1 + d2 // 2 + 2)
| true |
| e8d458dd35daf9eee800bd92478175a7aa09aa84 | Python | pandas-dev/pandas | /pandas/tests/tslibs/test_liboffsets.py | UTF-8 | 5,108 | 2.796875 | 3 | ["BSD-3-Clause"] | permissive |
"""
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
from pandas._libs.tslibs.ccalendar import (
get_firstbday,
get_lastbday,
)
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import roll_qtrday
from pandas import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_last_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_get_last_bday(dt, exp_week_day, exp_last_day):
assert dt.weekday() == exp_week_day
assert get_lastbday(dt.year, dt.month) == exp_last_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_get_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert get_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shift_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert liboffsets.shift_month(dt, months, day_opt=day_opt) == expected
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(1, "start", Timestamp("1929-06-01")),
(-3, "end", Timestamp("1929-02-28")),
(25, None, Timestamp("1931-06-5")),
(-1, 31, Timestamp("1929-04-30")),
],
)
def test_shift_month_ts(months, day_opt, expected):
ts = Timestamp("1929-05-05")
assert liboffsets.shift_month(ts, months, day_opt=day_opt) == expected
def test_shift_month_error():
dt = datetime(2017, 11, 15)
day_opt = "this should raise"
with pytest.raises(ValueError, match=day_opt):
liboffsets.shift_month(dt, 3, day_opt=day_opt)
@pytest.mark.parametrize(
"other,expected",
[
# Before March 1.
(datetime(2017, 2, 10), {2: 1, -7: -7, 0: 0}),
# After March 1.
(Timestamp("2014-03-15", tz="US/Eastern"), {2: 2, -7: -6, 0: 1}),
],
)
@pytest.mark.parametrize("n", [2, -7, 0])
def test_roll_qtrday_year(other, expected, n):
month = 3
day_opt = "start" # `other` will be compared to March 1.
assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n]
@pytest.mark.parametrize(
"other,expected",
[
# Before June 30.
(datetime(1999, 6, 29), {5: 4, -7: -7, 0: 0}),
# After June 30.
(Timestamp(2072, 8, 24, 6, 17, 18), {5: 5, -7: -6, 0: 1}),
],
)
@pytest.mark.parametrize("n", [5, -7, 0])
def test_roll_qtrday_year2(other, expected, n):
month = 6
day_opt = "end" # `other` will be compared to June 30.
assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n]
def test_get_day_of_month_error():
# get_day_of_month is not directly exposed.
# We test it via roll_qtrday.
dt = datetime(2017, 11, 15)
day_opt = "foo"
with pytest.raises(ValueError, match=day_opt):
# To hit the raising case we need month == dt.month and n > 0.
roll_qtrday(dt, n=3, month=11, day_opt=day_opt, modby=12)
@pytest.mark.parametrize(
"month",
[3, 5], # (other.month % 3) < (month % 3) # (other.month % 3) > (month % 3)
)
@pytest.mark.parametrize("n", [4, -3])
def test_roll_qtr_day_not_mod_unequal(day_opt, month, n):
expected = {3: {-3: -2, 4: 4}, 5: {-3: -3, 4: 3}}
other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday.
assert roll_qtrday(other, n, month, day_opt, modby=3) == expected[month][n]
@pytest.mark.parametrize(
"other,month,exp_dict",
[
# Monday.
(datetime(1999, 5, 31), 2, {-1: {"start": 0, "business_start": 0}}),
# Saturday.
(
Timestamp(2072, 10, 1, 6, 17, 18),
4,
{2: {"end": 1, "business_end": 1, "business_start": 1}},
),
# First business day.
(
Timestamp(2072, 10, 3, 6, 17, 18),
4,
{2: {"end": 1, "business_end": 1}, -1: {"start": 0}},
),
],
)
@pytest.mark.parametrize("n", [2, -1])
def test_roll_qtr_day_mod_equal(other, month, exp_dict, n, day_opt):
# All cases have (other.month % 3) == (month % 3).
expected = exp_dict.get(n, {}).get(day_opt, n)
assert roll_qtrday(other, n, month, day_opt, modby=3) == expected
@pytest.mark.parametrize(
"n,expected", [(42, {29: 42, 1: 42, 31: 41}), (-4, {29: -4, 1: -3, 31: -4})]
)
@pytest.mark.parametrize("compare", [29, 1, 31])
def test_roll_convention(n, expected, compare):
assert liboffsets.roll_convention(29, n, compare) == expected[compare]
| true |
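For orientation, the behaviour pinned down by the parametrized cases above can be reproduced directly with the same private pandas helper the tests import (a sketch, not part of the test file):

```python
from datetime import datetime
import pandas._libs.tslibs.offsets as liboffsets

# day_opt='start' snaps to the first day of the target month
assert liboffsets.shift_month(datetime(2017, 11, 30), 1, day_opt='start') == datetime(2017, 12, 1)
# day_opt=None keeps the day of month (clipped to the target month's length)
assert liboffsets.shift_month(datetime(2017, 11, 30), 0, day_opt=None) == datetime(2017, 11, 30)
```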
| edfecf44f5624e00ff7e52cce6c55f8fba33ee60 | Python | frapa/A11 | /esperienza_1/pendolo100.py | UTF-8 | 2,592 | 2.875 | 3 | [] | no_license |
# -*- encoding: utf-8 -*-
from math import *
import csv
import itertools
mpl = False
try:
import matplotlib.pyplot as plt
from matplotlib import rc
mpl = True
except:
pass
def mean(data):
return sum(data) / len(data)
tex = "\\begin{table}\n\t\\begin{tabular} {" + " | ".join(["c c c c c"] * 2) + "}\n\t\t\\toprule\n"
tex += "\t\t\multicolumn{10}{c}{Periodo del pendolo - Misure di un operatore [s]} \\\\\n\t\t\\midrule\n"
# read the data
header = []
meas = []
meas_txt = []
with open('pendolo/n100.csv') as csvfile:
data = csv.reader(csvfile)
for row in data:
meas.append(float(row[0]))
meas_txt.append(row[0])
l = 0
for i in range(10):
if l == 5:
tex += "\t\t\\midrule\n"
tex += "\t\t" + " & ".join([x for n, x in enumerate(meas_txt) if ((n - i) % 10) == 0]) + " \\\\\n"
l += 1
tex += "\t\\bottomrule\n\t\\end{tabular}\n\\end{table}"
print tex
print
# compute some stuff
xs = meas
N = len(xs)
m = mean(xs)
D = sum([(x - m)**2 for x in xs]) / (N - 1)
sigma = sqrt(D)
print "N = {}\nm = {}\nD = {}\nσ = {}".format(N, m, D, sigma)
# bins
delta = 0.03
x_min = 1.355
x_max = 1.625
bins = [x_min + i * delta for i in range(int((x_max - x_min) / delta) + 1)]
if mpl:
    # histogram
n, r_bins, patches = plt.hist(xs, bins, (x_min, x_max),
normed=True, color=(0, 0.65, 1), alpha=0.7, zorder=2)
t = plt.title('Periodo del pendolo', fontsize=16)
t.set_y(1.14)
plt.suptitle('100 misure di un singolo sperimentatore', x=0.50, y=0.90)
plt.xlabel('Periodo [s]', fontsize=14)
plt.ylabel(ur'Densità campionaria $[\frac{1}{s}]$', fontsize=14)
plt.gca().set_ylim((0, 11))
plt.grid(True)
plt.vlines(m, 0, 100, linestyles='dashed', linewidth=2, color=(0, 0, 1), zorder=5)
plt.axvspan(m - sigma, m + sigma, color=(0.7, 0.7, 0.7), alpha=0.5, zorder=-5)
plt.text(m - 0.003, 11.2, "m")
plt.vlines(m - sigma - 0.00025, 0, 100, linestyles='dashed', linewidth=1, color=(0.55, 0.55, 0.55), zorder=-4)
plt.vlines(m + sigma - 0.00025, 0, 100, linestyles='dashed', linewidth=1, color=(0.55, 0.55, 0.55), zorder=-4)
plt.text(m - sigma - 0.012, 11.2, u"m - σ")
plt.text(m + sigma - 0.014, 11.2, u"m + σ")
prob = plt.twinx()
prob.set_ylim((0, 0.33))
prob.set_ylabel(u"Frequenza campionaria", fontsize=14)
prob.set_yticks((0, 0.06, 0.12, 0.18, 0.24, 0.3))
plt.gca().set_xlim((1.33, 1.65))
# make sure nothing goes outside the screen
plt.subplots_adjust(left=0.11, right=0.89, top=0.81, bottom=0.13)
plt.show()
| true |
| 33078ed60dca9c7300dd66b398b9484d564f8187 | Python | Beovulfo/Python | /modules/utility/field2hdf5.py | UTF-8 | 3,152 | 2.796875 | 3 | [] | no_license |
"""
Module for converting binary file for field XZ generated by TOFIS_LOMA
into HDF5 compressed file
"""
def field2hdf5(filename):
"""
This function reads the XZ field generated by TOFIS fortran program and
converts it to same filename + .h5, using gzip compression. Furthermore
this new file includes: xvec,zvec,y,jspecy,Re,time and the field itself.
{y,xvec,zvec,time} = readfieldxz(filename)
"""
import numpy as np
import h5py
import os.path
if os.path.isfile(filename+'.h5')==True:
print "This file already exists man!Nothing done."
return
f = open(filename,'rb')
#Create dtypes for proper reading from Fortran unformatted
# binary file
#Declaring types
yfmap = np.dtype([('y','float64'),('fmap','float64')])
#uw00 = np.dtype([('u00','float32'),('w00','float32')])
rec1 = np.dtype([('dummy1','uint32'), \
('time','float32'),('Re','float32'), \
('alp','float32'),('bet','float32'), \
('mgalx','uint32'),('my','uint32'),('mgalz','uint32'),\
('nspec','uint32'),('nacum','uint32'),\
('dummy2','uint32')])
#Read first record
RECORD1=np.fromfile(f,rec1,1)
#Check if first record is ok...
if RECORD1['dummy1'] != RECORD1['dummy2']:
print "File read not good...!"
return
else:
print "File RECORD1 read correctly :)"
mgalx=RECORD1['mgalx'][0]
my=RECORD1['my'][0]
mgalz=RECORD1['mgalz'][0]
nspec=RECORD1['nspec'][0]
print "nspec= %s" % nspec
rec2 = np.dtype([('dummy1','uint32'), \
('jspecy','uint32',nspec), \
('yfmap',yfmap,my), \
('dummy2','uint32')])
#READ record2
RECORD2=np.fromfile(f,rec2,1)
#Check if record2 is ok...
if RECORD2['dummy1'] != RECORD2['dummy2']:
print "File read not good...!"
return
else:
print "File RECORD2 read correctly :)"
#Save y vector amd jspecy
y = RECORD2['yfmap']['y'][0,]
jspecy = RECORD2['jspecy'][0,]
#Define plane info
planey = np.ndarray(shape=(mgalx,mgalz),order='F',dtype=float)
#Create type "recplane"
recplaney = np.dtype([('dummy1','uint32'), \
('data','float32',[mgalz,mgalx]), \
('dummy2','uint32')])
#Read all planes Y info
FIELD1=np.ndarray(shape=(nspec,mgalz,mgalx),\
dtype=float)
for j in range(nspec):
readrec = np.fromfile(f,recplaney,1)
planeydata = readrec['data']
FIELD1[j,:,:] = planeydata[:,:]
f.close()
#FIELD1.shape=(nspec,mgalz,mgalx)
#Create vector X and Z
Lx = 2*3.1415/RECORD1['alp']
Lz = 2*3.1415/RECORD1['bet']
x = np.linspace(-Lx/2.,Lx/2.,mgalx)
z = np.linspace(-Lz/2.,Lz/2.,mgalz)
hf5 = h5py.File(filename+'.h5','w')
hf5.create_dataset('field',data=FIELD1,compression='gzip')
hf5.create_dataset('xvec',data=x)
hf5.create_dataset('zvec',data=z)
hf5.create_dataset('y',data=y)
hf5.create_dataset('time',data=RECORD1['time'])
hf5.create_dataset('Re',data=RECORD1['Re'])
hf5.create_dataset('jspecy',data=jspecy)
hf5.close()
del FIELD1
print 'Data from time = %s' % RECORD1['time']
print 'mgalx = %s, my = %s, mgalz = %s' % (RECORD1['mgalx'], \
RECORD1['my'],RECORD1['mgalz'])
#Return y, FIELD
print "File conversion finished. Congratulations"
return
| true |
| ba27d1e142c62399a60a629a647a36d5096af611 | Python | MatthewJin001/HECalib | /code/script_opt_optandinit.py | UTF-8 | 760 | 2.53125 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
'''
For the given data file, optimized distribution of X is unpicked
and compared with the original one
'''
from optimization import optandinit
import params
from helpers import stats
if __name__ == '__main__':
target = 'mean'
datafile = params.datafiles[0]
norms_initial, norms_optimal, opt = optandinit.compare(datafile, target)
print opt.XInitial
print opt.XOptimal
s_initial = stats.calc_statistics(norms_initial)
s_optimal = stats.calc_statistics(norms_optimal)
stats.print_statistics_header()
stats.print_statistics(s_initial)
stats.print_statistics(s_optimal)
optandinit.create_comparison_histogram(norms_initial, norms_optimal, opt, datafile)
| true |
| db0c8342b5bd2297d90ce55ac19291ca167c8ec1 | Python | Mika-IO/python-skills | /random-things/retorna lista de inteiros ordenados.py | UTF-8 | 319 | 3.34375 | 3 | [] | no_license |
def ordenar_inteiros_numa_lista(lista):
for i in range(len(lista)):
lista[i] = int(lista[i])
lista.sort()
return lista
print(ordenar_inteiros_numa_lista([5,4,3,2,1]))
'''
A MORE EFFICIENT WAY TO SORT THE LISTS OF NUMBERS FROM EXERCISES 16.8 OF GRUPY-SANCA IS TO USE THIS FUNCTION
'''
| true |
| f51ddb69129804435391c602975917f8b1c87877 | Python | damiansp/completePython | /game/04_creating_visuals/color_utils.py | UTF-8 | 630 | 3.546875 | 4 | [] | no_license |
def darken(color, scale):
assert 0 <= scale <= 1, '`scale` must be between 0 and 1'
color = [comp * scale for comp in color]
return color
def scale_color(color, scale):
'''alias for `darken()`'''
return darken(color, scale)
def saturate(color):
color = [min(comp, 255) for comp in color]
return color
def saturate_color(color):
'''alias for `saturate()`'''
return saturate(color)
def lerp(v1, v2, factor=0.5, as_int=True):
'''linear interpolation between `v1` and `v2`'''
mix = v1 + factor*(v2 - v1)
if as_int:
mix = int(round(mix))
return mix
| true |
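A short usage sketch for the helpers above, assuming colors are (R, G, B) sequences in the 0-255 range:

```python
print(darken((200, 100, 50), 0.5))  # [100.0, 50.0, 25.0]
print(saturate([300, 128, 20]))     # [255, 128, 20] -> components clamped to 255
print(lerp(0, 255, 0.25))           # 64, the interpolated value rounded to an int
```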
| 2926f00bf50495b3eaa76a4c549c3ff028cf1074 | Python | madhubabuv/Path-Finding-Algorithms | /Qlearning.py | UTF-8 | 4,238 | 2.703125 | 3 | [] | no_license |
import numpy as np
import cv2
import time
from random import randint
gamma=0.8
iterate=250
actions=8
cur_node=0
Q=[]
top=[0]
bottom=[]
right=[]
left=[0]
pose=[]
count=0
path_cost=[0,10,0,10,0,10,0,10]
img1=cv2.imread('images/example.jpg',-1)
img=cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img1,5)
frame = cv2.Canny(img,100,200)
kernel_d= np.ones((3,3),np.uint8)
edges = cv2.dilate(frame,kernel_d,iterations = 5)
height, width = img.shape[:2]
comp_h=int(width/43)+1
comp_v=int(height/43)+1
nodes=(((height/43)+1)*((width/43)+1))
print nodes,comp_h,comp_v
print "Conside a gird of",comp_h,"X",comp_v,"and Give Start and Goal "
node=input("enter the staring node\t")
goal=input('enter the goal\t')
print 'calculating the path...'
# Draw a blue line with thickness of 1 px
for i in range(0,height,43):
cv2.line(img1,(0,i),(width,i),(255,0,0),1)
for j in range(0,width,43):
cv2.line(img1,(j,0),(j,height),(255,0,0),1)
for k in range (0,height,43):
for l in range(0,width,43):
pose.append([])
pose[count].append(l)
pose[count].append(k)
count+=1
#cv2.circle(img,(l,k), 4, (0,0,255), -1)
for i in xrange(nodes+1):
Q.append([])
for j in range(0,actions):
Q[i].append(0.0)
if comp_h > comp_v:
go=comp_h
else :
go=comp_v
for i in range(1,comp_h):
top.append(i)
for i in range(1,go):
left.append(i*(comp_h))
right.append(i*(comp_h)-1)
for j in range(max(left),nodes):
bottom.append(j)
right.append(nodes-1)
def possible_node(state,action):
if state in top:
if action==5 or action==6 or action==7 :
return nodes
if state in left:
if action==3 or action==4 or action==5:
return nodes
if state in right:
if action==7 or action==0 or action==1:
return nodes
if state in bottom:
if action==1 or action==2 or action==3:
return nodes
if (action==0):
state+=1
elif (action==1):
state+=comp_h+1
elif (action==2):
state+=comp_h
elif (action==3):
state+=comp_h-1
elif (action==4):
state-=1
elif (action==5):
state-=comp_h+1
elif (action==6):
state-=comp_h
elif(action==7):
state-=comp_h-1
if( state<0 or state >nodes-1):
return nodes
else:
return state
def chooseAnaction(i):
node=i
count=0
#print cur_node
action=randint(0,actions-1)
node_to_go=possible_node(node,action)
x1,y1,x2,y2=0,0,0,0
if node_to_go !=nodes:
node_1=pose[node]
node_2=pose[node_to_go]
x1=node_1[1]
y1=node_1[0]
x2=node_2[1]
y2=node_2[0]
#print x1,y1,x2,y2
#print edges[x1,y1],edges[x2,y2]
if x2 < x1 :
temp=x1
x1=x2
x2=temp
if y2 < y1:
temp=y1
y1=y2
y2=temp
if x1==x2:
for i in range(y1,y2,5):
if edges[x1,i]==255:
count+=1
elif y1==y2:
for j in range(x1,x2,5):
if edges[j,y1]==255:
count+=1
else:
for k,l in zip(range(x1,x2,5),range(y1,y2,5)):
if edges[k,l]==255:
count+=1
if count>=1:
node_to_go=nodes
if (node_to_go==goal):
reward=100
elif (node_to_go==nodes):
reward=-100
else:
reward=-1
if (reward >=-1):
Q[node][action]=int(reward+gamma*(max(Q[node_to_go])))
#cur_node=node_to_go
start_time=0
def shortest():
global start_time
start_time = time.clock()
for i in range(0,iterate):
for j in range(0,nodes):
chooseAnaction(j)
shortest()
edges=cv2.cvtColor(edges,cv2.COLOR_GRAY2BGR)
def path(node,goal):
count=0
if (max(Q[node])==0):
print '''
oooops !!! I cant reach the node obstacle is there
change the goal node to adjacent nodes
'''
else:
while(node!=goal):
ind=[j for j,x in enumerate(Q[node]) if x==max(Q[node])]
action=ind[0]
for l in range(len(ind)):
if (path_cost[ind[l]] < path_cost[action]):
action=ind[l]
print node,action
state=possible_node(node,action)
#print state
cv2.circle(edges,(pose[state][0],pose[state][1]), 4, (0,0,255), -1)
cv2.line(edges,(pose[node][0],pose[node][1]),(pose[state][0],pose[state][1]),(0,0,200),5)
cv2.circle(img1,(pose[state][0],pose[state][1]), 4, (0,0,255), -1)
cv2.line(img1,(pose[node][0],pose[node][1]),(pose[state][0],pose[state][1]),(0,0,200),5)
node=state
print node
path(node,goal)
print "Total time taken",time.clock() - start_time, "seconds"
cv2.imshow('edges',edges)
cv2.imshow('grid',img1)
#cv2.imshow('edges',edges)
cv2.waitKey(0)
| true |
| ba49726f6ab1c871cf0c1a0f9dfa70990df7cb25 | Python | mintchatzis/Algorithms_from_scratch | /CS core algorithms/Graph_Algos/graph.py | UTF-8 | 4,796 | 4 | 4 | [] | no_license |
class Graph():
'''Graph representation using dictionary of sets
connections: list of tuples, eg. ('a','b'), meaning nodes a and b are linked
directed: if True, graph is directed
'''
def __init__(self, connections = None, directed = False):
self.__graph = {}
self.__directed = directed
self.__visited = self.init_visited() #keeps track of 'visited' status of each node
self.__data = self.__init_data()
#construct graph from given connections
if connections is not None:
for pair in connections:
self.add(pair)
def __init_vertex(self,vertex):
self.__graph[vertex] = set()
self.__data[vertex] = None
self.__visited[vertex] = False
def add(self,pair):
'''
Adds connection to graph.
Comp: O(1)
'''
node1,node2 = pair
#Create nodes, if they don't already exist
if node1 not in self.__graph:
self.__init_vertex(node1)
if node2 not in self.__graph:
self.__init_vertex(node2)
#Add one-way connection between nodes
self.__graph[node1].add(node2)
#for undirected graph, add reverse connection too
if (not self.__directed):
self.__graph[node2].add(node1)
return
def init_visited(self):
'''
Returns a dictionary which stores the 'visited status' of every node
Comp: O(n)
'''
temp = {}
for key in self.__graph.keys():
temp[key] = False
return temp
def __init_data(self):
'''
Returns a dictionary representing the data stored in each graph node
all initialized to None
Comp: O(n)
'''
temp = {}
for key in self.__graph.keys():
temp[key] = None
return temp
def get_neighbors(self,vertex):
return self.__graph[vertex]
def get_all_data(self):
return self.__data
def get_data(self,vertex):
return self.__data[vertex]
def get_vertices(self):
return self.__graph.keys()
def get_graph_rep(self):
return self.__graph
def get_visited(self,vertex):
'''
Returns true if given vertex has been visited
'''
return self.__visited[vertex]
def get_size(self):
return len(self.__graph)
def set_visited(self,vertex,visited):
self.__visited[vertex] = visited
return
def set_data(self,vertex,value):
self.__data[vertex] = value
return
def is_empty(self):
return self.__graph == {}
if __name__ == "__main__":
init_connections = [ ('a', 'c'), ('a', 'b'),
('b', 'd'),('b', 'e'),
('c', 'f'),
('d', 'g'),
('e', 'g'),
('f', 'g') ]
extra_connections = [('g','h'),
('h','f')]
g_undir = Graph(init_connections)
g_dir = Graph(init_connections,directed=True)
print(f"Your undirected graph, sir: \n {g_undir.get_graph_rep()}")
print(f"Your directed graph, sir: \n {g_dir.get_graph_rep()}")
tests = []
#Graph initialization with 0 arguments
def test1():
pass
tests.append(test1)
#Graph initialization with given edges, directed = False
def test2():
g_undir = Graph(init_connections)
expected = {
"a":{"b","c"},
"b":{"a","d","e"},
"c":{"a","f"},
"d":{"b","g"},
"e":{"b","g"},
"f":{"c","g"},
"g":{"d","e","f"} }
        assert g_undir.get_graph_rep() == expected  # __graph is private (name-mangled); use the accessor
#tests.append(test2)
##Graph initialization with given edges, directed = True
def test3():
pass
tests.append(test3)
def run_tests():
for test in tests:
test()
print(f"All {len(tests)} tests passed.")
#run_tests()
"""
STORAGE
_______
def get_edges(g):
'''returns edges of a graph'''
edges = []
for key in g.keys():
for v in g[key]:
rev = (v,key)
if rev not in edges:
edges.append((key,v))
return edges
edges = get_edges(undir_graph)
print(edges)
undir_graph = {
"a":{"b","c"},
"b":{"a","d","e"},
"c":{"a","f"},
"d":{"b","g"},
"e":{"b","g"},
"f":{"c","g"},
"g":{"d","e","f"}}
dir_graph = {
"a":{"b","c"},
"b":{"d","e"},
"c":{"f"},
"d":{"g"},
"e":{"g"},
"f":{"g"},
"g":set()}
"""
| true |
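A brief usage sketch for the `Graph` class above (not part of the original file):

```python
g = Graph([('a', 'b'), ('b', 'c')])   # undirected by default
print(list(g.get_vertices()))         # ['a', 'b', 'c']
print(g.get_neighbors('b'))           # {'a', 'c'} (a set)
g.add(('c', 'd'))
g.set_data('d', 42)
print(g.get_data('d'), g.get_size())  # 42 4
```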
| 4e4c039d51c5f829e93eb42a4a4fd8d4c0c0a1b4 | Python | dohyunjang/graph-adaptive-activation-functions-gnns | /Utils/graphML.py | UTF-8 | 19,844 | 3.234375 | 3 | [] | no_license |
# 2018/11/01~2018/07/12
# Fernando Gama, fgama@seas.upenn.edu.
"""
graphML.py Module for basic GSP and graph machine learning functions.
Functionals
LSIGF: Applies a linear shift-invariant graph filter
Activation Functions - Nonlinearities (nn.Module)
MaxLocalActivation: Creates a localized max activation function layer
MedianLocalActivation: Creates a localized median activation function layer
NoActivation: Creates a layer for no activation function
Summarizing Functions - Pooling (nn.Module)
NoPool: No summarizing function.
"""
import math
import numpy as np
import torch
import torch.nn as nn
import Utils.graphTools as graphTools
zeroTolerance = 1e-9 # Values below this number are considered zero.
infiniteNumber = 1e12 # infinity equals this number
# WARNING: Only scalar bias.
def LSIGF(h, S, x, b=None):
"""
LSIGF(filter_taps, GSO, input, bias=None) Computes the output of a linear
shift-invariant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of filter taps, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f.
Then, the LSI-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
[h_{f,g,e}]_{k} S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# The basic idea of what follows is to start reshaping the input and the
# GSO so the filter coefficients go just as a very plain and simple
# linear operation, so that all the derivatives and stuff on them can be
# easily computed.
# h is output_features x edge_weights x filter_taps x input_features
# S is edge_weighs x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
#print(f"Graph Filter {np.shape(x)}")
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
assert S.shape[0] == E
N = S.shape[1]
assert S.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation we've been using:
# h in F x E x K x G
# S in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
x = x.reshape([B, 1, G, N])
S = S.reshape([1, E, N, N])
z = x.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
x = torch.matmul(x, S) # B x E x G x N
xS = x.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
# We multiply z on the left, and h on the right, the output is to be
# B x N x F (the multiplication is not along the N dimension), so we reshape
# z to be B x N x E x K x G and reshape it to B x N x EKG (remember we
# always reshape the last dimensions), and then make h be E x K x G x F and
# reshape it to EKG x F, and then multiply
y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E*K*G]),
h.reshape([F, E*K*G]).permute(1, 0)).permute(0, 2, 1)
# And permute again to bring it from B x N x F to B x F x N.
# Finally, add the bias
if b is not None:
y = y + b
return y
class MaxLocalActivation(nn.Module):
# Luana R. Ruiz, rubruiz@seas.upenn.edu, 2019/03/15
"""
MaxLocalActivation creates a localized activation function layer on graphs
Initialization:
MaxLocalActivation(K)
Inputs:
K (int): number of hops (>0)
Output:
torch.nn.Module for a localized max activation function layer
Add graph shift operator:
MaxLocalActivation.addGSO(GSO) Before applying the filter, we need to
define the GSO that we are going to use. This allows to change the GSO
while using the same filtering coefficients (as long as the number of
edge features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = MaxLocalActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self, K = 1):
super().__init__()
assert K > 0 # range has to be greater than 0
self.K = K
self.S = None # no GSO assigned yet
self.N = None # no GSO assigned yet (N learned from the GSO)
        self.neighborhood = None # no neighborhoods calculated yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(1,self.K+1))
# Initialize parameters
self.reset_parameters()
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
neighborhood = []
maxNeighborhoodSizes = []
for k in range(1,self.K+1):
            # For each hop (1, ..., K) in the range
thisNeighborhood = graphTools.computeNeighborhood(
np.array(self.S.cpu()), k, outputType='matrix')
# compute the k-hop neighborhood
neighborhood.append(torch.tensor(thisNeighborhood))
maxNeighborhoodSizes.append(thisNeighborhood.shape[1])
self.maxNeighborhoodSizes = maxNeighborhoodSizes
self.neighborhood = neighborhood
def forward(self, x):
#print(f"forward in activation function {x.size()}")
# x should be of shape batchSize x dimNodeSignals x N
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.N
# And given that the self.neighborhood is already a torch.tensor matrix
# we can just go ahead and get it.
# So, x is of shape B x F x N. But we need it to be of shape
# B x F x N x maxNeighbor. Why? Well, because we need to compute the
# maximum between the value of each node and those of its neighbors.
# And we do this by applying a torch.max across the rows (dim = 3) so
# that we end up again with a B x F x N, but having computed the max.
# How to fill those extra dimensions? Well, what we have is neighborhood
# matrix, and we are going to use torch.gather to bring the right
# values (torch.index_select, while more straightforward, only works
# along a single dimension).
# Each row of the matrix neighborhood determines all the neighbors of
# each node: the first row contains all the neighbors of the first node,
# etc.
# The values of the signal at those nodes are contained in the dim = 2
# of x. So, just for now, let's ignore the batch and feature dimensions
# and imagine we have a column vector: N x 1. We have to pick some of
# the elements of this vector and line them up alongside each row
# so that then we can compute the maximum along these rows.
# When we torch.gather along dimension 0, we are selecting which row to
        # pick according to each column. Thus, if the first row of the
        # neighborhood matrix is [1, 2, 0], it means we pick the value at row 1
        # of x for the first column, the value at row 2 of x for the next
        # column, and the value at row 0 for the last column. For these values
        # to be the appropriate ones, we have to repeat x as columns to build
        # our B x F x N x maxNeighbor matrix.
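        # Editor's illustration (not part of the original code): on a single
        # column vector the same mechanism looks like
        #   torch.gather(torch.tensor([10., 20., 30.]), 0, torch.tensor([1, 2, 0]))
        #   -> tensor([20., 30., 10.])
        # i.e. the index tensor picks, per output position, which signal value
        # to line up before the max is taken along that dimension.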
xK = x # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
        # max's; it is initialized with the 0-hop neighborhood (x itself)
xK = xK.unsqueeze(3) # extra dimension added for concatenation ahead
x = x.unsqueeze(3) # B x F x N x 1
# And the neighbors that we need to gather are the same across the batch
# and feature dimensions, so we need to repeat the matrix along those
# dimensions
for k in range(1,self.K+1):
x_aux = x.repeat([1, 1, 1, self.maxNeighborhoodSizes[k-1]])
gatherNeighbor = self.neighborhood[k-1].reshape(
[1,
1,
self.N,
self.maxNeighborhoodSizes[k-1]]
)
gatherNeighbor = gatherNeighbor.repeat([batchSize,
dimNodeSignals,
1,
1])
# And finally we're in position of getting all the neighbors in line
xNeighbors = torch.gather(x_aux, 2, gatherNeighbor.long().cuda())
# B x F x nOutput x maxNeighbor
# Note that this gather function already reduces the dimension to
# nOutputNodes.
# And proceed to compute the maximum along this dimension
v, _ = torch.max(xNeighbors, dim = 3)
v = v.unsqueeze(3) # to concatenate with xK
xK = torch.cat((xK,v),3)
out = torch.matmul(xK,self.weight.unsqueeze(2))
# multiply each k-hop max by corresponding weight
out = out.reshape([batchSize,dimNodeSignals,self.N])
return out
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.K)
self.weight.data.uniform_(-stdv, stdv)
def extra_repr(self):
if self.neighborhood is not None:
reprString = "neighborhood stored"
else:
reprString = "NO neighborhood stored"
return reprString
class MedianLocalActivation(nn.Module):
# Luana R. Ruiz, rubruiz@seas.upenn.edu, 2019/03/27
"""
MedianLocalActivation creates a localized activation function layer on
graphs
Initialization:
MedianLocalActivation(K)
Inputs:
K (int): number of hops (>0)
Output:
torch.nn.Module for a localized median activation function layer
Add graph shift operator:
    MedianLocalActivation.addGSO(GSO) Before applying the activation, we need
    to define the GSO that we are going to use. This allows changing the
    GSO while keeping the same trainable weights (as long as the number
    of edge features is the same; the number of nodes can change).
This function also calculates the 0-,1-,...,K-hop neighborhoods of every
node
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = MedianLocalActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self, K = 1):
super().__init__()
assert K > 0 # range has to be greater than 0
self.K = K
self.S = None # no GSO assigned yet
self.N = None # no GSO assigned yet (N learned from the GSO)
        self.neighborhood = None # no neighborhoods calculated yet
        self.masks = None # no mask yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(1,self.K+1))
# Initialize parameters
self.reset_parameters()
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
neighborhood = []
for k in range(1,self.K+1):
            # For each hop (1, ..., K) in the range
thisNeighborhood = graphTools.computeNeighborhood(
np.array(self.S.cpu()), k, outputType='list')
# compute the k-hop neighborhood
neighborhood.append(thisNeighborhood)
self.neighborhood = neighborhood
def forward(self, x):
#print(f"Activation Function {np.shape(x)}")
# x should be of shape batchSize x dimNodeSignals x N
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.N
        xK = x # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
        # medians
        # It is initialized with the 0-hop neighborhood (x itself)
xK = xK.unsqueeze(3) # extra dimension added for concatenation ahead
#x = x.unsqueeze(3) # B x F x N x 1
for k in range(1,self.K+1):
kHopNeighborhood = self.neighborhood[k-1]
# Fetching k-hop neighborhoods of all nodes
kHopMedian = torch.empty(0)
# Initializing the vector that will contain the k-hop median for
# every node
for n in range(self.N):
# Iterating over the nodes
# This step is necessary because here the neighborhoods are
# lists of lists. It is impossible to pad them and feed them as
# a matrix, as this would impact the outcome of the median
# operation
nodeNeighborhood = torch.tensor(np.array(kHopNeighborhood[n]))
neighborhoodLen = len(nodeNeighborhood)
gatherNode = nodeNeighborhood.reshape([1, 1, neighborhoodLen])
gatherNode = gatherNode.repeat([batchSize, dimNodeSignals, 1])
# Reshaping the node neighborhood for the gather operation
xNodeNeighbors = torch.gather(x, 2, gatherNode.long().cuda())
# Gathering signal values in the node neighborhood
nodeMedian,_ = torch.median(xNodeNeighbors, dim = 2,
keepdim=True)
# Computing the median in the neighborhood
kHopMedian = torch.cat([kHopMedian.cuda(),nodeMedian.cuda()],2)
# Concatenating k-hop medians node by node
kHopMedian = kHopMedian.unsqueeze(3) # Extra dimension for
# concatenation with the previous (k-1)-hop median tensor
xK = torch.cat([xK,kHopMedian],3)
out = torch.matmul(xK,self.weight.unsqueeze(2))
# Multiplying each k-hop median by corresponding trainable weight
out = out.reshape([batchSize,dimNodeSignals,self.N])
return out
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.K)
self.weight.data.uniform_(-stdv, stdv)
def extra_repr(self):
if self.neighborhood is not None:
reprString = "neighborhood stored"
else:
reprString = "NO neighborhood stored"
return reprString
class NoActivation(nn.Module):
"""
NoActivation creates an activation layer that does nothing
    It exists for completeness, so that we can switch between linear and
    nonlinear models without altering the rest of the architecture
Initialization:
NoActivation()
Output:
torch.nn.Module for an empty activation layer
Forward call:
y = NoActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self):
super().__init__()
def forward(self, x):
return x
def extra_repr(self):
reprString = "No Activation Function"
return reprString
class NoPool(nn.Module):
"""
This is a pooling layer that actually does no pooling. It has the same input
structure and methods of MaxPoolLocal() for consistency. Basically, this
allows us to change from pooling to no pooling without necessarily creating
a new architecture.
    In any case, pooling can simply be avoided when defining the architecture,
    so this layer is unlikely to be needed in practice.
"""
def __init__(self, nInputNodes, nOutputNodes, nHops):
super().__init__()
self.nInputNodes = nInputNodes
self.nOutputNodes = nOutputNodes
self.nHops = nHops
self.neighborhood = None
def addGSO(self, GSO):
# This is necessary to keep the form of the other pooling strategies
# within the SelectionGNN framework. But we do not care about any GSO.
pass
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x nInputNodes
assert x.shape[2] == self.nInputNodes
# Check that there are at least the same number of nodes that
# we will keep (otherwise, it would be unpooling, instead of
# pooling)
assert x.shape[2] >= self.nOutputNodes
# And do not do anything
return x
def extra_repr(self):
reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % (
self.nInputNodes, self.nOutputNodes, self.nHops)
reprString += "no neighborhood needed"
return reprString
| true |
6b5beb64154d47fbb3c45840c9ce13882a8ea367
|
Python
|
kingtheoden/leet-code
|
/solutions/0001 - Two Sum/two_sum.py
|
UTF-8
| 372 | 2.859375 | 3 |
[] |
no_license
|
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
i = 0
li = []
for i, num in enumerate(nums):
if num in li:
                return [li.index(num), i]
else:
li.append(target - num)
return [0,0]
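# --- Editor's addition (not part of the original submission) ----------------
# A minimal sanity check on the canonical example; `li` holds the complements
# (target - num) seen so far, so the membership test is O(n) and the whole
# routine is O(n^2). A dict mapping complement -> index would make it O(n).
if __name__ == "__main__":
    print(Solution().twoSum([2, 7, 11, 15], 9))  # expected: [0, 1]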
| true |
53a54d3d21ee4ffe0490569f64440f5353aa4630
|
Python
|
thxa/test_python
|
/python_beyond_basics/Introspection/inspect_test.py
|
UTF-8
| 1,151 | 3.203125 | 3 |
[] |
no_license
|
import inspect
import sorted_set
# chain here is itertools.chain, re-exported by sorted_set
from sorted_set import chain
# This is like chain
def chains(*iterables):
# result = (element for it in iterables for element in it)
# return result
for it in iterables:
for element in it:
yield element
def main():
    # Is sorted_set a module?
print(inspect.ismodule(sorted_set))
    # Get all members of the sorted_set module
print(inspect.getmembers(sorted_set))
# Get all in inspect
print(dir(inspect))
    # getmembers: classes only
print(inspect.getmembers(sorted_set, inspect.isclass))
    # getmembers: functions of SortedSet only
print(inspect.getmembers(sorted_set.SortedSet, inspect.isfunction))
    # chain imported from sorted_set is itertools.chain
print(list(chain([0, 1, 2, 3, 4], [5, 6, 7, 8, 9])))
# chains like chain
print(list(chains([0, 1, 2, 3, 4], [5, 6, 7, 8, 9])))
print(chains([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]))
init_sig = inspect.signature(sorted_set.SortedSet.__init__)
print(init_sig)
print(init_sig.parameters)
print(repr(init_sig.parameters['items'].default))
print(str(init_sig))
print(inspect.signature(abs))
if __name__ == '__main__':
main()
| true |
b18734ab2ce7156f3c35f1f918ef4e68091a63fd
|
Python
|
hadim/lsysdrawer
|
/src/viewer/opengl/utilities/myMath.py
|
UTF-8
| 7,551 | 3.015625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#-*- coding: utf-8 -*-
# myMath.py
# Copyright (c) 2011, see AUTHORS
# All rights reserved.
# This file is part of Lsysdrawer.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# Neither the name of the ProfileExtractor nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#-*- coding: utf-8 -*-
from math import sqrt, pow, acos, pi
import numpy as Numeric
Epsilon = 1.0e-5
def xfrange(start, stop, step):
while start < stop:
yield start
start += step
class Point():
"""
"""
def __init__(self, x, y, z):
"""
"""
self.x = x
self.y = y
self.z = z
def __add__(self, p):
"""
"""
return Point(self.x + p.x, self.y + p.y, self.z + p.z)
def __str__(self):
"""
"""
return str([self.x, self.y, self.z])
class Vector():
"""
"""
def __init__(self):
pass
def add(self, a, b):
""" vector a plus vector b = resulting vector """
result = Point(a.x + b.x, a.y + b.y, a.z + b.z)
return result
def subtract(self, a, b):
""" vector a minus vector b = resulting vector vector """
result = Point(a.x - b.x, a.y - b.y, a.z - b.z)
return result
def multiply(self, scalar, vector):
""" multiply a vector by a scalar """
result = [scalar * vector.x, scalar * vector.y, scalar * vector.z]
return result
def dotproduct(self, a, b):
""" take the dot product of two vectors: a . b """
result = a.x * b.x + a.y * b.y + a.z * b.z
return result
def crossproduct(self, a, b):
""" take the cross product of two vectors: a X b """
cross = Point(0,0,0)
cross.x = a.y * b.z - a.z * b.y
cross.y = a.z * b.x - a.x * b.z
cross.z = a.x * b.y - a.y * b.x
result = cross
return result
def mag(self, a):
""" return the magnitude (length) of a vector """
result = (a.x**2 + a.y**2 + a.z**2)**(0.5)
return result
def normalize(self, a):
""" convert a vector to a unit vector (length of 1) """
magnitude = self.mag(a)
result = Point(a.x/magnitude, a.y/magnitude, a.z/magnitude)
return result
def angle(self, a, b):
""" angle in degrees between two vectors """
result = acos(self.dotproduct(a,b) / (self.mag(a)* self.mag(b))) # radians
result = result * (180 / pi) # degrees
return result
def sumDot(a, b):
"""
"""
return Numeric.dot(a, b)
def Matrix4fT ():
"""
"""
return Numeric.identity(4, 'f')
def Matrix3fT():
"""
"""
return Numeric.identity(3, 'f')
def Quat4fT():
"""
"""
return Numeric.zeros(4, 'f')
def Vector3fT():
"""
"""
return Numeric.zeros (3, 'f')
def Point2fT(x = 0.0, y = 0.0):
"""
"""
pt = Numeric.zeros (2, 'f')
pt [0] = x
pt [1] = y
return pt
def Vector3fDot(u, v):
"""
Dot product of two 3f vectors
"""
dotprod = Numeric.dot (u,v)
return dotprod
def Vector3fCross(u, v):
"""
Cross product of two 3f vectors
"""
X = 0
Y = 1
Z = 2
cross = Numeric.zeros (3, 'f')
cross [X] = (u[Y] * v[Z]) - (u[Z] * v[Y])
cross [Y] = (u[Z] * v[X]) - (u[X] * v[Z])
cross [Z] = (u[X] * v[Y]) - (u[Y] * v[X])
return cross
def Vector3fLength(u):
"""
"""
mag_squared = sumDot(u,u)
mag = sqrt (mag_squared)
return mag
def Matrix3fSetIdentity():
"""
"""
return Numeric.identity (3, 'f')
def Matrix3fMulMatrix3f(matrix_a, matrix_b):
"""
"""
return sumDot( matrix_a, matrix_b )
def Matrix4fSVD(NewObj):
"""
"""
X = 0
Y = 1
Z = 2
s = sqrt(
((NewObj [X][X] * NewObj [X][X]) +(NewObj [X][Y] * NewObj [X][Y]) +(NewObj [X][Z] * NewObj [X][Z]) +
(NewObj [Y][X] * NewObj [Y][X]) +(NewObj [Y][Y] * NewObj [Y][Y]) +(NewObj [Y][Z] * NewObj [Y][Z]) +
(NewObj [Z][X] * NewObj [Z][X]) +(NewObj [Z][Y] * NewObj [Z][Y]) +(NewObj [Z][Z] * NewObj [Z][Z]) ) / 3.0 )
return s
def Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix):
"""
Modifies NewObj in-place by replacing its upper 3x3 portion from
the passed in 3x3 matrix.
NewObj = Matrix4fT()
"""
NewObj [0:3,0:3] = three_by_three_matrix
return NewObj
def Matrix4fSetRotationFromMatrix3f(NewObj, three_by_three_matrix):
"""
Sets the rotational component(upper 3x3) of this matrix to the
matrix values in the T precision Matrix3d argument; the other
elements of this matrix are unchanged; a singular value
decomposition is performed on this object's upper 3x3 matrix to
factor out the scale, then this object's upper 3x3 matrix
components are replaced by the passed rotation components, and
then the scale is reapplied to the rotational components.
@param three_by_three_matrix T precision 3x3 matrix
"""
scale = Matrix4fSVD(NewObj)
NewObj = Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix);
scaled_NewObj = NewObj * scale # Matrix4fMulRotationScale(NewObj, scale);
return scaled_NewObj
def Matrix3fSetRotationFromQuat4f(q1):
"""
Converts the H quaternion q1 into a new equivalent 3x3 rotation
matrix.
"""
X = 0
Y = 1
Z = 2
W = 3
NewObj = Matrix3fT()
n = sumDot(q1, q1)
s = 0.0
if(n > 0.0):
s = 2.0 / n
xs = q1 [X] * s; ys = q1 [Y] * s; zs = q1 [Z] * s
wx = q1 [W] * xs; wy = q1 [W] * ys; wz = q1 [W] * zs
xx = q1 [X] * xs; xy = q1 [X] * ys; xz = q1 [X] * zs
yy = q1 [Y] * ys; yz = q1 [Y] * zs; zz = q1 [Z] * zs
# This math all comes about by way of algebra, complex math, and trig identities.
# See Lengyel pages 88-92
NewObj [X][X] = 1.0 - (yy + zz); NewObj [Y][X] = xy - wz; NewObj [Z][X] = xz + wy;
NewObj [X][Y] = xy + wz; NewObj [Y][Y] = 1.0 -(xx + zz); NewObj [Z][Y] = yz - wx;
NewObj [X][Z] = xz - wy; NewObj [Y][Z] = yz + wx; NewObj [Z][Z] = 1.0 -(xx + yy)
return NewObj
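# Editor's sanity check (not part of the original module): for the identity
# quaternion q = [0, 0, 0, 1] (x, y, z, w order, as indexed above) we get
# n = 1, s = 2 and all cross terms zero, so Matrix3fSetRotationFromQuat4f
# returns the 3x3 identity matrix, as expected for a null rotation.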
| true |
c94859089bf6b06eb3ab67723b5d080c39499fee
|
Python
|
francisamani/pygametrial
|
/generic_game.py
|
UTF-8
| 733 | 3.515625 | 4 |
[] |
no_license
|
import pygame
# Initialising the module
pygame.init()
# Setting the display size using a tuple
gameDisplay = pygame.display.set_mode((800,600))
# Setting the name of the game
pygame.display.set_caption('Car Chasers')
# Setting the timing of the game
clock = pygame.time.Clock()
# Placing Crashing as False
crashed = False
while not crashed:
# Placing an event to track mouse and keyboard presses
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
print(event)
    # To update the entire display
pygame.display.update()
# Setting the in-game frames per second
clock.tick(60)
# Quitting the game
pygame.quit()
quit()
| true |
01608a304b6d1ece2983e0f85646ee30da1f2a21
|
Python
|
harry-hao/wormhole
|
/udp-py/udp/connection.py
|
UTF-8
| 6,687 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
#
# UDP: User Datagram Protocol
#
# Written in 2020 by Moky <albert.moky@gmail.com>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2020 Albert Moky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import time
from typing import Optional
from .packet import DatagramPacket
from .status import ConnectionStatus
class Connection:
EXPIRES = 28 # seconds
"""
Max count for caching packages
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Each UDP data package is limited to no more than 576 bytes, so setting
    MAX_CACHE_SPACES to about 200,000 means it would take up to roughly 100MB
    of memory for caching in one connection.
"""
MAX_CACHE_SPACES = 1024 * 200
def __init__(self, local_address: tuple, remote_address: tuple):
super().__init__()
self.__local_address = local_address
self.__remote_address = remote_address
# connecting status
self.__status = ConnectionStatus.Default
# initialize times to expired
now = time.time()
self.__last_sent_time = now - self.EXPIRES - 1
self.__last_received_time = now - self.EXPIRES - 1
# received packages
self.__cargoes = []
@property
def local_address(self) -> tuple:
""" local ip, port """
return self.__local_address
@property
def remote_address(self) -> tuple:
""" remote ip, port """
return self.__remote_address
def is_connected(self, now: float) -> bool:
status = self.get_status(now=now)
return ConnectionStatus.is_connected(status=status)
def is_expired(self, now: float) -> bool:
status = self.get_status(now=now)
return ConnectionStatus.is_expired(status=status)
def is_error(self, now: float) -> bool:
status = self.get_status(now=now)
return ConnectionStatus.is_error(status=status)
def get_status(self, now: float):
"""
Get connection status
:param now: timestamp in seconds
:return: new status
"""
# pre-checks
if now < self.__last_received_time + self.EXPIRES:
# received response recently
if now < self.__last_sent_time + self.EXPIRES:
# sent recently, set status = 'connected'
self.__status = ConnectionStatus.Connected
else:
# long time no sending, set status = 'expired'
self.__status = ConnectionStatus.Expired
return self.__status
if self.__status != ConnectionStatus.Default:
# any status except 'initialized'
if now > self.__last_received_time + (self.EXPIRES << 2):
# long long time no response, set status = 'error'
self.__status = ConnectionStatus.Error
return self.__status
# check with current status
if self.__status == ConnectionStatus.Default:
# case: 'default'
if now < self.__last_sent_time + self.EXPIRES:
# sent recently, change status to 'connecting'
self.__status = ConnectionStatus.Connecting
elif self.__status == ConnectionStatus.Connecting:
# case: 'connecting'
if now > self.__last_sent_time + self.EXPIRES:
# long time no sending, change status to 'not_connect'
self.__status = ConnectionStatus.Default
elif self.__status == ConnectionStatus.Connected:
# case: 'connected'
if now > self.__last_received_time + self.EXPIRES:
# long time no response, needs maintaining
if now < self.__last_sent_time + self.EXPIRES:
# sent recently, change status to 'maintaining'
self.__status = ConnectionStatus.Maintaining
else:
# long time no sending, change status to 'maintain_expired'
self.__status = ConnectionStatus.Expired
elif self.__status == ConnectionStatus.Expired:
# case: 'maintain_expired'
if now < self.__last_sent_time + self.EXPIRES:
# sent recently, change status to 'maintaining'
self.__status = ConnectionStatus.Maintaining
elif self.__status == ConnectionStatus.Maintaining:
# case: 'maintaining'
if now > self.__last_sent_time + self.EXPIRES:
# long time no sending, change status to 'maintain_expired'
self.__status = ConnectionStatus.Expired
return self.__status
def update_sent_time(self, now: float):
self.__last_sent_time = now
def update_received_time(self, now: float):
self.__last_received_time = now
def receive(self) -> Optional[DatagramPacket]:
"""
Get received data package from buffer, non-blocked
:return: received data and source address
"""
if len(self.__cargoes) > 0:
return self.__cargoes.pop(0)
def cache(self, packet: DatagramPacket) -> Optional[DatagramPacket]:
# 1. check memory cache status
ejected = None
if self._is_cache_full(count=len(self.__cargoes)):
# drop the first package
ejected = self.__cargoes.pop(0)
# 2. append the new package to the end
self.__cargoes.append(packet)
return ejected
def _is_cache_full(self, count: int) -> bool:
return count > self.MAX_CACHE_SPACES
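# --- Editor's sketch (not part of the original module) ----------------------
# A minimal illustration of the status life-cycle implemented in get_status(),
# assuming a hypothetical pair of addresses:
#
#   conn = Connection(local_address=('127.0.0.1', 9394),
#                     remote_address=('192.168.0.2', 9394))
#   now = time.time()
#   conn.get_status(now=now)        # -> Default (nothing sent/received yet)
#   conn.update_sent_time(now)
#   conn.get_status(now=now)        # -> Connecting (sent recently)
#   conn.update_received_time(now)
#   conn.get_status(now=now)        # -> Connected (sent and received recently)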
| true |
73cc00e6c617a9d6bf512e0a106865ed17fe263d
|
Python
|
rajeshvaya/reservoir
|
/src/src/ReservoirSocket.py
|
UTF-8
| 5,081 | 2.9375 | 3 |
[] |
no_license
|
'''
This is the wrapper for the socket class; it handles the creation of TCP & UDP sockets and interactions with the sockets.
It also maintains the threads for each client connection
'''
import sys
import os
import socket
import threading
import json
import logging
from thread import start_new_thread
from ReservoirResponse import ReservoirResponse
class ReservoirSocket:
def __init__(self, reservoir, configs):
self.configs = configs
self.reservoir = reservoir
self.host = self.reservoir.host
self.port = self.reservoir.port
self.connections = []
# set the protocol to follow
protocol = self.configs.get('protocol', 'TCP')
self.protocol = protocol if protocol in ['TCP', 'UDP'] else 'TCP'
# lets go
self.socket = self.create_socket()
pass
def create_socket(self):
self.reservoir.logger.info("creating %s socket now" % (self.protocol))
if self.protocol == 'TCP':
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif self.protocol == 'UDP':
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:
return socket.socket
def bind(self):
if self.protocol == 'TCP':
return self.tcp_bind();
elif self.protocol == 'UDP':
return self.udp_bind();
return False
def listen(self):
if self.protocol == 'TCP':
return self.tcp_listen();
elif self.protocol == 'UDP':
return self.udp_listen();
def open(self):
if self.protocol == 'TCP':
self.tcp_open();
elif self.protocol == 'UDP':
self.udp_open();
# TCP functions here
def tcp_bind(self):
self.reservoir.logger.info("binding the socket on %s:%d" % (self.host, self.port))
try:
self.socket.bind((self.host, self.port))
return True
except Exception as e:
self.reservoir.logger.error(str(e))
return False
def tcp_listen(self):
self.socket.listen(self.configs.get('max_clients', 2)) # allow max of 2 clients by default
def tcp_open(self):
# let there be connectivity
while True:
self.reservoir.logger.info("Waiting for client connections")
connection, address = self.socket.accept()
self.connections.append(connection)
self.reservoir.logger.info('%s:%s connected to the server' % (address))
start_new_thread(self.start_tcp_client_thread, (connection,))
# Create new thread for each client. don't let the thread die
def start_tcp_client_thread(self, connection):
try:
while True:
data = connection.recv(self.configs.get('read_buffer', 1024))
if not data:
break;
self.reservoir.process_client_request(connection, data)
connection.close()
# for testing need to close the connection on keyboard interrupt
except MemoryError as e:
print e
# TODO: handle the client data for out of memory issue
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.reservoir.logger.error("Error occurred while starting TCP client thread : %s" % (str(e)))
self.reservoir.logger.error("Error details: %s %s %s " % (exc_type, fname, exc_tb.tb_lineno))
connection.close()
# UDP functions here
def udp_bind(self):
try:
self.socket.bind((self.host, self.port))
return True
except Exception as e:
self.reservoir.logger.error("Error occurred while starting UDP client thread : %s" % (str(e)))
def udp_listen(self):
# there is no listening in UDP, only fire and forget
pass
def udp_open(self):
while True:
packet = self.socket.recvfrom(self.configs.get('read_buffer', 1024))
data = packet[0]
address = packet[1]
start_new_thread(self.start_udp_client_thread, (address, data))
def start_udp_client_thread(self, address, data):
try:
self.reservoir.process_client_request(address, data)
except MemoryError as e:
self.reservoir.logger.error("Out of memory while processing client request: %s" % (str(e)))
# TODO: handle the client data for out of memory issue
except Exception as e:
self.reservoir.logger.error("Error occurred while processing client request : %s" % (str(e)))
def response(self, connection, response):
if not isinstance(response, ReservoirResponse):
connection.send("None")
if self.protocol == 'TCP':
connection.send(response.get_output())
if self.protocol == 'UDP':
self.socket.sendto(response.get_output(), connection) # over here connection is the host address
| true |
602e4737a93b437f053e3fa0d62e8708fb395bc2
|
Python
|
pdwarkanath/nand2tetris
|
/projects/06/Solutions/prog.txt
|
UTF-8
| 2,602 | 2.515625 | 3 |
[] |
no_license
|
import json
import re
import os
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument("-f", "--filename", required=True, help="Name of the symbolic machine instructions (.asm) file . eg. Max.asm")
args = vars(ap.parse_args())
asm_file = args['filename']
with open('symbol_table.json') as f:
symbol_table = json.load(f)
with open('c_instructions.json') as f:
c_instructions = json.load(f)
with open('d_table.json') as f:
d_table = json.load(f)
with open('j_table.json') as f:
j_table = json.load(f)
def first_pass(f):
line_number = 0
for line in f:
line = re.sub('\s+', '', line) # remove whitespace
line = re.sub('\/\/.*', '', line) # remove comments
if len(line) > 0:
if line[0] == '(':
if line[1:-1] not in symbol_table:
symbol_table[line[1:-1]] = line_number
else:
line_number += 1
with open(asm_file) as f:
first_pass(f)
curr_addr = 16
def read_a_instruction(line):
addr = line[1:]
try:
a = int(addr)
except ValueError:
if addr not in symbol_table:
global curr_addr
symbol_table[addr] = curr_addr
curr_addr += 1
a = symbol_table[addr]
return f'{int(bin(a)[2:]):016d}\n'
def read_c_instruction(line):
c_instr = line.split(';')
if len(c_instr) > 1:
j = c_instr[1]
else:
j = '0'
c_instr = c_instr[0].split('=')
if len(c_instr) > 1:
d = ''.join(sorted(c_instr[0]))
c = c_instr[1]
else:
d = '0'
c = c_instr[0]
return '111' + c_instructions[c]['a'] + c_instructions[c]['c'] + d_table[d] + j_table[j] + '\n'
def second_pass(f):
hack_file = asm_file[:-4] + '.hack'
if os.path.exists(hack_file):
os.remove(hack_file)
line_number = 0
instr = ''
for line in f:
line = re.sub('\s+', '', line) # remove whitespace
line = re.sub('\/\/.*', '', line) # remove comments
if len(line) > 0:
if line[0] == '(':
continue
elif line[0] == '@':
instr += read_a_instruction(line)
line_number += 1
else:
instr += read_c_instruction(line)
line_number += 1
if line_number == 10:
line_number = 0
with open(hack_file, 'a+') as h:
h.write(instr)
instr = ''
with open(hack_file, 'a+') as h:
h.write(instr)
with open(asm_file) as f:
second_pass(f)
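# Editor's note (illustrative, not part of the assembler): read_a_instruction
# zero-pads the binary form of an address to 16 bits via a decimal format
# trick, e.g. for the line '@21':
#   bin(21)[2:]              -> '10101'
#   f'{int("10101"):016d}'   -> '0000000000010101'
# which is the Hack A-instruction for address 21.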
| true |
81eff7a9e955e2c241e57137168f42cb8aeff77d
|
Python
|
PatrickGhadban/DailyCodingProblem
|
/daily7.py
|
UTF-8
| 1,418 | 4.0625 | 4 |
[] |
no_license
|
'''
* Difficulty: Medium
* Asked by: Facebook
* Problem: Write a function that rotates a list by k elements.
For example, [1, 2, 3, 4, 5, 6] rotated by two becomes
[3, 4, 5, 6, 1, 2].
Try solving this without creating a copy of the list.
How many swap or move operations do you need?
Time Taken: < 10mins
RunTime: O(N*K)
Space Complexity: O(1)
Description:
Sol #1 - Swap through the list for k.
Sol #2 (Couldn't get working) - Find the gcd of len of the list and number of rotations. Then separate the list into sets the size of the gcd
and swap
'''
def sol1(lst, k):
    for x in range(k):
for i in range(len(lst) - 1):
lst[i], lst[i+1] = lst[i+1], lst[i]
def g_c_d(a, b):
if b == 0:
return a;
else:
return g_c_d(b, a % b)
def rotate(lst, k):
n = len(lst)
if k == 0 or n == 0:
return
if k == n:
return lst
gcd = g_c_d(n, k)
for i in range(0, gcd):
j = i
while True:
index = j + gcd
if index >= n:
break
lst[j], lst[index] = lst[index], lst[j]
j = index
return lst
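# Editor's note (not part of the original solution): the loop above steps
# through the list in strides of gcd(n, k), which effectively rotates by
# gcd(n, k) positions; that coincides with a rotation by k only when k divides
# n (consistent with the docstring's remark that Sol #2 could not be made to
# work in general). Worked example where it does hold: for
# lst = [1, 2, 3, 4, 5, 6] and k = 2, gcd(6, 2) = 2, so the swaps walk the two
# index chains 0-2-4 and 1-3-5 and yield [3, 4, 5, 6, 1, 2] using O(n) swaps
# and O(1) extra space.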
def main():
print (rotate([1,2,3,4,5,6], 1))
print (rotate([1,2,3,4,5,6], 2))
print (rotate([1,2,3,4,5,6], 3))
print (rotate([1,2,3,4,5,6], 4))
print (rotate([1,2,3,4,5,6], 5))
print (rotate([1,2,3,4,5,6], 6))
if __name__ == "__main__":
main()
| true |
ce1c5670e1baaad23680395e957149c2c9e54401
|
Python
|
ianbstewart/catalan
|
/scripts/experiment_2/experiment_2.py
|
UTF-8
| 6,810 | 2.515625 | 3 |
[] |
no_license
|
"""
Hard-coded version of experiment_2.ipynb and experiment_2_addon.ipynb.
"""
from __future__ import division
import pandas as pd
from argparse import ArgumentParser
from scipy.stats import ttest_1samp
import re
import logging
import os
def run_compare_test(tweet_data_1, tweet_data_2):
relevant_users = set(tweet_data_1.loc[:, 'user'].unique()) & set(tweet_data_2.loc[:, 'user'].unique())
tweet_data_relevant_1 = tweet_data_1[tweet_data_1.loc[:, 'user'].isin(relevant_users)]
tweet_data_relevant_2 = tweet_data_2[tweet_data_2.loc[:, 'user'].isin(relevant_users)]
tweet_count_1 = tweet_data_relevant_1.shape[0]
tweet_count_2 = tweet_data_relevant_2.shape[0]
user_count = len(relevant_users)
logging.info('%d tweets in data 1'%(tweet_count_1))
logging.info('%d tweets in data 2'%(tweet_count_2))
logging.info('%d relevant users'%(user_count))
ca_1 = tweet_data_relevant_1.groupby('user').apply(lambda x: (x.loc[:, 'lang']=='ca').astype(int).sum() / x.shape[0])
ca_2 = tweet_data_relevant_2.groupby('user').apply(lambda x: (x.loc[:, 'lang']=='ca').astype(int).sum() / x.shape[0])
ca_use_diff = ca_1 - ca_2
d_u = ca_use_diff.mean()
N = len(ca_use_diff)
d_u_err = ca_use_diff.std() / N**.5
h_0 = 0.
t_stat, p_val = ttest_1samp(ca_use_diff, h_0)
logging.info('d_u = %.5f, err = %.5f, t=%.3f (p=%.3E)'%(d_u, d_u_err, t_stat, p_val))
stats = pd.Series([tweet_count_1, tweet_count_2, user_count], index=['treatment_tweets', 'control_tweets', 'users'])
results = pd.Series([d_u, d_u_err, t_stat, p_val], index=['d_u', 'd_u_err', 't', 'p'])
return stats, results
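# Editor's note (descriptive, not part of the original script): ca_1 and ca_2
# are each user's share of Catalan-language tweets in the two conditions, so
# d_u is the mean within-user difference and the one-sample t-test above asks
# whether that mean difference departs from h_0 = 0, i.e. whether users shift
# their language use between conditions.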
def main():
parser = ArgumentParser()
parser.add_argument('--tweet_file', default='../../data/tweets/extra_user_tweets/Jan-01-17_Oct-31-17_user_tweets_final.tsv')
parser.add_argument('--out_dir', default='../../output')
args = parser.parse_args()
tweet_file = args.tweet_file
out_dir = args.out_dir
if(not os.path.exists(out_dir)):
os.mkdir(out_dir)
    logging.basicConfig(level=logging.INFO, filemode='w', format='%(message)s', filename=os.path.join(out_dir,'experiment_2.txt'))
stats_out_file = os.path.join(out_dir, 'experiment_2_tweet_user_counts.tsv')
results_out_file = os.path.join(out_dir, 'experiment_2_results.tsv')
all_stats = pd.DataFrame()
all_results = pd.DataFrame()
## load data
tweet_data = pd.read_csv(tweet_file, sep='\t', index_col=False)
tweet_data.fillna('', inplace=True)
tweet_data.loc[:, 'hashtag_count'] = tweet_data.loc[:, 'hashtags'].apply(lambda x: 0 if x=='' else len(x.split(',')))
# tag @-replies
at_matcher = re.compile('@\w+')
tweet_data.loc[:, 'reply'] = tweet_data.apply(lambda r: int(len(at_matcher.findall(r.loc['text'])) > 0 and r.loc['retweeted']==0), axis=1)
# and hashtag counts
tweet_data.loc[:, 'hashtag_count'] = tweet_data.loc[:, 'hashtags'].apply(lambda x: len(x.split(',')) if x != '' else 0)
tweet_data_original = tweet_data[tweet_data.loc[:, 'retweeted'] == 0]
# language cutoff
lang_conf_cutoff = 0.90
allowed_langs = set(['es', 'ca'])
tweet_data_original_high_conf = tweet_data_original[(tweet_data_original.loc[:, 'lang_conf'] >= lang_conf_cutoff) &
(tweet_data_original.loc[:, 'lang'].isin(allowed_langs))]
# restrict to relevant users
relevant_users = tweet_data_original_high_conf.groupby('user').apply(lambda x: (x.loc[:, 'contains_ref_hashtag'].max()==1 and
x.loc[:, 'contains_ref_hashtag'].min()==0))
relevant_users = relevant_users[relevant_users].index.tolist()
tweet_data_relevant = tweet_data_original_high_conf[tweet_data_original_high_conf.loc[:, 'user'].isin(relevant_users)]
## split into referendum and control data
logging.info('starting referendum versus control test')
tweet_data_ref = tweet_data_relevant[tweet_data_relevant.loc[:, 'contains_ref_hashtag'] == 1]
tweet_data_control = tweet_data_relevant[tweet_data_relevant.loc[:, 'contains_ref_hashtag'] == 0]
### first test: referendum versus non-referendum tweets
## compute probability of Catalan in referendum and control tweets
stats, results = run_compare_test(tweet_data_ref, tweet_data_control)
all_stats = all_stats.append(pd.DataFrame(stats).transpose())
all_results = all_results.append(pd.DataFrame(results).transpose())
### second test: referendum versus non-referendum tweets with hashtags
logging.info('starting referendum versus hashtag control test')
## re-segment data, relevant users
tweet_data_with_hashtags = tweet_data_original_high_conf[tweet_data_original_high_conf.loc[:, 'hashtag_count'] > 0]
relevant_users = tweet_data_with_hashtags.groupby('user').apply(lambda x: (x.loc[:, 'contains_ref_hashtag'].max()==1 and
x.loc[:, 'contains_ref_hashtag'].min()==0))
relevant_users = relevant_users[relevant_users].index.tolist()
tweet_data_relevant_with_hashtags = tweet_data_with_hashtags[tweet_data_with_hashtags.loc[:, 'user'].isin(relevant_users)]
tweet_data_ref = tweet_data_relevant_with_hashtags[tweet_data_relevant_with_hashtags.loc[:, 'contains_ref_hashtag'] == 1]
tweet_data_control = tweet_data_relevant_with_hashtags[tweet_data_relevant_with_hashtags.loc[:, 'contains_ref_hashtag'] == 0]
stats, results = run_compare_test(tweet_data_ref, tweet_data_control)
all_stats = all_stats.append(pd.DataFrame(stats).transpose())
all_results = all_results.append(pd.DataFrame(results).transpose())
## third test: @-replies versus non-replies with hashtag
logging.info('starting reply versus not-reply test')
reply_data = tweet_data_original_high_conf[(tweet_data_original_high_conf.loc[:, 'reply']==1) & (tweet_data_original_high_conf.loc[:, 'hashtag_count']==0)]
hashtag_data = tweet_data_original_high_conf[(tweet_data_original_high_conf.loc[:, 'reply']==0) & (tweet_data_original_high_conf.loc[:, 'hashtag_count']>0)]
stats, results = run_compare_test(reply_data, hashtag_data)
all_stats = all_stats.append(pd.DataFrame(stats).transpose())
all_results = all_results.append(pd.DataFrame(results).transpose())
## write stats, results to file
all_stats.loc[:, 'condition'] = ['hashtags_vs_all', 'hashtags_vs_hashtags', 'replies_vs_all']
all_results.loc[:, 'condition'] = ['hashtags_vs_all', 'hashtags_vs_hashtags', 'replies_vs_all']
all_stats.transpose().to_csv(stats_out_file, sep='\t', index=True, header=False)
all_results.transpose().to_csv(results_out_file, sep='\t', index=True, header=False)
if __name__ == '__main__':
main()
| true |
73959121bd47629e9b2107861fd8cb7a8ce2003e
|
Python
|
doraemon1293/ZOJ
|
/2433.py
|
UTF-8
| 558 | 2.765625 | 3 |
[] |
no_license
|
import sys
sys.stdin=open('test.txt','r')
testcases=int(sys.stdin.readline())
for testcase in range(testcases):
sys.stdin.readline()
n=int(sys.stdin.readline().strip())
a=map(int,sys.stdin.readline().strip().split())
a=[0]+a
if n<4:
print 0
else:
mini=sys.maxint
for i in range(2,n-1):
if a[i]-a[i-1]<mini:
mini=a[i]-a[i-1]
city=i
print a[-1]+mini
print city+1,1,n,city
if testcase!=testcases-1:
print
| true |
983f46fc18435d014eb6759652b64c85f031c25c
|
Python
|
Liverworks/Python_dz
|
/7.formatting_comprehensions/search.py
|
UTF-8
| 741 | 3.765625 | 4 |
[] |
no_license
|
l = [1,4,5,3,6,7,0,2]
def lin_search(l, el):
"""
:param l: list
:param el: element to find
:return: index of element found
"""
for ind, i in enumerate(l):
if i == el:
return ind
def bin_search(l, el, ind=0):
"""
:param l: sorted list
:param el: element to find
:param ind: do not use
:return: index of element found
"""
a = len(l)//2
if l[a] == el:
return a + ind
elif len(l) == 1:
return "Element not in list!"
elif l[a] > el:
l = l[0:a]
        return bin_search(l, el, ind)
else:
l = l[a:len(l)]
return bin_search(l, el, ind = a + ind)
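# Editor's example (illustrative): with l = sorted([1,4,5,3,6,7,0,2]), i.e.
# [0, 1, 2, 3, 4, 5, 6, 7], bin_search(l, 5) returns index 5, while
# bin_search(l, 100), as called below, returns the "Element not in list!"
# message.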
print(lin_search(l, 1))
l = sorted(l)
print(l)
print(bin_search(l, 100))
| true |
8f3867fd26a0530f2558d203c918aa4b96f08d12
|
Python
|
andrewmr/travellingsalesman
|
/tsp/importer.py
|
UTF-8
| 1,942 | 3.03125 | 3 |
[] |
no_license
|
import re
from tour import Tour
import logging
from algorithms import utils
logger = logging.getLogger(__name__)
class Importer:
def __init__(self):
self.regex = re.compile("[^a-zA-Z0-9,=]", re.UNICODE)
self.tour_name = ""
self.tour_size = 0
self.tour_nodes = []
self.success = False
def load(self,f):
"""Loads in the import data and returns a Tour instance"""
open_file = open(f, "r")
self.contents = open_file.read()
        # clean up and extract the base data
self.clean_up()
# put together the src-dst array map
self.parse_nodes()
return Tour(self.tour_name, self.tour_nodes)
def clean_up(self):
"""Cleans up the imported data and pulls out some metadata"""
self.contents = self.regex.sub('', self.contents)
self.contents = self.contents.split(",")
# pull out the name
self.tour_name = self.contents.pop(0)
self.tour_name = self.tour_name.replace("NAME=", "")
# pull out the tour size
self.tour_size = self.contents.pop(0)
self.tour_size = int(self.tour_size.replace("SIZE=", ""))
# nodes time
self.tour_nodes = self.contents
self.contents = None
def parse_nodes(self):
if self.tour_size == 0: return
expected = utils.n_choose_k(self.tour_size,2)
path_count = len(self.tour_nodes)
if expected < len(self.tour_nodes):
logger.debug('Found %d paths, expecting %d - using expected' % (
len(self.tour_nodes), expected
))
path_count = expected
# let's go...
x = 1
y = 2
j = 0
# initialise a 0-array of the right size (the diagonal will be 0s)
nodes = [[0 for col in range(self.tour_size)] for row in range(self.tour_size)]
while j < path_count:
nodes[x-1][y-1] = int(self.tour_nodes[j])
nodes[y-1][x-1] = int(self.tour_nodes[j])
# work out where in the distaince matrix we are
if (y == self.tour_size):
x += 1
y = (x+1) % self.tour_size
else:
y += 1
j += 1
self.tour_nodes = nodes
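# Editor's note (illustrative, not part of the importer): for a SIZE=4 tour
# the input lists C(4,2) = 6 pairwise distances for the upper triangle in the
# order 1-2, 1-3, 1-4, 2-3, 2-4, 3-4; parse_nodes mirrors each value into a
# full symmetric distance matrix with zeros on the diagonal.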
| true |
458d74599132f0957ad58d1b4eda309a165f2852
|
Python
|
Harjacober/CodeforcesSolvedProblems
|
/ChipsMoving.py
|
UTF-8
| 293 | 3.484375 | 3 |
[] |
no_license
|
import operator
def chipsMoving(n, coord):
    even_count = 0
    odd_count = 0
    for i in coord:
        if i%2 == 0:
            even_count += 1
        else:
            odd_count += 1
    return min(even_count, odd_count)
n = int(input())
coord = list(map(int, input().split()))
print(chipsMoving(n, coord))
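# Editor's note: in the underlying problem a chip presumably moves by 2 cells
# for free and by 1 cell at cost 1, so every chip can reach any cell of the
# same parity at no cost; the minimum total cost is therefore the smaller of
# the two parity counts, which is what chipsMoving returns.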
| true |
1c99b4c22b4f821ba8fc04a274231c2f5f8b526e
|
Python
|
storopoli/seletivo-lattes
|
/seletivo-lattes/__init__.py
|
UTF-8
| 2,825 | 2.875 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
# import libraries
import pandas as pd
import requests
from bs4 import BeautifulSoup
from time import sleep
# import the list of Lattes CV links from the file listalattes.xlsx
# column 0 is the Lattes link
# column 1 is the PPG (graduate program), e.g. GEAS
# if a PPG has both a Master's (M) and a Doctorate (D), use PPG-M or PPG-D, e.g. PPGA-D
lattes_df = pd.read_excel('listalattes.xlsx', header= None, index_col= None)
lattes_df.columns = ['link_lattes','PPG']
list_lattes = lattes_df['link_lattes']
print('Number of total CVs: ', len(list_lattes))
seletivo_df = pd.DataFrame()
for row in list_lattes:
url = row
page = requests.get(url)
    # Parse the HTML
soup = BeautifulSoup(page.text, 'html.parser')
    # Find the name of the Lattes CV owner
name = soup.find(class_='nome')
name = name.text
    # Find the last-updated date
ultima_atualizacao = soup.find('ul', class_='informacoes-autor')
ultima_atualizacao = ultima_atualizacao.text[-11:-1]
    # Find the professional address
    # there are many of these layout-cell-pad-5 divs in a Lattes page
layout_cell_pad_5 = soup.findAll('div', class_='layout-cell-pad-5')
endereco_prof = layout_cell_pad_5[5].text
    #### LATEST DEGREE ####
    # Year of the latest degree
ano_ultima_formacao = layout_cell_pad_5[6].text[1:-1]
    # Degree TITLE and institution (IES)
formacao = layout_cell_pad_5[7].text.split('\n')[0]
formacao_titulo = formacao.split('.')[0]
formacao_ies = formacao.split('.')[1]
    # Find the latest professional affiliation
ultimo_vinculo = soup.findAll('div', class_='inst_back')[0]
ultimo_vinculo_ies = ultimo_vinculo.text[1:].split('\n')[0]
    # Find all full papers published in journals
tb = soup.find_all('div', class_='artigo-completo')
    # count the number of full journal papers
prod = 0
for i in tb:
prod += 1
new_df = pd.DataFrame(
{'nome': name,
'ultima_atualizacao': ultima_atualizacao,
'endereco_prof': endereco_prof,
'ano_ultima_formacao': ano_ultima_formacao,
'formacao_titulo': formacao_titulo,
'formacao_ies': formacao_ies,
'ultimo_vinculo_ies': ultimo_vinculo_ies,
'prod_artigos_completos': prod
}, index=[0])
seletivo_df = seletivo_df.append(new_df, ignore_index=True)
    sleep(1) # wait 1 second between requests
seletivo_df = pd.concat([lattes_df, seletivo_df], axis=1, ignore_index=True)
# fix column names
seletivo_df.columns = ['link_lattes','PPG',
'nome','ultima_atualizacao',
'endereco_prof','ano_ultima_formacao',
'formacao_titulo','formacao_ies',
'ultimo_vinculo_ies','prod_artigos_completos']
# export to Excel XLSX
seletivo_df.to_excel('seletivo_lattes.xlsx',index=False)
print('Done! You may now open the file seletivo_lattes.xlsx')
| true |
7fb89940f55140c20d58307188c1539b5ee53303
|
Python
|
cupertinoUsa/michigan-data-science
|
/network-analysis/wk3/part1.py
|
UTF-8
| 543 | 2.6875 | 3 |
[] |
no_license
|
import networkx as nx
def data():
return nx.read_gml('friendships.gml')
def q1(G):
return \
nx.degree_centrality(G)[100], \
nx.closeness_centrality(G)[100], \
nx.betweenness_centrality(G, normalized=True, endpoints=False)[100]
def q2(G):
return max(nx.degree_centrality(G).items(), key=lambda n: n[1])
def q3(G):
return max(nx.closeness_centrality(G).items(), key=lambda n: n[1])
def q4(G):
return max(nx.betweenness_centrality(G, normalized=True, endpoints=False).items(), key=lambda n: n[1])
| true |
8f412696a739cf1d9056fefa5bbd2d113fb9a604
|
Python
|
alldatacenter/alldata
|
/dts/airbyte/airbyte-integrations/connectors/destination-google-sheets/destination_google_sheets/helpers.py
|
UTF-8
| 2,750 | 2.640625 | 3 |
[
"MIT",
"Elastic-2.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import re
from typing import List
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import ConfiguredAirbyteCatalog
from pygsheets import Spreadsheet, Worksheet
from pygsheets.exceptions import WorksheetNotFound
STREAMS_COUNT_LIMIT = 200
logger = AirbyteLogger()
def get_spreadsheet_id(id_or_url: str) -> str:
if re.match(r"(http://)|(https://)", id_or_url):
m = re.search(r"(/)([-\w]{40,})([/]?)", id_or_url)
        if m and m.group(2):
return m.group(2)
else:
logger.error(
"The provided URL doesn't match the requirements. See <a href='https://docs.airbyte.com/integrations/destinations/google-sheets#sheetlink'>this guide</a> for more details."
)
else:
return id_or_url
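# Editor's example (hypothetical URL and id, not from the original module):
# both calls below should return the same spreadsheet id, since the regex
# extracts the 40+ character [-\w] token between slashes:
#   get_spreadsheet_id("<44-character-id>")
#   get_spreadsheet_id("https://docs.google.com/spreadsheets/d/<44-character-id>/edit")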
def get_streams_from_catalog(catalog: ConfiguredAirbyteCatalog, limit: int = STREAMS_COUNT_LIMIT):
streams_count = len(catalog.streams)
if streams_count > limit:
logger.warn(f"Only {limit} of {streams_count} will be processed due to Google Sheets (worksheet count < {limit}) limitations.")
return catalog.streams[:limit]
return catalog.streams
class ConnectionTest:
"""
Performs connection test write operation to ensure the target spreadsheet is available for writing.
Initiating the class itself, performs the connection test and stores the result in ConnectionTest.result property.
"""
def __init__(self, spreadsheet: Spreadsheet):
self.spreadsheet = spreadsheet
self.wks_name: str = "_airbyte_conn_test"
self.test_data: List[str] = ["conn_test", "success"]
def add_test_wks(self) -> Worksheet:
self.spreadsheet.spreadsheet.add_worksheet(self.wks_name, rows=2, cols=1)
return self.spreadsheet.open_worksheet(self.wks_name)
def remove_test_wks(self):
wks = self.spreadsheet.open_worksheet(self.wks_name)
self.spreadsheet.spreadsheet.del_worksheet(wks)
def populate_test_wks(self, wks: Worksheet) -> Worksheet:
wks.append_table(self.test_data, dimension="COLUMNS")
return wks
def check_values(self, wks: Worksheet) -> bool:
value = wks.get_value("A2")
        return value == self.test_data[1]
def perform_connection_test(self) -> bool:
try:
if self.spreadsheet.spreadsheet.worksheets("title", self.wks_name):
self.remove_test_wks()
result: bool = self.check_values(self.populate_test_wks(self.add_test_wks()))
except WorksheetNotFound:
result: bool = self.check_values(self.populate_test_wks(self.add_test_wks()))
self.remove_test_wks()
return result
| true |
d0160b32b0372de3e5696844d0ed3abdbeaac772
|
Python
|
joshua-who-now/Wumpus-No-Wumpus
|
/Wumpus-No-Wumpus_(Python)/WorldProperty.py
|
UTF-8
| 839 | 3.171875 | 3 |
[] |
no_license
|
# <SUMMARY>
# WorldProperty is a class that will be passed into the ValueIteration algorithm/function.
# Because changes made to function parameters are not reflected back in the caller, we
# create an object to return all relevant information back to the main running program.
#
# More information/variables can be put here depending on what kind of information
# is needed in an algorithm/depending on its use.
#
# Written By: Joshua Yuen, 2021
class WorldProperty:
gridworld = [[]]
terminationCondition = False
delta = 0.0
def __init__(self, gridworld, terminationCondition, delta):
self.gridworld = gridworld
self.terminationCondition = terminationCondition
self.delta = delta
| true |
fc97d431fd54ea4515979f4e0f4fa3898196f35f
|
Python
|
cvelazquezr/library-naturalness
|
/analyse_dependencies.py
|
UTF-8
| 10,937 | 2.53125 | 3 |
[] |
no_license
|
import os
import pandas as pd
from pydriller import RepositoryMining
from xml.etree import ElementTree
from tokenizer import TokeNizer
from matplotlib import pyplot as plt
from get_entropy import *
REPOSITORIES_FOLDER = "data/"
def extract_dependencies(pom_file: str):
pom_str = list()
with open(pom_file) as f:
while True:
line = f.readline()
if not line:
break
else:
pom_str.append(line.strip())
dependencies = list()
tree = ElementTree.fromstring("\n".join(pom_str))
# Analyze properties of the POM file
for child in tree:
child_tag = child.tag
child_tag = child_tag.split('}')[-1]
if child_tag == "dependencies":
for dependency in child:
dependency_sentence = ''
for attribute in dependency:
dependency_sentence += attribute.text + "|"
dependencies.append(dependency_sentence[:-1])
return set(dependencies)
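# Editor's note (illustrative): each <dependency> is flattened into a string of
# its child elements joined by '|', typically 'groupId|artifactId|version',
# e.g. a junit 4.12 dependency would become 'junit|junit|4.12'.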
def checkout_previous_version(project_path, hash_number):
os.system(f"cd {project_path} && git checkout {hash_number} > /dev/null 2>&1")
def analyse_java_files(project_path, number):
project_name = project_path.split("/")[1]
code_files = list()
for root, folders, files in os.walk(project_path):
for file in files:
if file.endswith('.java'):
token_procedure = TokeNizer("Java")
code = code_file_to_str(os.path.join(root, file))
tokens = ' '.join(token_procedure.get_pure_tokens(code))
code_files.append(tokens)
path_folder = f"naturalness-data/java/new_data/{project_name}/"
if not os.path.exists(path_folder):
os.mkdir(path_folder)
with open(f"{path_folder}/fold{number}.train", "w") as f:
for file_code in code_files:
f.writelines(file_code + "\n")
def code_file_to_str(path_file: str):
lines_code = list()
with open(path_file) as f:
while True:
line = f.readline()
if not line:
break
else:
line = line.strip()
if not line.count("*"):
lines_code.append(line)
return ' '.join(lines_code)
def modification_to_str(modification: str):
if not modification:
return ""
lines_code = list()
modification_lines = modification.split("\n")
for mod in modification_lines:
line = mod.strip()
if not line.count("*"):
lines_code.append(line)
return ' '.join(lines_code)
def found_match_update(library: str, list_libraries: list):
updated_libraries = list()
group_artifact = "|".join(library.split("|")[:2])
for lib in list_libraries:
if lib.startswith(group_artifact):
updated_libraries.append(lib)
if not len(updated_libraries):
group = library.split("|")[0]
for lib in list_libraries:
if lib.startswith(group):
updated_libraries.append(lib)
return updated_libraries
def analyse_dependencies_changes(project_path: str):
hash_list = list()
dependencies_history = list()
commit_counter = 0
pom_counter = 0
dependency_counter = 0
for commit in RepositoryMining(project_path).traverse_commits():
hash_list.append(commit.hash)
commit_counter += 1
added_value = 0
removed_value = 0
for modification in commit.modifications:
if modification.filename == "pom.xml":
if modification.new_path:
path = modification.new_path
else:
path = modification.old_path
if pom_counter < 1:
checkout_previous_version(project_path, commit.hash)
dependencies_history.append(extract_dependencies(project_path + "/" + path))
else:
checkout_previous_version(project_path, commit.hash)
previous_dependencies = dependencies_history[pom_counter - 1]
current_dependencies = extract_dependencies(project_path + "/" + path)
removed_libraries = previous_dependencies.difference(current_dependencies)
added_libraries = current_dependencies.difference(previous_dependencies)
dependencies_history.append(current_dependencies)
# Checking for only changes in the dependencies
if len(removed_libraries) or len(added_libraries):
dependency_counter += 1
# Make the trains files with the snapshot before
checkout_previous_version(project_path, hash_list[commit_counter - 1])
analyse_java_files(project_path, dependency_counter - 1)
# Make the test files with the files changed
checkout_previous_version(project_path, commit.hash)
code_files = list()
for mod in commit.modifications:
if mod.filename.endswith(".java"):
removed_value += mod.removed
added_value += mod.added
source_preprocessed = preprocess_code(mod.source_code)
token_procedure = TokeNizer("Java")
code = modification_to_str(source_preprocessed)
tokens = ' '.join(token_procedure.get_pure_tokens(code))
code_files.append(tokens)
with open(f"naturalness-data/java/new_data/{project_path.split('/')[1]}/fold{dependency_counter - 1}.test", "w") as f:
for file_code in code_files:
f.writelines(file_code + "\n")
pom_counter += 1
print(f"Commits: {commit_counter}, POM Changes: {pom_counter}, Dependencies changed: {dependency_counter}")
restore_to_latest_commit(project_path)
return dependency_counter
def restore_to_latest_commit(project_path: str):
os.system(f"cd {project_path} && git checkout master > /dev/null 2>&1")
def preprocess_code(code: str):
code_split = code.split("\n") if code else " "
code_filtered = list()
for line in code_split:
line = line.strip()
if not line.startswith("import ") and not line.startswith("package ") and not line.count("@"):
code_filtered.append(line)
return "\n".join(code_filtered)
def get_entropy_commit(data_path: str, stopwords: list, number_commits: int):
print(f"Getting entropy values ...")
entropy_unigram_list = list()
entropy_bigram_list = list()
entropy_trigram_list = list()
for i in range(number_commits):
train_file = data_path + "/" + f"fold{i}.train"
test_file = data_path + "/" + f"fold{i}.test"
data_train, data_test = extract_data(train_file, test_file)
# Cleaning input
data_train = preprocess_data(data_train, stopwords)
data_test = preprocess_data(data_test, stopwords)
probabilities_unigram = get_probabilities_unigram(data_train)
entropy_unigram = get_entropy_unigram(data_test, data_train, probabilities_unigram)
entropy_unigram_list.append(entropy_unigram)
keys_bigram, probabilities_bigram = get_probabilities_bigram(data_train)
entropy_bigram = get_entropy_bigram(data_test,
data_train,
keys_bigram,
probabilities_unigram,
probabilities_bigram)
entropy_bigram_list.append(entropy_bigram)
keys_trigram, probabilities_trigram = get_probabilities_trigram(data_train)
entropy_trigram = get_entropy_trigram(data_test,
data_train,
keys_trigram,
keys_bigram,
probabilities_unigram,
probabilities_bigram,
probabilities_trigram)
entropy_trigram_list.append(entropy_trigram)
return entropy_unigram_list, entropy_bigram_list, entropy_trigram_list
def get_reserved_words():
reserved_words = list()
with open("java_words.txt") as f:
while True:
line = f.readline()
if not line:
break
else:
line = line.strip()
reserved_words.append(line)
return reserved_words
def plot_trigrams(results_path: str, project_name: str):
dataframe = pd.read_csv(results_path)
if (dataframe["trigram_values"] > 10).any():
max_lim = max(dataframe["trigram_values"]) + 1
else:
max_lim = 10
plt.plot(range(len(dataframe)), dataframe["trigram_values"], ".g-", label="Trigram Model")
plt.xlabel("Commits")
plt.ylabel("Entropy")
plt.ylim([0, max_lim])
plt.legend()
plt.title(project_name)
plt.show()
def save_results_csv(project_path: str, unigram_values: list, bigram_values: list, trigram_values: list):
print("Saving the results ...")
results_folder = "results/entropy/java/"
data = {'unigram_values': unigram_values,
'bigram_values': bigram_values,
'trigram_values': trigram_values}
dataframe = pd.DataFrame(data=data)
dataframe.to_csv(results_folder + f"{project_path}.csv")
if __name__ == '__main__':
projects_poms = list()
with open(f'{REPOSITORIES_FOLDER}/remaining.txt') as f:
while True:
line = f.readline()
if not line:
break
else:
line = line.strip()
projects_poms.append(line)
reserved = get_reserved_words()
for project_location in projects_poms:
project_name = project_location.split("/")[1]
print(f"Analysing project {project_name} ...")
commits = analyse_dependencies_changes(project_location)
unigram_list, bigram_list, trigram_list = get_entropy_commit(f"naturalness-data/java/new_data/{project_name}",
reserved,
commits)
save_results_csv(project_name, unigram_list, bigram_list, trigram_list)
# Plot only the trigram model
for project_location in projects_poms:
project_name = project_location.split("/")[1]
plot_trigrams(f"results/entropy/java/{project_name}.csv", project_name)
| true |
7bdee8ea5ee6463f60a5be30f98fc1e657aaae19
|
Python
|
jtrujillo1024/Monty_Hall_Simulation
|
/Monty_Hall_Simulation.py
|
UTF-8
| 1,180 | 3.765625 | 4 |
[] |
no_license
|
import random
def choose():
return random.randint(1, 3)
def stay_game():
win_door = choose()
chosen_door = choose()
wrong_door = choose()
while wrong_door == win_door or wrong_door == chosen_door:
wrong_door = choose()
if win_door == chosen_door:
return True
def change_game():
win_door = choose()
chosen_door = choose()
wrong_door = choose()
while wrong_door == win_door or wrong_door == chosen_door:
wrong_door = choose()
chosen_door = 6 - chosen_door - wrong_door #1+2+3=6, subtracting chosen_door and wrong_door from 6 equals the remaining door's value
if win_door == chosen_door:
return True
def main():
stay_win_count = 0
for x in range(100000):
if stay_game():
stay_win_count = stay_win_count + 1
print("Stay Win Rate: {} percent".format(stay_win_count / 100000))
change_win_count = 0
for x in range(100000):
if change_game():
change_win_count = change_win_count + 1
print("Change Win Rate: {} percent".format(change_win_count / 100000))
if __name__ == '__main__':
main()
| true |
e6e1ec1916f9ba5215729c9c958e2054a093b590
|
Python
|
jef771/competitive_programming_practice
|
/code_forces/160A/a.py
|
UTF-8
| 346 | 3.171875 | 3 |
[] |
no_license
|
import sys
def main():
sys.stdin.readline()
money = list(map(int, sys.stdin.readline().split()))
money.sort(reverse = True)
ans = []
for i in range(len(money)):
ans.append(money[i])
if sum(ans) > sum(money[i+1:]):
break
sys.stdout.write(f"{len(ans)}")
if __name__ == '__main__':
main()
| true |
e04ef7a8794633ca3452237b3f902ff74dd91851
|
Python
|
tomron27/regex
|
/models/attention.py
|
UTF-8
| 7,709 | 2.546875 | 3 |
[] |
no_license
|
import torch
import torch.nn.functional as F
from torch import nn
class SumPool(nn.Module):
def __init__(self, factor=2):
super(SumPool, self).__init__()
self.factor = factor
self.avgpool = nn.AvgPool2d(kernel_size=(factor, factor), stride=(factor, factor), padding=(0, 0))
def forward(self, x):
return self.avgpool(x) * (self.factor ** 2)
class SimpleSelfAttention(nn.Module):
def __init__(self, input_channels, embed_channels, kernel_size=(1, 1), stride=(1, 1),
padding=(0, 0), name='simple_self_attn'):
super(SimpleSelfAttention, self).__init__()
self.w1 = nn.Conv2d(in_channels=input_channels,
out_channels=embed_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,)
self.w2 = nn.Conv2d(in_channels=embed_channels,
out_channels=1,
kernel_size=kernel_size,
stride=stride,
padding=padding)
self.relu = nn.ReLU(inplace=True)
self.name = name
def forward(self, x):
tau = self.w1(x)
tau = F.normalize(tau)
tau = self.relu(tau)
tau = self.w2(tau).squeeze(1)
tau = torch.softmax(tau.flatten(1), dim=1).reshape(tau.shape)
attended_x = torch.einsum('bcxy,bxy->bcxy', x, tau)
return attended_x, tau
class SimpleUnary(nn.Module):
def __init__(self, input_embed_channels, output_embed_channels, kernel_size=(1, 1), stride=(1, 1),
padding=(0, 0), name='unary_att'):
super(SimpleUnary, self).__init__()
self.w1 = nn.Conv2d(in_channels=input_embed_channels,
out_channels=output_embed_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,)
self.w2 = nn.Conv2d(in_channels=output_embed_channels,
out_channels=1,
kernel_size=kernel_size,
stride=stride,
padding=padding)
# self.dropout = nn.Dropout2d(dropout_prob)
self.name = name
def forward(self, x):
x = self.w1(x)
x = F.normalize(x)
x = torch.relu(x)
        x = self.w2(x)
# x = F.normalize(x)
# 1 X 16 X 16
return x.squeeze(1)
class Unary(nn.Module):
def __init__(self, input_embed_channels, output_embed_channels, kernel_size=(1, 1), stride=(1, 1),
padding=(0, 0), name='unary_att'):
super(Unary, self).__init__()
self.w1 = nn.Conv2d(in_channels=input_embed_channels,
out_channels=output_embed_channels // 2,
kernel_size=kernel_size,
stride=stride,
padding=padding,)
self.w2 = nn.Conv2d(in_channels=output_embed_channels // 2,
out_channels=output_embed_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding)
self.w3 = nn.Conv2d(in_channels=output_embed_channels,
out_channels=1,
kernel_size=kernel_size,
stride=stride,
padding=padding)
# self.dropout = nn.Dropout2d(dropout_prob)
self.w4 = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=kernel_size,
stride=stride,
padding=padding)
self.name = name
def forward(self, x):
x = self.w1(x)
x = F.normalize(x)
x = torch.relu(x)
x = self.w2(x)
x = F.normalize(x)
x = torch.relu(x)
x = self.w3(x)
x = self.w4(x)
# x = F.normalize(x)
# 1 X 16 X 16
return x.squeeze(1)
class SelfAttn(nn.Module):
def __init__(self, input_embed_channels, output_embed_channels, kernel_size=(1, 1), name='self_attn', learnable=True):
super(SelfAttn, self).__init__()
self.learnable = learnable
if self.learnable:
self.unary = Unary(input_embed_channels=input_embed_channels,
output_embed_channels=output_embed_channels,
kernel_size=kernel_size)
self.name = name
def forward(self, x):
tau = self.unary(x)
tau = torch.softmax(tau.view(tau.shape[0], -1), dim=1).reshape(tau.shape)
attended_x = torch.einsum('bcxy,bxy->bcxy', x, tau)
return attended_x, tau
class Marginals(nn.Module):
def __init__(self, margin_dim=256, factor=2):
super(Marginals, self).__init__()
self.factor = factor
self.margin_dim = margin_dim
self.tau_pool = SumPool(factor=factor)
self.lamb = nn.Parameter(torch.ones(1, 1, margin_dim, margin_dim))
self.name = "marginals"
def forward(self, tau1, tau2):
tau1_marginal = self.tau_pool(tau1)
tau1_lamb = tau1_marginal * torch.exp(-self.lamb)
tau1_lamb_sum = tau1_lamb.view(tau1_lamb.shape[0], -1).sum(dim=1)
tau1_lamb = tau1_lamb / tau1_lamb_sum
tau2_lamb = tau2 * torch.exp(self.lamb)
tau2_lamb_sum = tau2_lamb.view(tau2_lamb.shape[0], -1).sum(dim=1)
tau2_lamb = tau2_lamb / tau2_lamb_sum
return (tau1, tau2), (tau1_lamb, tau2_lamb)
class MarginalsExtended(nn.Module):
def __init__(self, margin_dim=256, factor=2):
super(MarginalsExtended, self).__init__()
self.factor = factor
self.margin_dim = margin_dim
self.tau_pool = SumPool(factor=factor)
# self.lamb1 = nn.Parameter(torch.ones(1, 1, margin_dim, margin_dim))
# self.lamb2 = nn.Parameter(torch.ones(1, 1, margin_dim // 2, margin_dim // 2))
# self.lamb3 = nn.Parameter(torch.ones(1, 1, margin_dim // 4, margin_dim // 4))
self.lamb4 = nn.Parameter(torch.ones(1, 1, margin_dim // 8, margin_dim // 8))
self.name = "marginals_extended"
def forward(self, tau3, tau4):
# def forward(self, tau1, tau2, tau3, tau4):
# tau1_lamb_neg = SumPool(2)(tau1) * torch.exp(-self.lamb2)
# tau1_lamb_neg = tau1_lamb_neg / tau1_lamb_neg.view(tau1_lamb_neg.shape[0], -1).sum(dim=1)
#
# tau2_lamb = tau2 * torch.exp(self.lamb2)
# tau2_lamb = tau2_lamb / tau2_lamb.view(tau2_lamb.shape[0], -1).sum(dim=1)
# tau2_lamb_neg = SumPool(2)(tau2) * torch.exp(-self.lamb3)
# tau2_lamb_neg = tau2_lamb_neg / tau2_lamb_neg.view(tau2_lamb_neg.shape[0], -1).sum(dim=1)
#
# tau3_lamb = tau3 * torch.exp(self.lamb3)
# tau3_lamb = tau3_lamb / tau3_lamb.view(tau3_lamb.shape[0], -1).sum(dim=1)
tau3_lamb_neg = SumPool(2)(tau3) * torch.exp(-self.lamb4)
tau3_lamb_neg = tau3_lamb_neg / tau3_lamb_neg.view(tau3_lamb_neg.shape[0], -1).sum(dim=1)
tau4_lamb = tau4 * torch.exp(self.lamb4)
tau4_lamb = tau4_lamb / tau4_lamb.view(tau4_lamb.shape[0], -1).sum(dim=1)
# source, target
# return (tau1_lamb_neg, tau2_lamb), (tau2_lamb_neg, tau3_lamb), (tau3_lamb_neg, tau4_lamb)
return ((tau3_lamb_neg, tau4_lamb),)
if __name__ == "__main__":
layer = MarginalsExtended(margin_dim=256)
init_size = 256
taus = [torch.randn(1, 1, 256 // (2**i), 256 // (2**i)) for i in range(4)]
    res = layer(*taus[2:])  # forward() now only takes tau3 and tau4
pass
| true |
979331787eff3f1579da3c347f7bbe66d08202d7
|
Python
|
triquelme/MSc_Bioinformatics_projects
|
/Algorithmics/factorielle.py
|
UTF-8
| 102 | 3.46875 | 3 |
[] |
no_license
|
def factorielle(n):
if n==1:
return 1
return factorielle(n-1)*n
print(factorielle(3))
| true |
b3c03b0ef0aa240e7e45cdc7865b73d3c3e98e0f
|
Python
|
weidler/RLaSpa
|
/src/representation/network/janus.py
|
UTF-8
| 2,559 | 3.0625 | 3 |
[
"MIT"
] |
permissive
|
import random
import torch
import torch.nn as nn
import torch.optim as optim
class JanusAutoencoder(nn.Module):
def __init__(self, inputNeurons=4, hiddenNeurons=3, outputNeurons=4, actionDim=1):
super(JanusAutoencoder, self).__init__()
self.encoder = nn.Linear(inputNeurons, hiddenNeurons)
# decoderState decodes current state from state latent space
self.decoderState = nn.Linear(hiddenNeurons, outputNeurons)
# decoderNextState decodes next state from state latent space + action
self.decoderNextState = nn.Linear(hiddenNeurons + actionDim, outputNeurons)
self.activation = torch.sigmoid
def forward(self, state: torch.Tensor, action: torch.Tensor):
if state.dim() <= 1 or action.dim() <= 1:
raise ValueError(
"Networks expect any input to be given as a batch. For single input, provide a batch of size 1.")
# encode current state and create latent space
latent_space = self.activation(self.encoder(state))
# decode current state
outState = (self.decoderState(latent_space))
# append action to latent space
latent_space_action = torch.cat((latent_space, action), 1)
# decode next state from latent space with action
outNextState = (self.decoderNextState(latent_space_action))
return torch.cat((outState, outNextState), 0)
if __name__ == "__main__":
with open("../../../data/cartpole.data") as f:
data = [list(map(eval, l[:-1].split("\t"))) for l in f.readlines()]
print("READ FILE")
# print(data[0])
net = JanusAutoencoder(4, 3, 4, 1)
optimizer = optim.SGD(net.parameters(), lr=0.1)
criterion = nn.MSELoss()
total_loss = 0
print_every = 10000
epochs = 100000
for epoch in range(1, epochs):
rand = random.randint(0, len(data) - 2)
sample_id = rand
next_sample_id = rand + 1
current_state = torch.tensor(data[sample_id][0])
action = torch.tensor([data[sample_id][1]]).float()
next_state = torch.tensor(data[next_sample_id][0])
target = torch.cat((current_state, next_state), 0)
optimizer.zero_grad()
out = net(current_state, action)
loss = criterion(out, target)
loss.backward()
optimizer.step()
total_loss += loss.item()
if epoch % print_every == 0:
print(f"Epoch {epoch}: {total_loss/print_every}.")
total_loss = 0
torch.save(net.state_dict(), "../../models/siamese.model")
| true |
32053a87ba50cc94f22736f6bad671626bfe65b8
|
Python
|
gwhitaker13/pytest
|
/war.py
|
UTF-8
| 3,927 | 4.3125 | 4 |
[] |
no_license
|
"""
# ***War Game***
# War is a card game designed for two players, utilizing a standard (French style) 52-card deck of playing-cards.
# The objective is to capture all the cards in the game before your opponent.
# *Gameplay*
# All cards are shuffled, and then divided equally to each player in face down stacks (one stack for each player).
# Each player reveals the top card of their deck simultaneously,
# with the player revealing the highest-ranking card winning that particular round and thusly capturing their opponent's card
# (in addition to retaining their card). Both cards are then returned to the round-winner's deck, placed face down at the bottom.
# Gameplay continues in the above fashion, with players playing out consecutive rounds, until one player runs out of cards and loses.
# *Rankings*
# Cards are ranked by face value, with Ace, King, Queen, and Jack each (in order) taking the highest ranking,
# followed by number cards in numerical order (10 being worth more than a 9, etc.).
# *Ties*
# In the event of a tie in a round - two players playing the same ranked cards - both cards are left face up between the two players,
# and play proceeds to the next round. The winner of the next round takes all cards from the current as well as previous round.
# *Challenge*
# Your challenge is to write an application to simulate a game of War. Play out a game in full, and output the winner.
# Additionally, outputting the results of each round, including the card that each player played as well as the verdict of which player won.
# If no winner exists after 100 rounds, the game ends with a prompt to play chess instead.
"""
import random
def play_war():
# Generate deck of cards
    deck = [n for n in range(2, 15)] * 4
    # Represent face cards as 11-14; map them back to names with a dict
faces = {11: 'Jack', 12: 'Queen', 13: 'King', 14: 'Ace'}
# Randomize
random.shuffle(deck)
# Split the deck between players
a_cards = deck[:26]
b_cards = deck[26:]
round_count = 1
tie_cache = [] # Store tie cards here
while a_cards and b_cards:
if round_count < 100:
# take from front, append back
a_card = a_cards.pop(0)
b_card = b_cards.pop(0)
if a_card > b_card:
a_cards.append(a_card)
a_cards.append(b_card)
# check for cached cards from previous round, append, clear cache
if len(tie_cache) != 0:
for c in tie_cache:
a_cards.append(c)
del tie_cache[:]
a_card_print = faces.get(a_card, a_card)
b_card_print = faces.get(b_card, b_card)
print('Round {0}: {1} beats {2}. Player A wins!'.format(round_count, a_card_print, b_card_print))
elif a_card < b_card:
b_cards.append(a_card)
b_cards.append(b_card)
# check for cached cards from previous round, append, clear cache
if len(tie_cache) != 0:
for c in tie_cache:
b_cards.append(c)
del tie_cache[:]
a_card_print = faces.get(a_card, a_card)
b_card_print = faces.get(b_card, b_card)
print('Round {0}: {1} beats {2}. Player B wins!'.format(round_count, b_card_print, a_card_print))
elif a_card == b_card:
tie_cache.append(a_card)
tie_cache.append(b_card)
a_card_print = faces.get(a_card, a_card)
print("Round {0}: It's a tie! Both players had a(n) {1}".format(round_count, a_card_print))
round_count += 1
else:
print('You should play a game of chess instead...')
break
if __name__ == "__main__":
play_war()
| true |
ef0d9b48fce8aafd3069bb6c866e8c6b39779ac6
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2636/60670/290784.py
|
UTF-8
| 1,609 | 3.15625 | 3 |
[] |
no_license
|
# Total path length = dist(A,B) + max{dist(A,C) + dist(B,C)}
# dist(A,B) is largest when A and B are the endpoints of the tree diameter; then enumerate C to maximise the total
class edge:
def __init__(self,cur,to,value,nextedge):
self.cur=cur
self.to=to
self.value=value
self.nextedge=nextedge
def dfs_dia(x,dist):
global visited,v,maxdist,side1
if dist>maxdist:
maxdist=dist
side1=x
head=v[x]
visited[x]=True
while head!=None:
if not visited[head.to]:
dfs_dia(head.to,dist+head.value)
head=head.nextedge
visited[x]=False
def dfs_pre(x,dist):
global visited,v,dist1
dist1[x]=dist
head=v[x]
visited[x]=True
while head!=None:
if not visited[head.to]:
dfs_pre(head.to,dist+head.value)
head=head.nextedge
visited[x]=False
n,m=map(int,input().split())
v=[None for i in range(0,n+1)]
visited=[False for i in range(0,n+1)]
for i in range(0,m):
info=list(map(int,input().split()))
newedge=edge(info[0],info[1],info[2],v[info[0]])
v[info[0]]=newedge
newedge=edge(info[1],info[0],info[2],v[info[1]])
v[info[1]]=newedge
# Find the tree diameter: DFS from an arbitrary node to the farthest node, then DFS again from that node; the two nodes found are the diameter endpoints
maxdist=0
side1=0
dfs_dia(1,0)
side2=side1
maxdist=0
side1=0
dfs_dia(side2,0)
# Precompute the distances from both diameter endpoints to every other node
dist1=[0 for i in range(0,n+1)]
dfs_pre(side2,0)
dist2=dist1.copy()
dist1=[0 for i in range(0,n+1)]
dfs_pre(side1,0)
ans=maxdist
for i in range(1,n+1):
if i!=side1 and i!=side2:
ans=max(ans,maxdist+dist1[i]+dist2[i])
print(ans)
| true |
7808b98852e1aa6a1304b3b58e1b0c3d51af79c4
|
Python
|
YutingYao/my-crap-code
|
/traProject/utils.py
|
UTF-8
| 35,908 | 2.578125 | 3 |
[
"MIT"
] |
permissive
|
# utils
import heapq
import os
import numpy as np
from numpy.lib import interp
import pandas as pd
from numpy.lib.shape_base import tile
from scipy.stats import norm
from tqdm import tqdm
from shapely.geometry import LineString, Point, Polygon
from shapely.wkt import dumps, loads
def timespan2unix(timespan, isBeijing=True):
"""
[文本时间戳转unix时间戳]
:param timespan: [文本类型时间/时间范围]
:type timespan: [type]
:param isBeijing: [是否+8h], defaults to True
:type isBeijing: bool, optional
:return: [unix时间戳]
:rtype: [type]
"""
delta = 0
if isBeijing:
delta = 28800
if len(timespan) == 1:
unix = pd.to_datetime(timespan).value/1000000000 - delta
else:
unix = [pd.to_datetime(timespan[0]).value/1000000000 - delta,
pd.to_datetime(timespan[1]).value/1000000000-delta]
return unix
def csvMerge(targetFile, outFile='', outName='filesMerge', title=None, sepstr=',', postfix='.csv', sortf=False, sortr=None, fileNameAsIdx=False):
"""
[合并csv文件]
:param targetFile: [目标文件夹]
:type targetFile: [type]
:param outFile: [输出文件夹], defaults to ''
:type outFile: str, optional
:param outName: [输出文件名], defaults to 'filesMerge'
:type outName: str, optional
:param title: [表头], defaults to None
:type title: [type], optional
:param sepstr: [分隔符], defaults to ','
:type sepstr: str, optional
:param postfix: [文件名后缀], defaults to '.csv'
:type postfix: str, optional
:param sortf: [是否排序文件], defaults to False
:type sortf: bool, optional
:param sortr: [排序规则], defaults to None
:type sortr: [例:key=lambda x: int(x.split('/')[-1][:-4])], optional
:param fileNameAsIdx: [文件名作为索引 路网项目组数据使用], defaults to False
:type fileNameAsIdx: bool, optional
"""
if outFile == '':
outFile = targetFile
filenameList = getFileNames(targetFile, postfix=postfix)
fnameList = getFileNames(targetFile, postfix=postfix, fullName=False)
if fileNameAsIdx == True:
idList = list(range(len(fnameList)))
f = np.array(fnameList).reshape(-1, 1)
l = np.array(idList).reshape(-1, 1)
ANS = np.hstack([l, f])
ANS = pd.DataFrame(ANS, columns=['ID', 'fname']).to_csv(
outFile+'/ID-fname.csv', index=0)
if sortf == True:
filenameList.sort(key=lambda x: int(x.split('/')[-1][:-4]))
for idx, f in enumerate(filenameList):
tmp = pd.read_csv(f, sep=sepstr, names=title, header=None)
if fileNameAsIdx == True:
tmp['fileNames'] = idList[idx]
if not title is None:
title.append('fileNames')
if idx == 0:
tmp.to_csv(outFile+outName+'.csv', mode='a',
header=title, index=False)
else:
tmp.to_csv(outFile+outName+'.csv', mode='a', header=0, index=False)
def csvMerge2(targetFile, outFile='', outName='filesMerge', title=None, sepstr=',', postfix='.csv', sortf=False, sortr=None, fileNameAsIdx=False):
"""
[合并csv文件]
:param targetFile: [目标文件夹]
:type targetFile: [type]
:param outFile: [输出文件夹], defaults to ''
:type outFile: str, optional
:param outName: [输出文件名], defaults to 'filesMerge'
:type outName: str, optional
:param title: [表头], defaults to None
:type title: [type], optional
:param sepstr: [分隔符], defaults to ','
:type sepstr: str, optional
:param postfix: [文件名后缀], defaults to '.csv'
:type postfix: str, optional
:param sortf: [是否排序文件], defaults to False
:type sortf: bool, optional
:param sortr: [排序规则], defaults to None
:type sortr: [例:key=lambda x: int(x.split('/')[-1][:-4])], optional
:param fileNameAsIdx: [文件名作为索引 路网项目组数据使用], defaults to False
:type fileNameAsIdx: bool, optional
exmaple:csvMerge2('./test0/','./','lzlmerge',[l for l in range(11)],postfix='.txt',fileNameAsIdx=True)
"""
if outFile == '':
outFile = targetFile
filenameList = getFileNames(targetFile, postfix=postfix)
fnameList = getFileNames(targetFile, postfix=postfix, fullName=False)
if fileNameAsIdx == True:
idList = list(range(len(fnameList)))
fn = np.array(fnameList).reshape(-1, 1)
fid = np.array(idList).reshape(-1, 1)
ANS = np.hstack([fid, fn])
ANS = pd.DataFrame(ANS, columns=['ID', 'fname']).to_csv(
outFile+'/ID-fname_lzl.csv', index=0)
if sortf == True:
filenameList.sort(key=lambda x: int(x.split('/')[-1][:-4]))
for idx, f in enumerate(filenameList):
title1=title.copy()
tmp = pd.read_csv(f, sep=sepstr, names=title1, header=None)
if fileNameAsIdx == True:
tmp['fileNames'] = idList[idx]
if not title1 is None:
title1.append('fileNames')
if idx == 0:
tmp.to_csv(outFile+outName+'.csv', mode='a',
header=title1, index=False)
else:
tmp.to_csv(outFile+outName+'.csv', mode='a', header=None, index=False)
title1.clear()
def pathCheck(A):
"""
[辅助函数,检查路径是否存在,不存在则生成]
:param A: [路径]
:type A: [type]
:return: [路径]
:rtype: [type]
"""
if not os.path.exists(A):
os.makedirs(A)
return A
def read_txt(filePath, nrows=100):
"""
[用于读取大型txt文件]
:param filePath: [路径]
:type filePath: [type]
:param nrows: [前n行], defaults to 100
:type nrows: int, optional
"""
fp = open(filePath, 'r')
while nrows > 0:
print(fp.readline(), end=' ')
nrows -= 1
fp.close()
def getFileNames(fileName, prefix=None, postfix=None, fullName=True):
"""
[返回一个文件夹内所有文件的完整路径名]
:param fileName: [文件夹名]
:type fileName: [type]
:param prefix: [前缀], defaults to None
:type prefix: [type], optional
:param postfix: [后缀], defaults to None
:type postfix: [type], optional
:param fullName: [完整路径], defaults to True
:type fullName: bool, optional
:return: [路径名]
:rtype: [type]
"""
nlist = os.listdir(fileName)
if (prefix == None) & (postfix == None):
x = nlist
else:
x = []
if (prefix != None) & (postfix != None):
for file in nlist:
if file.startswith(prefix) & file.endswith(postfix):
x.append(file)
elif prefix != None:
for file in nlist:
if file.startswith(prefix):
x.append(file)
elif postfix != None:
x = []
for file in nlist:
if file.endswith(postfix):
x.append(file)
y = x
if fullName == True:
y = [fileName+file for file in x]
return y
def scan_files(directory, prefix=None, postfix=None):
"""
[扫描指定路径下符合要求的文件(包含子目录)]
:param directory: [目标路径]
:type directory: [type]
:param prefix: [前缀], defaults to None
:type prefix: [type], optional
:param postfix: [后缀], defaults to None
:type postfix: [type], optional
:return: [files_list]
:rtype: [type]
"""
files_list = []
for root, sub_dirs, files in os.walk(directory):
for special_file in files:
if postfix:
if special_file.endswith(postfix):
files_list.append(os.path.join(root, special_file))
elif prefix:
if special_file.startswith(prefix):
files_list.append(os.path.join(root, special_file))
else:
files_list.append(os.path.join(root, special_file))
return files_list
def csv2txt(csvpath, txtpath):
"""
[csv转txt]
:param csvpath: [csv路径]
:type csvpath: [type]
:param txtpath: [txt路径]
:type txtpath: [type]
"""
data = pd.read_csv(csvpath, encoding='utf-8')
with open(txtpath, 'a+', encoding='utf-8') as f:
for line in data.values:
f.write((str(line)+'\n'))
def calDate(data, timeindex='timestamp', timestr=False, calList=[], isbeijing=True, unit='s'):
"""
[生成时间列]
:param data: [数据或数据路径]
:type data: [type]
:param timestr: [计算文本形式时间], defaults to False
:type timestr: bool, optional
:param calList: [计算列表], defaults to []
:type calList: list, optional
:param isbeijing: [是否UTC+8], defaults to True
:type isbeijing: bool, optional
:return: [处理后的数据]
:rtype: [type]
"""
delta = 0
if isbeijing:
delta = 28800
if isinstance(data, str):
data = pd.read_csv(data)
tmp = pd.to_datetime(data[timeindex]+delta, unit=unit)
if timestr == True:
data['timeStr'] = tmp
if 'hour' in calList:
data['hour'] = tmp.dt.hour
if 'weekday' in calList:
data['weekday'] = tmp.dt.weekday
if 'month' in calList:
data['month'] = tmp.dt.month
if 'day' in calList:
data['day'] = tmp.dt.day
if 'year' in calList:
data['year'] = tmp.dt.year
return data
def calFeature(datapath, outfile='', N=24, minPNum1=10, minPNum2=30, speedName='speed'):
"""
[计算特征]
:param datapath: [输入数据/路径]
:type datapath: [type]
:param outfile: [输出路径未指定则以函数返回值形式处理], defaults to ''
:type outfile: str, optional
:param N: [最大前N个速度], defaults to 24
:type N: int, optional
:param minPNum: [最小特征点数], defaults to 10
:type minPNum: int, optional
:param speedName: [速度列名], defaults to 'speed'
:type minPNum: str, optional
:return: [特征:
VTopN:最大的前N个速度
Vmean:总体平均瞬时速度
Vstd:路段总体标准差
V85 百分之85车速
V15 百分之15车速
V95 百分之95车速
deltaV V85-V15
Vmode 路段速度众数]
:rtype: [type]
"""
oridata = pd.read_csv(datapath)
if len(oridata) < minPNum1:
return 0
linkID = datapath.split('/')[-1].split('.')[0]
data = oridata[oridata[speedName] >= 5]
ANS = None
Features = [linkID]
gs = data.groupby('hour')
Vs = []
for idx, g in gs:
if len(g) < minPNum1:
continue
else:
vtmp = g[speedName].mean() # cal vTOPN
Vs.append(vtmp)
VTopN = heapq.nlargest(N, Vs)
if len(VTopN) < N:
VTopN.extend(['null']*(N-len(VTopN)))
Features.extend(VTopN)
tmp = data[speedName].copy()
if len(tmp) < minPNum2:
Features.extend(['null']*7)
else:
tmp.sort_values(inplace=True)
Vmean = tmp.mean() # cal vmean
Vstd = tmp.std() # cal Rstd
V95 = tmp.quantile(0.95, interpolation='nearest') # V95
V85 = tmp.quantile(0.85, interpolation='nearest') # V85
V15 = tmp.quantile(0.15, interpolation='nearest') # V15
deltaV = V85-V15 # V85-V15
Vmode = tmp.apply(lambda x: int(x+0.5)).mode()[0] # Vmode
Features.extend([Vmean, Vstd, V95, V85, V15, deltaV, Vmode])
if ANS is None:
ANS = np.array([[c] for c in Features]).T
else:
ansTmp = np.array([[c] for c in Features]).T
ANS = np.vstack((ANS, ansTmp))
cols = ['linkID']
VN = ['V'+str(idx) for idx in range(N)]
cols.extend(VN)
cols.extend(['Vmean', 'Vstd', 'V95', 'V85', 'V15', 'deltaV', 'Vmode'])
ANS = pd.DataFrame(ANS, columns=cols)
if outfile != '':
# fname = str(linkID)+'_特征工程.csv'
# ANS.to_csv(outfile+fname, index=0)
fname = outfile + \
'N=%d_minPNum1=%d_minPNum2=%d_speedName=%s.csv' % (
N, minPNum1, minPNum2, speedName)
if not os.path.exists(fname):
ANS.to_csv(fname, index=0, mode='a')
else:
ANS.to_csv(fname, index=0, mode='a', header=None)
else:
return ANS
def genDataSet(datapath, roadpath, outfile='', dname='测试数据集', speedName='speed', minPNum1=10, yrName='link_id', xrName='linkID', attrNames=['oneway', 'layer', 'bridge', 'tunnel', 'viaduct', 'numofLines', 'maxspeed', 'geometry']):
"""
[计算特征]
:param datapath: [输入数据/路径]
:type datapath: [type]
:param outfile: [输出路径未指定则以函数返回值形式处理], defaults to ''
:type outfile: str, optional
:param N: [最大前N个速度], defaults to 24
:type N: int, optional
:param minPNum: [最小特征点数], defaults to 10
:type minPNum: int, optional
:param speedName: [速度列名], defaults to 'speed'
:type minPNum: str, optional
:return: [特征:
VTopN:最大的前N个速度
Vmean:总体平均瞬时速度
Vstd:路段总体标准差
V85 百分之85车速
V15 百分之15车速
V95 百分之95车速
deltaV V85-V15
Vmode 路段速度众数]
:rtype: [type]
"""
oridata = pd.read_csv(datapath)
PNum2 = len(oridata)
linkID = datapath.split('/')[-1].split('.')[0]
data = oridata[oridata[speedName] >= 5]
ANS = None
Features = [linkID]
gs = data.groupby('hour')
Vs = []
for idx, g in gs:
if len(g) < minPNum1:
continue
else:
vtmp = g[speedName].mean() # cal vTOPN
Vs.append(vtmp)
VTopN = heapq.nlargest(24, Vs)
N = len(VTopN)
if N < 24:
VTopN.extend(['null']*(24-len(VTopN)))
Features.extend(VTopN)
tmp = data[speedName].copy()
if len(tmp) < 1:
Features.extend(['null']*7)
else:
tmp.sort_values(inplace=True)
Vmean = tmp.mean() # cal vmean
Vstd = tmp.std() # cal Rstd
V95 = tmp.quantile(0.95, interpolation='nearest') # V95
V85 = tmp.quantile(0.85, interpolation='nearest') # V85
V15 = tmp.quantile(0.15, interpolation='nearest') # V15
deltaV = V85-V15 # V85-V15
Vmode = tmp.apply(lambda x: int(x+0.5)).mode()[0] # Vmode
Features.extend([Vmean, Vstd, V95, V85, V15, deltaV, Vmode])
Features.extend([N, PNum2])
if ANS is None:
ANS = np.array([[c] for c in Features]).T
else:
ansTmp = np.array([[c] for c in Features]).T
ANS = np.vstack((ANS, ansTmp))
cols = ['linkID']
VN = ['VTOP'+str(idx) for idx in range(24)]
cols.extend(VN)
cols.extend(['Vmean', 'Vstd', 'V95', 'V85', 'V15', 'deltaV', 'Vmode'])
cols.extend(['N', 'PNum2'])
ANS = pd.DataFrame(ANS, columns=cols)
ylist = pd.read_csv(roadpath)
Check = ylist[yrName].tolist()
ANS = ANS[ANS[xrName].isin(Check)]
if len(attrNames) == 0:
attrNames = list(ylist)
for attr in attrNames:
ANS[attr] = ANS[xrName].apply(
lambda x: ylist[ylist[yrName] == x][attr].values[0])
if outfile != '':
# fname = str(linkID)+'_特征工程.csv'
# ANS.to_csv(outfile+fname, index=0)
fname = outfile + '%s.csv' % (dname)
# 'N=%d_minPNum1=%d_minPNum2=%d_speedName=%s.csv' % (N, minPNum1,minPNum2,speedName)
if not os.path.exists(fname):
ANS.to_csv(fname, index=0, mode='a', encoding="utf-8-sig")
else:
ANS.to_csv(fname, index=0, mode='a',
header=None, encoding="utf-8-sig")
else:
return ANS
def appendRoadAttr(xpath, ypath, outpath, yrName='link_id', xrName='linkID', attrNames=['oneway', 'layer', 'bridge', 'tunnel', 'viaduct', 'numofLines', 'maxspeed', 'geometry']):
    # '''
    # Generate the dataset file
    # xpath: feature-file path
    # ypath: label-file path
    # outpath: output path
    # '''
"""
[拼接路段属性]
:param xpath: [特征文件路径]
:type xpath: [type]
:param ypath: [路段文件路径]
:type ypath: [type]
:param outpath: [description]
:type outpath: [输出文件路径]
:param yrName: [路段文件id], defaults to 'link_id'
:type yrName: str, optional
:param xrName: [特征文件id], defaults to 'linkID'
:type xrName: str, optional
:param attrNames: [待拼接属性], defaults to ['oneway','layer', 'bridge', 'tunnel', 'viaduct', 'numofLines','maxspeed','geometry']
:type attrNames: list, optional
"""
flist = getFileNames(xpath)
ylist = pd.read_csv(ypath)
Check = ylist[yrName].tolist()
for idx, f in enumerate(tqdm(flist)):
fname = f.split('/')[-1].split('.')[0]
tmp = pd.read_csv(f)
tmp = tmp[tmp[xrName].isin(Check)]
for attr in attrNames:
tmp[attr] = tmp[xrName].apply(
lambda x: ylist[ylist[yrName] == x][attr].values[0])
tmp.to_csv(outpath+fname+'dataSet.csv', index=0, encoding="utf-8-sig")
def getFilesBySize(filePath, descending=True):
"""
[按文件大小获取列表]
:param filePath: [文件路径]
:type filePath: [type]
:param descending: [降序], defaults to True
:type descending: bool, optional
:return: [description]
:rtype: [type]
"""
fileMap = {}
size = 0
for parent, dirnames, filenames in os.walk(filePath):
for filename in filenames:
size = os.path.getsize(os.path.join(parent, filename))
fileMap.setdefault(os.path.join(parent, filename), size)
filelist = sorted(fileMap.items(), key=lambda d: d[1], reverse=descending)
ANS = []
for filename, size in filelist:
ANS.append(filename)
return ANS
def vFilter(A, speedName='speed', vlimt=[5, 120]):
"""
[速度过滤]
:param A: [速度]
:type A: [type]
:param speedName: [速度列名], defaults to 'speed'
:type speedName: str, optional
:return: [description]
:rtype: [type]
"""
A = A[(A[speedName] >= vlimt[0]) & (A[speedName] <= vlimt[1])]
mu, sigma = norm.fit(A[speedName])
gFilter = [mu-3*sigma, mu+3*sigma]
A = A[(A[speedName] > gFilter[0]) & (A[speedName] < gFilter[1])]
return A
def dfReC(df, cname, cpos):
    '''
    Move a dataframe column to a given position
    '''
tmp = df.pop(cname)
df.insert(cpos, cname, tmp)
return df
def vFilter2(A, speedName='speed', vlimt=[5, 120]):
A = A[(A[speedName] >= vlimt[0]) & (A[speedName] <= vlimt[1])]
percentile = np.percentile(
A[speedName], (25, 50, 75), interpolation='linear')
    Q1 = percentile[0]  # lower quartile (25th percentile)
    Q2 = percentile[1]
    Q3 = percentile[2]  # upper quartile (75th percentile)
    IQR = Q3 - Q1  # interquartile range
    ulim = Q3 + 1.5*IQR  # upper bound: largest value still considered non-outlying
    llim = Q1 - 1.5*IQR  # lower bound: smallest value still considered non-outlying
if llim < 0:
llim = 0
gFilter = [llim, ulim]
    A = A[(A[speedName] > gFilter[0]) & (A[speedName] < gFilter[1])]  # drop outliers outside the box-plot whiskers
return A
def genNewspeed(A, vlimt=[5, 120],minAcc=50,velocityName='velocity',ws=0.5,wv=0.5):
"""
[速度过滤]
:param A: [速度]
:type A: [type]
:param speedName: [速度列名], defaults to 'speed'
:type speedName: str, optional
:return: [description]
:rtype: [type]
"""
    # A=vFilter2(A,'speed')  # first filter the raw speed
    # A=vFilter2(A,'velocity')  # then filter the computed speed
speedName = 'speed'
A = A[(A[speedName] >= vlimt[0]) & (A[speedName] <= vlimt[1])]
speedName = velocityName
A = A[(A[speedName] >= vlimt[0]) & (A[speedName] <= vlimt[1])]
    A['newspeed'] = A.apply(lambda a: a[velocityName]*wv + a.speed*ws if (a.type == 0 and a[velocityName] > 0 and a.acc <= minAcc) else a.speed, axis=1)  # fuse computed and raw speed
    A = vFilter2(A, 'newspeed')  # filter the fused speed once more
return A
def csvMerge1(targetFile, outFile='', outName='filesMerge', title=None, sepstr=',', postfix='.csv', sortf=False, sortr=None, fileNameAsIdx=False):
"""
[合并csv文件]
:param targetFile: [目标文件夹]
:type targetFile: [type]
:param outFile: [输出文件夹], defaults to ''
:type outFile: str, optional
:param outName: [输出文件名], defaults to 'filesMerge'
:type outName: str, optional
:param title: [表头], defaults to None
:type title: [type], optional
:param sepstr: [分隔符], defaults to ','
:type sepstr: str, optional
:param postfix: [文件名后缀], defaults to '.csv'
:type postfix: str, optional
:param sortf: [是否排序文件], defaults to False
:type sortf: bool, optional
:param sortr: [排序规则], defaults to None
:type sortr: [例:key=lambda x: int(x.split('/')[-1][:-4])], optional
:param fileNameAsIdx: [文件名作为索引 路网项目组数据使用], defaults to False
:type fileNameAsIdx: bool, optional
"""
if outFile == '':
outFile = targetFile
filenameList = getFileNames(targetFile, postfix=postfix)
fnameList = getFileNames(targetFile, postfix=postfix, fullName=False)
if fileNameAsIdx == True:
idList = ['F'+str(i).rjust(7, '0') for i in range(len(fnameList))]
f = np.array(fnameList).reshape(-1, 1)
        l = np.array(idList, str).reshape(-1, 1)
ANS = np.hstack([l, f])
ANS = pd.DataFrame(ANS, columns=['ID', 'fname']).to_csv(
outFile+'/ID-fname.csv', index=0)
if sortf == True:
filenameList.sort(key=lambda x: int(x.split('/')[-1][:-4]))
for idx, f in enumerate(filenameList):
tmp = pd.read_csv(f, sep=sepstr, names=title, header=None)
if fileNameAsIdx == True:
tmp['fileNames'] = idList[idx]
            tmp[0] = tmp[0].apply(
                lambda x: idList[idx]+'P'+str(x).rjust(4, '0'))
if not title is None:
title.append('fileNames')
if idx == 0:
tmp.to_csv(outFile+outName+'.csv', mode='a',
header=title, index=False)
else:
tmp.to_csv(outFile+outName+'.csv', mode='a', header=0, index=False)
# csvMerge1('./test/','./',postfix='.txt',fileNameAsIdx=True)
def joind2data_hw(floder1, floder2, floder3):
list_dir = os.listdir(floder2) # bug
filesnum = len(list_dir)
# print(filesnum)
process = 1
for cur_filename in tqdm(list_dir):
# print(cur_filename)
process += 1
# print('\r', process, end='')
curfilepath1 = os.path.join(floder1, cur_filename)
curfilepath2 = os.path.join(floder2, cur_filename)
curfilepath3 = os.path.join(floder3, cur_filename)
with open(curfilepath1, 'rb')as p1_obj, open(curfilepath2, 'rb')as p2_obj:
p_lines1 = pd.read_csv(p1_obj, header=None)
p_lines2 = pd.read_csv(p2_obj, header=None)
# print(p_lines1)
# print(p_lines2)
p_lines2[5] = p_lines1[1]
p_lines2[6] = p_lines1[2]
p_lines2[7] = p_lines1[3]
p_lines2[8] = p_lines1[4]
p_lines2[9] = p_lines1[7]
p_lines2[10] = p_lines1[8]
# print(p_lines2)
p_lines2.to_csv(curfilepath3, float_format='%6f',
sep=',', header=None, index=0)
# with open(curfilepath3, 'rb')as p3_obj:
#p_lines3 = pd.read_csv(p3_obj, header=None)
# print(p_lines3)
# return
def dfReC(df, cname, cpos):
    '''
    Move a dataframe column to a given position
    '''
tmp = df.pop(cname)
df.insert(cpos, cname, tmp)
return df
def cutBZ(df, clist=[]):
    '''
    Strip remark text: keep only the first character of each listed column
    '''
for c in clist:
df[c] = df[c].apply(lambda x: x[0])
return df
def calXdis(geo):
    '''Compute the straight-line displacement between the start and end of a geometry'''
line = loads(geo)
plist = list(line[0].coords)
p0 = np.array(plist[0])
p1 = np.array(plist[-1])
ans = np.sqrt(np.sum(np.square(p0-p1)))
return ans
def calFeature1(datapath, outfile='', N=24, minPNum=50, speedName='speed', linkID='', maxspeed=''):
"""
[计算特征]
:param datapath: [输入数据/路径]
:type datapath: [type]
:param outfile: [输出路径未指定则以函数返回值形式处理], defaults to ''
:type outfile: str, optional
:param N: [最大前N个速度], defaults to 24
:type N: int, optional
:param minPNum: [最小特征点数], defaults to 10
:type minPNum: int, optional
:param speedName: [速度列名], defaults to 'speed'
:type minPNum: str, optional
:return: [特征:
VTopN:最大的前N个速度
Vmean:总体平均瞬时速度
Vstd:路段总体标准差
V85 百分之85车速
V15 百分之15车速
V95 百分之95车速
deltaV V85-V15
Vmode 路段速度众数]
:rtype: [type]
"""
oridata = pd.read_csv(datapath)
if len(oridata) < minPNum:
return 0
if linkID == '':
        linkID = datapath.split('/')[-1].split('.')[0]  # parse link_id from the file name
if maxspeed == '':
maxspeed = datapath.split('/')[-2].split('maxspeed=')[1] # maxspeed
data = oridata[oridata[speedName] >= 5]
ANS = None
Features = [linkID, int(maxspeed)]
gs = data.groupby('hour')
Vs = []
for idx, g in gs:
if len(g) < minPNum:
continue
else:
vtmp = g[speedName].mean() # cal vTOPN
Vs.append(vtmp)
VTopN = heapq.nlargest(N, Vs)
if len(VTopN) < N:
VTopN.extend(['null']*(N-len(VTopN)))
Features.extend(VTopN)
tmp = data[speedName].copy()
percetIndex = list(np.arange(0, 1.05, 0.05))
if len(tmp) < minPNum:
Features.extend(['null']*24)
else:
tmp.sort_values(inplace=True)
        Vmean = tmp.mean()  # cal vmean  # TODO: to be revised
Vstd = tmp.std() # cal Rstd
# VMin,V5~V95,VMax
Vpercent = [tmp.quantile(p, interpolation='nearest')
for p in percetIndex]
Vmode = tmp.apply(lambda x: int(x+0.5)).mode()[0] # Vmode
Features.extend([Vmean, Vstd])
Features.extend(Vpercent)
Features.append(Vmode)
if ANS is None:
ANS = np.array([[c] for c in Features]).T
else:
ansTmp = np.array([[c] for c in Features]).T
ANS = np.vstack((ANS, ansTmp))
cols = ['linkID', 'maxspeed']
VTopN = ['VTop'+str(idx) for idx in range(N)]
cols.extend(VTopN)
cols.extend(['Vmean', 'Vstd'])
cols.extend(['V'+str(p*100) for p in percetIndex])
cols.extend(['Vmode'])
ANS = pd.DataFrame(ANS, columns=cols)
if outfile != '':
# fname = str(linkID)+'_特征工程.csv'
# ANS.to_csv(outfile+fname, index=0)
fname = outfile + \
'N=%d_minPNum=%d_speedName=%s.csv' % (N, minPNum, speedName)
if not os.path.exists(fname):
ANS.to_csv(fname, index=0, mode='a')
else:
ANS.to_csv(fname, index=0, mode='a', header=None)
else:
return ANS
def genDataSet1(datapath,
roadpath,
outfile='',
dname='测试数据集',
speedName='speed',
minPNum=50,
yrName='link_id',
xrName='linkID',
linkID='',
maxspeed='',
attrNames=['oneway', 'layer', 'bridge', 'tunnel', 'viaduct',
'numofLines', 'geometry', 'Sdis', 'Xdis', 'RC'],
dropWifi=False,
dropViaduct=False,
vTopNMode=1,
percent=0.5):
"""
[生成数据集]
:param datapath: [轨迹数据路径]
:type datapath: [type]
:param roadpath: [路网数据路径]
:type roadpath: [type]
:param outfile: [输出路径], defaults to ''
:type outfile: str, optional
:param dname: [输出文件名], defaults to '测试数据集'
:type dname: str, optional
:param speedName: [速度名], defaults to 'speed'
:type speedName: str, optional
:param minPNum: [参与统计的最小点数], defaults to 50
:type minPNum: int, optional
:param yrName: [路网id列名], defaults to 'link_id'
:type yrName: str, optional
:param xrName: [轨迹id列名], defaults to 'linkID'
:type xrName: str, optional
:param linkID: [轨迹路段linkid], defaults to ''
:type linkID: str, optional
:param maxspeed: [轨迹路段限速], defaults to ''
:type maxspeed: str, optional
:param attrNames: [拼接属性名], defaults to ['oneway', 'layer', 'bridge', 'tunnel', 'viaduct', 'numofLines', 'geometry','Sdis','Xdis','RC']
:type attrNames: list, optional
:param dropWifi: [删除wifi点], defaults to False
:type dropWifi: bool, optional
:param dropViaduct: [删除有遮挡路段], defaults to False
:type dropViaduct: bool, optional
:return: [description]
:rtype: [type]
"""
oridata = pd.read_csv(datapath)
if dropWifi == True:
oridata = oridata[oridata.type == 0]
oridata.reset_index(drop=True, inplace=True)
pNum = len(oridata)
if pNum < minPNum:
return 0
if linkID == '':
        linkID = datapath.split('/')[-1].split('.')[0]  # parse link_id from the file name
if maxspeed == '':
maxspeed = datapath.split('/')[-2].split('maxspeed=')[1] # maxspeed
data = oridata[(oridata[speedName] >= 5) & (oridata[speedName] <= 120)]
ANS = None
Features = [linkID, int(maxspeed), pNum]
gs = data.groupby('hour')
Vs = []
for idx, g in gs:
if len(g) < minPNum:
continue
else:
if vTopNMode==1:
                vtmp = g[speedName].mean()  # cal vTOPN  # TODO: use a percentile value instead of the mean
else:
                vtmp = g[speedName].quantile(percent,interpolation='nearest')  # TAG: take the percentile value (e.g. the 50th) rather than the mean
Vs.append(vtmp)
VTopN = heapq.nlargest(24, Vs)
N = len(VTopN)
if N < 24:
VTopN.extend(['null']*(24-len(VTopN)))
Features.extend(VTopN)
tmp = data[speedName].copy()
percetIndex = list(np.arange(0, 1.05, 0.05))
if len(tmp) < minPNum:
Features.extend(['null']*24)
else:
tmp.sort_values(inplace=True)
        Vmean = tmp.mean()  # cal vmean  # FIXME: to be revised
Vstd = tmp.std() # cal Vstd
# VMin,V5~V95,VMax
Vpercent = [tmp.quantile(p, interpolation='nearest')
for p in percetIndex]
Vmode = tmp.apply(lambda x: int(x+0.5)).mode()[0] # Vmode
Features.extend([Vmean, Vstd])
Features.extend(Vpercent)
Features.append(Vmode)
if ANS is None:
ANS = np.array([[c] for c in Features]).T
else:
ansTmp = np.array([[c] for c in Features]).T
ANS = np.vstack((ANS, ansTmp))
cols = ['linkID', 'maxspeed', 'pNum']
VTopN = ['VTop'+str(idx) for idx in range(24)]
cols.extend(VTopN)
cols.extend(['Vmean', 'Vstd'])
cols.extend(['V'+str(int(p*100)) for p in percetIndex])
cols.extend(['Vmode'])
ANS = pd.DataFrame(ANS, columns=cols)
ylist = pd.read_csv(roadpath)
Check = ylist[yrName].tolist()
ANS = ANS[ANS[xrName].isin(Check)]
if len(ANS)==0:
return 0
if len(attrNames) == 0:
attrNames = list(ylist)
for attr in attrNames:
ANS[attr] = ANS[xrName].apply(
lambda x: ylist[ylist[yrName] == x][attr].values[0])
if dropViaduct == True and ANS.loc[0, 'viaduct'] == 'T':
return 0
else:
if outfile != '':
fname = outfile + '%s.csv' % (dname)
# 'N=%d_minPNum1=%d_minPNum2=%d_speedName=%s.csv' % (N, minPNum1,minPNum2,speedName)
if dropViaduct==True:
ANS.drop('viaduct',axis=1,inplace=True)
if not os.path.exists(fname):
ANS.to_csv(fname, index=0, mode='a', encoding="utf-8-sig")
else:
ANS.to_csv(fname, index=0, mode='a',
header=None, encoding="utf-8-sig")
else:
return ANS
def trans_data(data=None, fname='特征测试_newspeed_遮挡.csv', path='D:/DiplomaProject/output/paper/特征测试/', minN=2, vlist=[20, 30, 40, 50, 60, 70, 80], onehot_y=True, minmax_x=True, onehot_x=False, concols=['viaduct'], out2File=False, outpath='D:/DiplomaProject/output/paper/特征测试/', postfix='转换'):
    if data is None:  # TODO: this function needs refactoring
data = pd.read_csv(path+fname)
data.maxspeed = data.maxspeed.astype(int)
data.drop(['linkID', 'pNum'], axis=1, inplace=True)
data = data[data.maxspeed.isin(vlist)]
data = data.reset_index(drop=True)
tmp = data['maxspeed']
if onehot_y == True:
tmp = pd.get_dummies(tmp)
else:
tmp = pd.DataFrame(tmp)
df = data.drop('maxspeed', axis=1)
if onehot_x == True:
df = pd.get_dummies(df, columns=concols)
if minmax_x == True:
df = df.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)))
ans = tmp.join(df)
vdrop = ['VTop'+str(i) for i in range(minN, 24)]
ans.drop(vdrop, axis=1, inplace=True)
ans.dropna(inplace=True)
ans.reset_index(drop=True, inplace=True)
if out2File == True and outpath != '':
new_name = fname.replace('.csv', '_%s.csv' % postfix)
ans.to_csv(outpath+new_name, index=0)
else:
return ans
def getSample(rpath='../../output/paper/轨迹_按link_id_v2_Pts_minN=50_bufR=1e-05_dropWifi=False_velocity=velocity1/',
outfile='../../output/paper/样本路段/',
sampleSize=[0,0.25,0.5,0.75]):
"""
[获取样本]
:param rpath: [轨迹路径], defaults to '../../output/paper/轨迹_按link_id_v2_Pts_minN=50_bufR=1e-05_dropWifi=False_velocity=velocity1/'
:type rpath: str, optional
:param outfile: [样本输出路径], defaults to '../../output/paper/样本路段/'
:type outfile: str, optional
:param sampleSize: [样本分位数列表], defaults to [0,0.25,0.5,0.75]
:type sampleSize: list, optional
"""
postfix=rpath.split('/')[-2].split('轨迹_')[1]
flist0=getFileNames(rpath)
sampleName=[str(int(100*(1-i)))+'位' for i in sampleSize]
# sampleName=['最大','75位','50位','25位']
for f in flist0:
slist=[]
flist1=getFilesBySize(f)
for i in range(len(sampleName)):
            tmp=flist1[int(len(flist1)*sampleSize[i])]  # file at the requested size percentile
slist.append(tmp.replace('\\','/')+'|%s'%sampleName[i])
for s in slist:
tmp0=s.split('|')
tmp=tmp0[0].split('/')
fname=tmp[-2]+'_%s_'%tmp0[1]+tmp[-1]
pd.read_csv(tmp0[0]).to_csv(pathCheck(outfile+'%s/%s/'%(postfix,tmp[-2]))+fname,index=0)
return outfile+'%s/'%postfix
| true |
94e3b3b76e45c22d48287f23c46a849fcd7e220c
|
Python
|
orenkek/MousesOwlsSocialNetwork
|
/TelegramBotCommand/aboutMeCommand.py
|
UTF-8
| 1,171 | 2.578125 | 3 |
[] |
no_license
|
from telegram import Update
from telegram.ext import CallbackContext, ConversationHandler
import repository
def aboutMe(update: Update, context: CallbackContext) -> None:
userId = update.message.chat_id
owl = repository.getOwl(userId)
if(owl != None):
update.message.reply_text(
'You are owl!\n'
'Your userName is {userName}\n'.format(userName = owl.userName) +
'Your happinessLevel is {happinessLevel}\n'.format(happinessLevel = owl.happinessLevel) +
'Your satietyLevel is {satietyLevel}'.format(satietyLevel = owl.satietyLevel))
return
mouse = repository.getMouse(userId)
if (mouse != None):
update.message.reply_text(
'You are mouse!\n'
'Your userName is {userName}\n'.format(userName = mouse.userName) +
("You are alive" if mouse.isLive else "You are dead"))
return
update.message.reply_text(
'You are not registered!')
def live(update: Update, context: CallbackContext) -> None:
userId = update.message.chat_id
mouse = repository.getMouse(userId)
mouse.isLive = True
repository.saveMouse(mouse)
| true |
d58c86c774178a4e66e0c70e7ea4b82bb1430cf7
|
Python
|
codeprogredire/Python_Coding
|
/101.py
|
UTF-8
| 549 | 3.578125 | 4 |
[] |
no_license
|
'''
Given an array of n elements. We need to answer q
queries telling the sum of elements in range l to
r in the array.
Prefix sum
'''
from sys import stdin,stdout
n=int(stdin.readline())
arr=list(map(int,stdin.readline().split()))
tot=0
preSum=[]
for i in range(n):
tot+=arr[i]
preSum.append(tot)
q=int(stdin.readline())
for i in range(q):
query=list(map(int,stdin.readline().split()))
L,R=query[0],query[1]
if L>0:
ans=preSum[R]-preSum[L-1]
else:
ans=preSum[R]
stdout.write(str(ans)+'\n')
| true |
e2b9ccdefbd0e65773a8876e2bcc64d3bdb45c7c
|
Python
|
Knight-zhang/economic
|
/实验2-久期的计算与应用/Duration.py
|
UTF-8
| 510 | 2.765625 | 3 |
[] |
no_license
|
from scipy import *
def Duration(c,y,f,num_beg,n):
a=1/f
c=100*c
t=num_beg/365
p=0
s=0
for i in range(n-1):
p_i=c*(1+y)**(-t)
p+=p_i
s_i=(c/(1+y)**t)*t
s+=s_i
t+=a
v_pr=(100+c)/(1+y)**t
s_pr=((c+100)/(1+y)**t)*t
p=p+v_pr
s+=s_pr
D=s/p
D_fix=D/(1+y)
D_dol=D*p
print("久期为:D=%.4f" % D)
print("修正久期为:D_fix=%.4f" % D_fix)
print("美元久期为:D_dol=%.4f" % D_dol)
| true |
cf7d6fdf438dad49f336c2ba673e01ce363d6ec5
|
Python
|
mateuszmidor/PythonStudy
|
/usd-to-pln/main.py
|
UTF-8
| 441 | 3.359375 | 3 |
[] |
no_license
|
import requests
def GetUsdToPlnRate(date):
# Construct the API URL
url = f'https://api.frankfurter.app/{date}?from=USD&to=PLN'
# Send a GET request to the API and parse the JSON response
response = requests.get(url)
data = response.json()
# Extract the exchange rate from the response and return it
rate = data['rates']['PLN']
return rate
rate = GetUsdToPlnRate('2023-03-20')
print(rate) # Output: 4.3185
| true |
5458d6f1ef2075b3f6b56aae681e329d1e767a94
|
Python
|
derlih/async-fsm
|
/tests/test_state.py
|
UTF-8
| 2,370 | 2.828125 | 3 |
[
"MIT"
] |
permissive
|
import asyncio
from contextlib import contextmanager
from unittest.mock import MagicMock
import pytest
from async_fsm.exceptions import *
from async_fsm.state import *
async def check_state(state, enter, exit):
assert enter.call_count == 0
assert exit.call_count == 0
for x in range(1, 3):
await state.enter()
assert enter.call_count == x
assert exit.call_count == x - 1
await state.exit()
assert enter.call_count == x
assert exit.call_count == x
@pytest.mark.asyncio
async def test_sync_cm():
enter = MagicMock()
exit = MagicMock()
@contextmanager
def sync_state():
enter()
yield
exit()
s = StateContextManager(sync_state)
await check_state(s, enter, exit)
@pytest.mark.asyncio
async def test_sync_cm_as_function():
enter = MagicMock()
exit = MagicMock()
@contextmanager
def sync_state():
enter()
yield
exit()
s = StateFunction(sync_state)
await check_state(s, enter, exit)
@pytest.mark.asyncio
async def test_coroutine_function():
enter = MagicMock()
async def fn():
enter()
s = StateCoroutineFunction(fn)
assert enter.call_count == 0
await s.enter()
enter.assert_called_once()
await s.exit()
@pytest.mark.asyncio
async def test_async_cm():
enter = MagicMock()
exit = MagicMock()
class AsyncCM:
async def __aenter__(self):
enter()
async def __aexit__(self, exc_type, exc, tb):
exit()
s = StateAsyncContextManager(AsyncCM)
await check_state(s, enter, exit)
@pytest.mark.asyncio
async def test_sync_function():
enter = MagicMock()
def fn():
enter()
s = StateFunction(fn)
assert enter.call_count == 0
await s.enter()
enter.assert_called_once()
await s.exit()
@pytest.mark.asyncio
async def test_sync_cm_function():
enter = MagicMock()
exit = MagicMock()
cm = pytest.helpers.CM(enter, exit)
def fn():
return cm
s = StateFunction(fn)
await check_state(s, enter, exit)
@pytest.mark.asyncio
async def test_async_cm_function():
enter = MagicMock()
exit = MagicMock()
cm = pytest.helpers.AsyncCM(enter, exit)
def fn():
return cm
s = StateFunction(fn)
await check_state(s, enter, exit)
| true |
0e7faad8b944cdbb4a687fa829fbd98bb33c9edf
|
Python
|
praxis001/blackjack
|
/blackjack project_scheme.py
|
UTF-8
| 888 | 3.234375 | 3 |
[] |
no_license
|
#1 card preparations
#2 money preparations (under construction)
#2 should make the input integer.
#3 distributing cards for the game
#4 card scoring
#5 calculating total score
#6 request for betting
#6 (constructing, should make the input integer.)
#6 (construction needed: the betting money cannot be more than the ready money.)
#7 printing the initial deal
#8 function to request player's following action
#9 designing game step after Hitting(constructing)
#10 designing game step after Standing(constructing)
#11 designing game step after Doubling Down(constructing)
#12 designing game step after Splitting(constructing)
#?? calculating the balance according to the game result
def win():
    pass
#?? must prepare new deck if current deck is almost exhausted
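# A minimal sketch of steps #4 and #5 above (card scoring and total score), assuming cards are
# represented as rank strings ('2'-'10', 'J', 'Q', 'K', 'A'). The names card_value and
# hand_total are illustrative placeholders, not part of the original plan.
def card_value(rank):
    if rank in ('J', 'Q', 'K'):
        return 10
    if rank == 'A':
        return 11  # counted as 11 here; hand_total downgrades aces to 1 as needed
    return int(rank)

def hand_total(hand):
    total = sum(card_value(r) for r in hand)
    aces = hand.count('A')
    while total > 21 and aces:
        total -= 10  # treat one ace as 1 instead of 11
        aces -= 1
    return total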
def f(*a):
return 'b'
print(f('c'))
print(f())
| true |
9033bd5121f4ac31494392f6907fbda07b4f8558
|
Python
|
VoxelPixel/CiphersInPython
|
/AtBash.py
|
UTF-8
| 1,786 | 3.84375 | 4 |
[
"MIT"
] |
permissive
|
# *********
# -*- Made by VoxelPixel
# -*- For YouTube Tutorial
# -*- https://github.com/VoxelPixel
# -*- Support me on Patreon: https://www.patreon.com/voxelpixel
# *********
def at_encryption():
alpa = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# reversing alphabets of alpa variable
rev_alpa = alpa[::-1]
message = input("Enter message: ").upper();
encry_text = ""
for i in range(len(message)):
if message[i] == chr(32):
encry_text += " "
else:
for j in range(len(alpa)):
if message[i] == alpa[j]:
encry_text += rev_alpa[j]
break
# if
# inner for
# if-else
# for
print("Encrypted Text: {}".format(encry_text))
def at_decryption():
alpa = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# reversing alphabets of alpa variable
rev_alpa = alpa[::-1]
message = input("Enter message: ").upper();
dencry_text = ""
for i in range(len(message)):
if message[i] == chr(32):
dencry_text += " "
else:
for j in range(len(rev_alpa)):
if message[i] == rev_alpa[j]:
dencry_text += alpa[j]
break
# if
# inner for
# if-else
# for
print("Decrypted Text: {}".format(dencry_text))
def main():
choice = int(input("1. Encryption\n2.Decryption\nChoose(1,2): "))
if choice == 1:
print("---Encryption---")
at_encryption()
elif choice == 2:
print("---Decryption---")
at_decryption()
else:
print("Wrong Choice")
if __name__ == "__main__":
main()
| true |
20d760a9cc1697f5b9dad53b174077e82001d18b
|
Python
|
DavidZamanian/AutoFill-and-Checkout-for-supreme
|
/AutoBuySupreme.py
|
UTF-8
| 2,114 | 2.5625 | 3 |
[] |
no_license
|
from selenium import webdriver
from Config_for_supreme import keys
import time
# Add URL and details in the config file #
def timeme(method):
def wrapper(*args, **kw):
startTime = int(round(time.time() * 1000))
result = method(*args, **kw)
endTime = int(round(time.time() * 1000))
print((endTime - startTime)/1000, 's')
return result
return wrapper
@timeme
def order():
# add to cart
driver.find_element_by_name('commit').click()
# wait for checkout button element to load
time.sleep(.5)
checkout_element = driver.find_element_by_class_name('checkout')
checkout_element.click()
# fill out checkout screen fields
driver.find_element_by_xpath('//*[@id="order_billing_name"]').send_keys(keys['name'])
driver.find_element_by_xpath('//*[@id="order_email"]').send_keys(keys['email'])
driver.find_element_by_xpath('//*[@id="order_tel"]').send_keys(keys['phone_number'])
driver.find_element_by_xpath('//*[@id="bo"]').send_keys(keys['street_address'])
driver.find_element_by_xpath('//*[@id="order_billing_zip"]').send_keys(keys['zip_code'])
driver.find_element_by_xpath('//*[@id="order_billing_city"]').send_keys(keys['city'])
driver.find_element_by_xpath('//*[@id="order_billing_country"]/option[34]').click()
driver.find_element_by_xpath('//*[@id="credit_card_type"]/option[3]').click()
driver.find_element_by_xpath('//*[@id="cnb"]').send_keys(keys['card_number'])
driver.find_element_by_xpath('//*[@id="credit_card_month"]/option[9]').click()
driver.find_element_by_xpath('//*[@id="credit_card_year"]/option[5]').click()
driver.find_element_by_xpath('//*[@id="vval"]').send_keys(keys['card_cvv'])
driver.find_element_by_xpath('//*[@id="cart-cc"]/fieldset/p/label/div/ins').click()
process_payment = driver.find_element_by_xpath('//*[@id="pay"]/input')
process_payment.click()
if __name__ == '__main__':
# load chrome
driver = webdriver.Chrome('./chromedriver')
# get product url
driver.get(keys['product_url'])
order()
| true |
bbdffb307975afca7c8b5c40fe003c1ae8113c5d
|
Python
|
bhuguu/GameSimulator
|
/Components/Node.py
|
UTF-8
| 786 | 2.84375 | 3 |
[] |
no_license
|
from .InformationSet import InformationSet
class Node:
def __init__(self, info_set_table, player):
self.next_nodes = []
self.player = player
self.info_set = InformationSet(info_set_table, self)
def get_info_set(self):
return self.info_set
def set_info_set(self, info_set):
self.info_set = info_set
def get_next_nodes(self):
return self.next_nodes
def connect(self, other):
self.next_nodes.append(other)
def move(self):
return self.next_nodes[self.player.choose_action(self.info_set)]
def merge(self, other):
self.info_set.merge(other.get_info_set())
def set_as_terminal(self, payoff, result_table):
result_table.register_info_set_as_terminal(self.info_set, payoff)
| true |
bc138e82fe894378594602adb2e1bf2ca021a31e
|
Python
|
ChenLaiHong/pythonBase
|
/test/homework3/10.1.1-文件加减法.py
|
UTF-8
| 553 | 3.28125 | 3 |
[] |
no_license
|
r = open("jisuan.txt", "r")
f1 = open("jieguo.txt", "w")
# Read the input and process each line
content = r.readlines()
for i in content:
if i == "":
continue
else:
if "+" in i:
temp = i.split("+")
f1.write(str("%0.2f" % (float(temp[0]) + float(temp[1]))) + '\n')
elif "-" in i:
temp = i.split("-")
f1.write(str("%0.2f" % (float(temp[0]) - float(temp[1]))) + '\n')
# Close the input file
r.close()
r1 = open("jieguo.txt", "r")
content1 = r1.readlines()
for i in content1:
print(i, end="")
r1.close()
| true |
d2f800ff7daef57f4a33d9cb92078338f006b42f
|
Python
|
yiyayiyayoaaa/pygame-alien
|
/setting.py
|
UTF-8
| 537 | 2.640625 | 3 |
[] |
no_license
|
class Settings(object):
    '''A class that stores all of the game's settings'''
    # frame rate of the game
    def __init__(self):
        '''Initialize the game's settings'''
self.FRAME_RATE = 60
self.screen_width = 600
self.screen_height = 800
self.bg_color = (230, 230, 230)
self.ship_speed_factor = 8
self.bullet_speed_factor = 12
self.bullet_speed_factor2 = 6
self.bullet_width = 5
self.bullet_height = 10
self.bullet_color = (255, 100, 100)
self.alien_speed_factor = 4
| true |
c5e492f788d03d5a559333a9d064f2ea76f2faa6
|
Python
|
dawdawdo/riddler_battle
|
/permutations.py
|
UTF-8
| 612 | 2.9375 | 3 |
[] |
no_license
|
# Standard Modules
from itertools import permutations
import logging as log
# User Modules
from cfg import app
@app()
def main():
    log.info('Opening output file...')
with open(r'C:\ProgramData\Scratchwork\permnutations.txt', mode='w') as twr:
log.info('Output file open')
for i, p in enumerate(permutations(range(1,11))):
twr.write(repr(p).replace('(', '').replace(')', '') + '\n')
if (1 + i) % 1000000 == 0:
log.info('Line %i written: %s' % (1+i, repr(p)))
log.info('permutations complete')
if __name__ == '__main__': main()
| true |
b59b5388440e93207e5bd396e54c6efc95fbc75c
|
Python
|
santitobon9/Cloud-Cluster-Freeway-Project
|
/src/query5.py
|
UTF-8
| 1,472 | 2.8125 | 3 |
[] |
no_license
|
from pymongo import MongoClient
from pprint import pprint
import getpass as gp
pw = gp.getpass()
username = "DJs"
password = pw
dbname = "djs-freeway"
uri = "mongodb+srv://" + username + ":" + password + \
"@ccdm-project.f4c6t.mongodb.net/" + dbname + "?retryWrites=true&w=majority"
try:
client = MongoClient(uri)
db = client.test
print("Connected Successfully!!!")
except:
print("Could not connect to db :( ")
mydb = client[dbname]
de_collection = mydb["freeway_detectors"]
lp_collection = mydb["freeway_loopdata"]
#Query 5 Find the path from Johnson Creek to I-205 NB at Columbia
i = 0
text = 'Johnson Cr NB'
print(i,":",text)
while text != 'I-205 NB at Columbia':
qry5 = [ {'$match': {'locationtext': text}},
{'$lookup': {
'from': 'freeway_detectors',
'let': {'down': '$station.downstream',
'lanenum': '$lanenumber'},
'pipeline': [{'$match': {'$expr': {
'$eq': ['$station.stationid', '$$down']}}},
{'$match': {'$expr': {'$eq': ['$lanenumber', '$$lanenum']}}}
],
'as': 'downstation'}},
{'$limit': 1},
{'$unwind': {'path': '$downstation'}},
{'$project': {'downstation.locationtext': 1}}]
cursor = de_collection.aggregate(qry5)
result = list(cursor)
i += 1
for doc in result:
text2 = doc["downstation"]
text = text2["locationtext"]
print(i,":",text)
| true |
04ec365679df6ca164891dc8bbfbd6c44840e2c3
|
Python
|
Rahul2706/Python_Exercises
|
/ex9.py
|
UTF-8
| 304 | 4.5625 | 5 |
[] |
no_license
|
"""Temperature of a city in Fahrenheit degrees is input through
the keyboard. Write a program to convert this temperature
into Centigrade degrees.
"""
Fahrenheit = int(input("Enter temp. in Fahrenheit : ")) #(32°F − 32) × 5/9 = 0°C
Centigrade = (Fahrenheit - 32)*(5/9)
print(Centigrade)
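# Worked check (added for illustration): 212°F -> (212 - 32) * 5/9 = 100.0°C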
| true |
d185ed94cf607326f274ac67a7b494b970db9bcf
|
Python
|
tnakaicode/jburkardt-python
|
/subset/rat_to_dec.py
|
UTF-8
| 3,853 | 2.609375 | 3 |
[] |
no_license
|
#! /usr/bin/env python3
#
import numpy as np
import matplotlib.pyplot as plt
import platform
import time
import sys
import os
import math
from mpl_toolkits.mplot3d import Axes3D
from sys import exit
sys.path.append(os.path.join("../"))
from base import plot2d, plotocc
from timestamp.timestamp import timestamp
from i4lib.i4vec_print import i4vec_print
from i4lib.i4mat_print import i4mat_print, i4mat_print_some
from r8lib.r8vec_print import r8vec_print
from r8lib.r8mat_print import r8mat_print, r8mat_print_some
from r8lib.r8mat_write import r8mat_write
from i4lib.i4_gcd import i4_gcd
from i4lib.i4_uniform_ab import i4_uniform_ab
from r8lib.r8_to_dec import r8_to_dec
from subset.dec_to_rat import dec_to_rat
def rat_to_dec(top, bot):
# *****************************************************************************80
#
# RAT_TO_DEC converts a rational to a decimal representation.
#
# Discussion:
#
# A rational value is represented by TOP / BOT.
#
# A decimal value is represented as MANTISSA * 10^EXPONENT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 11 June 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer TOP, BOT, the rational value.
#
# Output, integer MANTISSA, EXPONENT, the decimal number.
#
if (top == 0):
mantissa = 0
exponent = 0
return mantissa, exponent
gcd = i4_gcd(top, bot)
top = (top // gcd)
bot = (bot // gcd)
if (bot < 0):
top = -top
bot = -bot
if (bot == 1):
mantissa = top
exponent = 0
return mantissa, exponent
exponent = 0
while ((bot % 10) == 0):
exponent = exponent - 1
bot = (bot // 10)
while ((top % 10) == 0):
exponent = exponent + 1
top = (top // 10)
r = float(top) / float(bot)
if (r < 0.0):
s = -1
r = -r
else:
s = 1
while (r != round(r)):
r = r * 10.0
exponent = exponent - 1
mantissa = s * r
return mantissa, exponent
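# Worked example (added for illustration): rat_to_dec(1, 4) returns (25, -2),
# i.e. 1/4 = 25 * 10^(-2), matching the MANTISSA * 10^EXPONENT form described above.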
def rat_to_dec_test():
# *****************************************************************************80
#
# RAT_TO_DEC_TEST tests RAT_TO_DEC.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 11 June 2015
#
# Author:
#
# John Burkardt
#
print('')
print('RAT_TO_DEC_TEST')
print(' Python version: %s' % (platform.python_version()))
print(' RAT_TO_DEC fraction => decimal,')
print('')
print(' In this test, choose the top and bottom')
print(' of a rational at random, and compute the')
print(' equivalent real number.')
print('')
print(' Then convert to decimal, and the equivalent real.')
print('')
print(' Then convert back to rational and the equivalent real.')
seed = 123456789
for i in range(0, 10):
rat_top, seed = i4_uniform_ab(-1000, 1000, seed)
rat_bot, seed = i4_uniform_ab(1, 1000, seed)
r1 = float(rat_top) / float(rat_bot)
mantissa, exponent = rat_to_dec(rat_top, rat_bot)
r2 = float(mantissa) * 10.0 ** exponent
rat_top2, rat_bot2 = dec_to_rat(mantissa, exponent)
r3 = float(rat_top2) / float(rat_bot2)
print('')
print(' %g = %d / %d' % (r1, rat_top, rat_bot))
print(' %g = %d * 10^%d' % (r2, mantissa, exponent))
        print(' %g = %d / %d' % (r3, rat_top2, rat_bot2))
#
# Terminate.
#
print('')
print('RAT_TO_DEC_TEST')
print(' Normal end of execution.')
return
if (__name__ == '__main__'):
from timestamp import timestamp
timestamp()
rat_to_dec_test()
timestamp()
| true |
0ea6edce0f18487af525ac37989ad6411ac2c98d
|
Python
|
naitemach/IT_2ndyearlab
|
/dsa/lab4/hashtable.py
|
UTF-8
| 1,348 | 3.6875 | 4 |
[] |
no_license
|
class HashTable(object):
def __init__(self):
self.t=[None for i in range(30)]
def insertKey(self,key,value):
val=hashvalue(key)
slot=val%30
if self.t[slot]==None:
self.t[slot]=LinkedList()
self.t[slot].insertAtHead(key,value)
    def searchKey(self,key):
        value=hashvalue(key)
        slot=value%30
        temp=self.t[slot]
        if temp is None:
            # no list in this slot, so the key cannot be present
            print(False)
            return
        res=temp.searchLinkedList(temp.head,key)
        print(res)
def keys(self):
for i in self.t:
if i!=None:
if i.head!=None:
temp=i.head
while temp!=None:
print(temp.key,end=",")
temp=temp.next
print()
class LinkedList(object):
def __init__(self):
self.head=None
def insertAtHead(self,key,value):
temp=ListNode()
temp.key=key
temp.value=value
if self.head!=None:
temp.next=self.head
self.head=temp
def searchLinkedList(self,head,key):
temp=self.head
if temp==None:
            return False
else:
while temp!=None:
if temp.key==key:
return True
temp=temp.next
return False
class ListNode(object):
def __init__(self,key=None,value=None):
self.key=key
self.value=value
self.next=None
def hashvalue(key):
value=0
for i in key:
value+=ord(i)
return value
def main():
ob=HashTable()
ob.insertKey("cat",1)
ob.insertKey("act",2)
ob.insertKey("cow",3)
ob.insertKey("parrot",4)
ob.keys()
ob.searchKey("cat")
if __name__ == '__main__':
main()
| true |
bd6dfa44a41b187d2351316df50e1520737cb667
|
Python
|
oywm/LearnCode
|
/GUI/grid.py
|
UTF-8
| 495 | 2.96875 | 3 |
[] |
no_license
|
from tkinter import *
from tkinter import messagebox
root = Tk()
Label(root, text='账号:').grid(row=0)
Label(root, text='密码:').grid(row=1)
def callback():
    if messagebox.showinfo(message='您好,登陆成功'):
message = e1.get()
print(message)
print('欢迎进入游戏')
e1 = Entry(root)
e1.grid(row=0, column='1')
e2 = Entry(root)
e2.grid(row=1, column='1')
button = Button(root, text='登陆', command=callback)
button.grid(row=2)
root.mainloop()
| true |
6d94018cc995fa82a646ddecb1a49d36b7c1e8bb
|
Python
|
ShirleyMwombe/Training2
|
/file detection.py
|
UTF-8
| 281 | 3.390625 | 3 |
[] |
no_license
|
import os
path = 'D:\\Linux\\test'
if os.path.exists(path):
    print('That path exists')
if os.path.isfile(path):
print('That is a file')
elif os.path.isdir(path):
print("That is a directory")
else:
print('That location does not exist')
| true |
967c08a27a7277936816b2e37e1f3ba8c5b21769
|
Python
|
furahadamien/Sentiment-Analysis
|
/analyzer.py
|
UTF-8
| 4,632 | 2.578125 | 3 |
[] |
no_license
|
#import io
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from nltk import FreqDist
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
from nltk import NaiveBayesClassifier
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from nltk import classify
categories =['neg', 'pos']
directory = 'rt-polaritydata/rt-polaritydata/training'
train_dirctory = 'rt-polaritydata/rt-polaritydata/testing'
reviews = load_files(directory)
review_test = load_files(train_dirctory)
docs_test = review_test.data
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(reviews.data)
tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
X_train_tf = tf_transformer.transform(X_train_counts)
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
# training the classifier
#Multinomial Naive Bayes classifier
clf = MultinomialNB().fit(X_train_tfidf, reviews.target)
docs_new = [' this is a terrible movie and I would never watch it']
X_new_counts = count_vect.transform(docs_new)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
predicted = clf.predict(X_new_tfidf)
for doc, category in zip(docs_new, predicted):
    print('%r => %s' % (doc, reviews.target_names[category]))
#building a pipeline
text_clf = Pipeline([('vect', CountVectorizer()),('tfidf', TfidfTransformer()),('clf', MultinomialNB()),])
text_clf.fit(reviews.data,reviews.target)
#evaluation of model
predicted = text_clf.predict(docs_test)
print('Naive Bayes accuracy %r:' % np.mean(predicted == review_test.target))
print('Naive Bayes model confusion Matrix')
print(metrics.confusion_matrix(review_test.target, predicted))
#print(metrics.classification_report(reviews.target, predicted, target_names=review_test.target_names))
parameters = {'vect__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'clf__alpha': (1e-2, 1e-3),
}
gs_clf = GridSearchCV(text_clf, parameters, cv=5, iid=False, n_jobs=-1)
gs_clf = gs_clf.fit(reviews.data[:400], reviews.target[:400])
print(reviews.target_names[gs_clf.predict(['God is love'])[0]])
print(gs_clf.best_score_ )
for param_name in sorted(parameters.keys()):
print("%s: %r" % (param_name, gs_clf.best_params_[param_name]))
# Support Vector Machine model
text_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='hinge', penalty='l2',
alpha=1e-3, random_state=42,
max_iter=5, tol=None)),])
text_clf.fit(reviews.data, reviews.target)
#SVM evaluation
predicted = text_clf.predict(docs_test)
print('Support Vector Machine accuracy %r:' %np.mean(predicted == review_test.target) )
print('Support Vector Machine model confusion Matrix')
print(metrics.confusion_matrix(review_test.target, predicted))
#print(metrics.classification_report(reviews.target, predicted,target_names=review_test.target_names))
parameters = {'vect__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'clf__alpha': (1e-2, 1e-3),
}
gs_clf = GridSearchCV(text_clf, parameters, cv=5, iid=False, n_jobs=-1)
gs_clf = gs_clf.fit(reviews.data[:400], reviews.target[:400])
print(reviews.target_names[gs_clf.predict(['God is love'])[0]])
print(gs_clf.best_score_ )
for param_name in sorted(parameters.keys()):
print("%s: %r" % (param_name, gs_clf.best_params_[param_name]))
#Logistic Regression model
pipeline = Pipeline([('vect', CountVectorizer()),('tfidf', TfidfTransformer()),('clf', LogisticRegression())
])
pipeline.fit(reviews.data,reviews.target)
predicted = pipeline.predict(docs_test)
print('Logistic regression accuracy %r:' %np.mean(predicted == review_test.target) )
print('Logistic Regression confusion Matrix')
print(metrics.confusion_matrix(review_test.target, predicted))
#print(metrics.classification_report(reviews.target, predicted,target_names=review_test.target_names))
#LR evaluation
parameters = {'vect__ngram_range': [(1, 1), (1, 2)],
              'tfidf__use_idf': (True, False),
              'clf__C': (0.1, 1.0),
              }
gs_clf = GridSearchCV(pipeline, parameters, cv=5, iid=False, n_jobs=-1)
gs_clf = gs_clf.fit(reviews.data[:400], reviews.target[:400])
print(reviews.target_names[gs_clf.predict(['God is love'])[0]])
print(gs_clf.best_score_ )
for param_name in sorted(parameters.keys()):
print("%s: %r" % (param_name, gs_clf.best_params_[param_name]))
| true |
0e02e416a920f06dc514b6aa2f8117233f0f7ecc
|
Python
|
cut3my/PythonExe
|
/coffeemachine/coffeemachine.py
|
UTF-8
| 805 | 2.984375 | 3 |
[] |
no_license
|
from data import MENU, resources
import os
def clear():
if os.name == "nt":
_ = os.system('cls')
def is_rss_suff(order):
for item in order:
        if order[item] > resources[item]:
print(f"Sorry there is not enough {item}")
return False
return True
profit = 0
is_on = True
while is_on:
user_pref = input("What would you like? (Espresso/Latte/Cappucino)\n").lower()
if user_pref == "off":
is_on = False
elif user_pref == "report":
print(f"Water: {resources['water']}ml")
print(f"Milk: {resources['milk']}ml")
print(f"Coffee: {resources['coffee']}g")
print(f"Money: ${profit}")
else:
drink = MENU[user_pref]
is_rss_suff(drink['ingredients'])
| true |
d55f0b79f445481c8f9ae97bee130f9b85a16602
|
Python
|
Skycker/opensky_toy_api
|
/tests/test_api.py
|
UTF-8
| 1,961 | 2.921875 | 3 |
[
"MIT"
] |
permissive
|
import unittest
from unittest.mock import Mock, patch
from requests.exceptions import ConnectionError, Timeout
from opensky_toy_api import AirplaneState, OpenSkyApi, OpenSkyApiException
class TestAirplaneState(unittest.TestCase):
def test_getting_distance_valid(self):
first_point = (54.7800533, 31.8598796)
second_point = (55.7494733, 37.3523218)
state = AirplaneState(callsign='test plane', latitude=first_point[0], longitude=first_point[1])
self.assertEqual(state.get_distance_to(*second_point), 364.2)
def test_getting_distance_empty(self):
test_point = (1, 2)
state_without_latitude = AirplaneState(callsign='test plane', latitude=None, longitude=3)
state_without_longitude = AirplaneState(callsign='another test plane', latitude=4, longitude=None)
self.assertIs(state_without_latitude.get_distance_to(*test_point), None)
self.assertIs(state_without_longitude.get_distance_to(*test_point), None)
class TestOpenskyApi(unittest.TestCase):
def test_getting_planes_errors(self):
api = OpenSkyApi()
error_mocks = [Mock(side_effect=ConnectionError), Mock(side_effect=Timeout)]
for mock in error_mocks:
with patch('requests.get', mock):
with self.assertRaises(OpenSkyApiException):
api.get_states()
def test_getting_planes_near(self):
test_point = (54.7800533, 31.8598796)
test_radius = 50
plain_near = AirplaneState('test', 54.8566007, 32.014617)
plain_far_away = AirplaneState('test', -27.3818631, 152.7130084)
mock = Mock(return_value=[plain_near, plain_far_away])
with patch('opensky_toy_api.OpenSkyApi.get_states', mock):
api = OpenSkyApi()
states_near = api.get_states_near_place(*test_point, test_radius)
self.assertIn(plain_near, states_near)
self.assertNotIn(plain_far_away, states_near)
| true |
db0588380edf9f48d4bb9153f00b51722c4a285d
|
Python
|
lucasfreire017/Desafios_Python
|
/Exercício Python #115 - Cadastro de pessoas - A/main.py
|
UTF-8
| 3,119 | 3.390625 | 3 |
[
"MIT"
] |
permissive
|
from Desafio_115_a.arquivo import pessoas
from time import sleep
# Error handling in case the file does not exist
try:
arquivo = open('bd/pessoas.txt', 'r', encoding='utf-8')
except FileNotFoundError:
arquivo = open('bd/pessoas.txt', 'w', encoding='utf-8')
arquivo.close()
def titulo(msg, cor=36):
"Exibição elegante dos menus"
print('\033[1m-''\033[m' * 40)
print(f'\033[{cor};1m{msg:^40}\033[m')
print('\033[1m-''\033[m' * 40)
# Main program
while True:
    # ----- Main Menu -----
titulo('MENU PRINCIPAL')
print('\033[32;1m1 -\033[m Ver pessoas cadastradas')
print('\033[32;1m2 -\033[m Cadastrar nova pessoa')
print('\033[32;1m3 -\033[m Sair do sistema\n')
while True:
try:
opcao_select = int(input('Digite sua opção: '))
if opcao_select > 0 and opcao_select < 4:
break
else:
print('\n\033[31mErro! Digite uma opção entre 1 e 3\033[m')
except (ValueError, TypeError, KeyboardInterrupt, Exception):
print('\n\033[31mErro! Valor inválido. Por favor, digite uma opção válida\033[m')
    # ----- Listing People -----
    if opcao_select == 1:
        # CLEAR SCREEN
titulo('LISTA DE PESSOAS', 32)
print(f'{pessoas.leia()}\n')
sleep(3)
    # ----- Registering People -----
    if opcao_select == 2:
        # CLEAR SCREEN
titulo('CADASTRO DE PESSOAS', 31)
while True:
            # Error handling (Name)
while True:
try:
nome = str(input('Nome: ')).title().strip()
if pessoas.validarNome(nome) == False:
print('\033[31mDigite um nome válido\033[m')
else:
break
except (KeyboardInterrupt):
print('\033[31mNome inválido')
            # Error handling (Age)
while True:
try:
idade = int(input('Idade: '))
erro = False
if idade <= 0:
print('\033[31mDigite uma idade válida\033[m')
erro = True
elif idade >150:
print('\033[31mDigite uma idade válida\033[m')
erro = True
except (ValueError, TypeError, KeyboardInterrupt):
print('\033[31mDigite uma idade válida\033[m')
else:
if erro == False:
break
break
        # ----- Animation -----
print('\033[1m\nSALVANDO DADOS...\033[m')
sleep(1.5)
print('\033[32;1mDADOS SALVOS COM SUCESSO!\033[m')
sleep(1)
pessoas.cadastro(nome, idade)
    # ----- Program Exit -----
if opcao_select == 3:
print('\033[1mENCERRANDO...\033[m')
sleep(3)
print('\033[1m\n----- OBRIGADO POR USAR O PROGRAMA -----\033[m')
print('\033[1m VOLTE SEMPRE\033[m')
sleep(1)
break
| true |
af1c654a0c03c2ed0621c22662b9bf646fbef096
|
Python
|
sidkrishna92/survModels_Insurance
|
/readPreprocess.py
|
UTF-8
| 2,033 | 3.0625 | 3 |
[] |
no_license
|
import pandas as pd
class readPreprocess():
"""
Read data from Input Files
Pre-process and clean data
"""
def __init__(self, filename):
self.filename = filename
self.data_df = pd.DataFrame()
self.filter_df = pd.DataFrame()
self.read_data()
# self.preprocess()
def read_data(self):
print("Reading Input Data ....")
self.data_df = pd.read_csv(self.filename)
#return self.data_df
def preprocess(self,dataframe):
print("Filtering out required columns and cleaning data ...")
import datetime as dt
self.data_df = dataframe
filter_df1 = self.data_df.iloc[:,[50, 64, 5, 6, 18, 19, 21, 49]]
##Selecting unique rows by using distinct Loan ID
filter_df1 = filter_df1.drop_duplicates('LoanKey')
##Filter Loan Statuses useful for survival analysis
statusOfInterest = ["Completed", "Current", "ChargedOff", "Defaulted", "Cancelled"]
filter_df2 = filter_df1.loc[filter_df1['LoanStatus'].isin(statusOfInterest)]
##Assign Boolean (~Indicator for surv models) for Defaulted/ChargedOff Loan types
# 0 = Censored Data, 1 = Non-censored(event) data
def bool(row):
if row.LoanStatus == 'Defaulted' or row.LoanStatus == 'ChargedOff':
return 1
else:
return 0
filter_df2.loc[:,'status'] = filter_df2.apply(bool, axis=1)
        ##Assign final dates for all current loan types with nan
filter_df2['ClosedDate'] = filter_df2['ClosedDate'].fillna("2018-07-03 00:00:00")
filter_df2['ClosedDate'] = pd.to_datetime(filter_df2['ClosedDate'])
filter_df2['LoanOriginationDate'] = pd.to_datetime(filter_df2['LoanOriginationDate'])
filter_df2['timeDiff'] = filter_df2['ClosedDate'] - filter_df2['LoanOriginationDate']
filter_df3 = filter_df2[(filter_df2['timeDiff'].dt.days > 0)]
self.filter_df = filter_df3[filter_df3['LoanOriginationDate'].dt.year == 2006]
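# Hypothetical usage sketch (added for illustration, not part of the original module);
# the file name below is an assumption, the column names come from the code above:
#
#   rp = readPreprocess('loan_data.csv')      # reads the csv on construction
#   rp.preprocess(rp.data_df)                 # fills rp.filter_df with the 2006 loans
#   print(rp.filter_df[['LoanStatus', 'status', 'timeDiff']].head())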
| true |
c3e0d313428da49aea693e39b48d77658cf92163
|
Python
|
Mossata/Car-Go-Vroom-Vroom
|
/Sprites-Background-Classes.py
|
UTF-8
| 2,827 | 3.6875 | 4 |
[] |
no_license
|
# links:
# https://stackoverflow.com/questions/60387843/having-a-sprite-follow-a-line-of-a-specific-color-pygame
import pygame as pg
#83b925 - number for green
#7f7f7f - number for grey
#Loading Backgrounds
pg.init()
background = pg.transform.smoothscale(pg.image.load("green.png"), (1370,710))
bg_size = background.get_size()
screen = pg.display.set_mode(bg_size)
screen.blit(background, (0, 0))
overground = pg.transform.smoothscale(pg.image.load("track.png"), (1370,710))
track_size = overground.get_size()
track = pg.display.set_mode(track_size)
pg.display.set_caption("First Game")
screenWidth = 1370
screenHeight = 710
vel = 5
class player():
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.vel = vel
self.left = False
self.right = False
self.up = False
self.down = False
def draw(self, screen) :
if self.left == True:
screen.blit(walkLeft, (self.x,self.y))
elif self.right == True:
screen.blit(walkRight, (self.x,self.y))
elif self.up == True:
screen.blit(walkUp, (self.x,self.y))
elif self.down == True:
screen.blit(walkDown, (self.x,self.y))
#else:
#screen.blit(walkRight, (x,y))
#Loading Sprites
walkRight = pg.image.load("R1.png")
walkLeft = pg.image.load("L1.png")
walkUp = pg.image.load("U1.png")
walkDown = pg.image.load("D1.png")
def redrawGameWindow():
screen.blit(background, (0, 0))
screen.blit(overground, (0, 0))
#Runs draw method from player class:
P1.draw(screen)
pg.display.update()
# Creates Player 1 Object
P1 = player(20, 400, 34, 56)
run = True
while run:
pg.time.delay(100)
for event in pg.event.get():
if event.type == pg.QUIT:
run = False
keys = pg.key.get_pressed()
if keys[pg.K_LEFT] and P1.x > P1.vel:
P1.x-= P1.vel
P1.left = True
P1.right = False
P1.up = False
P1.down = False
if keys[pg.K_RIGHT] and P1.x < screenWidth - P1.width - P1.vel:
P1.x+= P1.vel
P1.left = False
P1.right = True
P1.up = False
P1.down = False
if keys[pg.K_UP] and P1.y > P1.vel:
P1.y-= P1.vel
P1.left = False
P1.right = False
P1.up = True
P1.down = False
    if keys[pg.K_DOWN] and P1.y < screenHeight - P1.height - P1.vel:
P1.y += P1.vel
P1.left = False
P1.right = False
P1.up = False
P1.down = True
#else:
# left = False
# right = False
# up = False
# down = False
redrawGameWindow()
pg.quit()
| true |
ec18908420948b9c8ab3329021ca90287421f1fe
|
Python
|
parthpankajtiwary/codeforces
|
/round287/A.py
|
UTF-8
| 399 | 2.6875 | 3 |
[] |
no_license
|
n, k = map(int, raw_input().split())
a = [int(x) for x in raw_input().split()]
s = [int(x) for x in a]
a.sort()
indices = []
count = 0
sum = 0
indexRemoved = 0
for x in a:
if sum <= k and (sum + x) <= k:
sum += x
count += 1
if s.index(x) not in indices:
indices.append(s.index(x))
s = s[:s.index(x)] + ["#"] + s[s.index(x)+1:]
print s
print count
for x in indices:
print x+1,
| true |
a5a4affa934ab66d26c230299a762bb3845da157
|
Python
|
ottogroup/dstoolbox
|
/dstoolbox/pipeline.py
|
UTF-8
| 20,835 | 2.640625 | 3 |
[
"Apache-2.0"
] |
permissive
|
"""Extend sklearn's Pipeline and FeatureUnion."""
import itertools
from functools import wraps
import time
import types
import warnings
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.pipeline import _transform_one
from sklearn.pipeline import _fit_transform_one
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Parallel
from sklearn.pipeline import Pipeline
from sklearn.pipeline import delayed
from sklearn.utils import tosequence
from sklearn.utils.metaestimators import if_delegate_has_method
class PipelineY(Pipeline):
"""Extension of sklearn Pipeline with tranformer for y values.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
memory : Instance of sklearn.external.joblib.Memory or string, optional \
(default=None)
Used to cache the fitted transformers of the pipeline. By
default, no caching is performed. If a string is given, it is
the path to the caching directory. Enabling caching triggers a
clone of the transformers before fitting. Therefore, the
transformer instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
y_transformer : transformer object
Transformer object that transforms the y values (e.g.,
discretiziation). May optionally support inverse_transform
method.
predict_use_inverse : bool (default=False)
Determine if ``predict`` should use the inverse transform of
y_transformer on the output.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
"""
def __init__(
self,
steps,
y_transformer,
predict_use_inverse=False,
**kwargs
):
warnings.warn(DeprecationWarning(
"PipelineY is deprecated and will be removed in a future release. "
"Please use sklearn.compose.TransformedTargetRegressor instead."
))
self.y_transformer = y_transformer
self.predict_use_inverse = predict_use_inverse
super().__init__(steps=steps, **kwargs)
if not hasattr(y_transformer, "transform"):
raise TypeError("y_transform should have a transform method.")
def y_transform(self, y):
"""Calls transform method on transformer object.
Parameters
----------
y : iterable
Targets.
Returns
-------
yt : iterable
Transformed targets.
"""
return self.y_transformer.transform(y)
def y_inverse_transform(self, yt):
"""If available, transformed target values are transformed back to the
original representation.
Parameters
----------
yt : iterable
Transformed targets.
Returns
-------
y : iterable
Original targets.
"""
return self.y_transformer.inverse_transform(yt)
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator. Target
values are tranformed before being passed to original fit method.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
self.y_transformer.fit(y)
yt = self.y_transform(y)
return super().fit(X, yt, **fit_params)
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform
the data, then use fit_transform on transformed data using the
final estimator. Target values are tranformed before being
passed to original fit method.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
self.fit(X, y, **fit_params)
return self.transform(X)
# pylint: disable=arguments-differ
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X, inverse=None):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of
first step of the pipeline.
inverse : bool, default: None
Whether to apply inverse_transform on predicted values.
If not provided, I will use ``predict_use_inverse`` to
determine whether the inverse transform should be applied.
"""
if inverse is None:
inverse = self.predict_use_inverse
y_pred = super().predict(X)
if inverse:
y_pred = self.y_inverse_transform(y_pred)
return y_pred
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score. Target values are tranformed before being
passed to original score method.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable
Targets used for scoring. Must fulfill label requirements
for all steps of the pipeline.
"""
yt = self.y_transform(y)
return super().score(X, yt)
def get_params(self, deep=True):
# BBB This is not required for scikit-learn 0.17
out = super().get_params(deep)
out['steps'] = self.steps
out['y_transformer'] = self.y_transformer
return out
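# Hypothetical usage sketch (added for illustration, not part of the original module);
# assumes a LabelEncoder-style target transformer and an arbitrary classifier:
#
#   from sklearn.preprocessing import LabelEncoder
#   from sklearn.linear_model import LogisticRegression
#   pipe = PipelineY([('clf', LogisticRegression())],
#                    y_transformer=LabelEncoder(),
#                    predict_use_inverse=True)
#   pipe.fit(X, y)            # y is encoded by LabelEncoder before fitting
#   pipe.predict(X)           # predictions mapped back to the original labels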
class SliceMixin:
"""Allows more comfortable access to steps of Pipeline or
FeatureUnion.
Create a new class that subclasses Pipeline or FeatureUnion and
this. That class allows to:
1) access by name (e.g. pipeline['clf'])
2) access by index (e.g. pipeline[-1])
3) access by slice (e.g. pipeline[:3])
Example
-------
>>> class SlicePipeline(SliceMixin, Pipeline):
>>> pass
"""
def __getitem__(self, idx):
container = (getattr(self, 'steps', False) or
getattr(self, 'transformer_list', False))
if not container:
raise AttributeError("SliceMixin requires a 'steps' or a "
"'transformer_list' attribute.")
if isinstance(idx, str):
return dict(container)[idx]
if isinstance(idx, slice):
return container[idx]
return container[idx][1]
class DictFeatureUnion(FeatureUnion):
"""This is like sklearn's FeatureUnion class, but intead of
stacking the final features, merge them to a dictionary.
The dictionaries keys correspond to the transformer step names, the
values to the result of the transformation. Name collisions are not
resolved, the user has to take care not to duplicate names.
DictFeatureUnions can be nested.
Parameters
----------
transformer_list : list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs : int, optional
Number of jobs to run in parallel (default 1).
transformer_weights : dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def _update_transformed_dict(self, Xs):
Xt = {}
for (name, _), xs in zip(self.transformer_list, Xs):
if isinstance(xs, dict):
Xt.update(xs)
else:
Xt[name] = xs
return Xt
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and
merge results into a dictionary.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
y : iterable, default=None
Training targets.
**fit_params : dict, optional
Parameters to pass to the fit method.
Returns
-------
Xt : dict
Dictionary with the step names as keys and transformed
data as values.
"""
self._validate_transformers()
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, X, y, weight,
**fit_params)
for _, trans, weight in self._iter())
if not result:
# All transformers are None
return {}
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
Xt = self._update_transformed_dict(Xs)
return Xt
def transform(self, X):
"""Transform X separately by each transformer, merge results
into a dictionary.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
Xt : dict
Dictionary with the step names as keys and transformed
data as values.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, X, None, weight)
for name, trans, weight in self._iter())
if not Xs:
# All transformers are None
return {}
Xt = self._update_transformed_dict(Xs)
return Xt
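# Hypothetical usage sketch (added for illustration, not part of the original module);
# the step names and transformers are illustrative only:
#
#   from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
#   union = DictFeatureUnion([('counts', CountVectorizer()),
#                             ('tfidf', TfidfVectorizer())])
#   Xt = union.fit_transform(docs)
#   # Xt is a dict: {'counts': <sparse matrix>, 'tfidf': <sparse matrix>}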
class DataFrameFeatureUnion(FeatureUnion):
"""Extends FeatureUnion to output Pandas Dataframe.
Modified FeatureUnion that outputs a pandas dataframe if all
transformers output a dataframe.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
ignore_index: boolean, optional
Strips all indexs from all dataframes before concatenation.
copy: boolean, optional
Set copy-Parameter of pandas concat-Function.
keep_original: bool (default=False)
If True, instead of only returning the transformed data,
concatenate them to the original data and return the
result.
"""
def __init__(
self,
transformer_list,
n_jobs=1,
transformer_weights=None,
verbose=False,
ignore_index=True,
copy=True,
keep_original=False,
):
self.ignore_index = ignore_index
self.copy = copy
self.keep_original = keep_original
super().__init__(
transformer_list=transformer_list,
n_jobs=n_jobs,
transformer_weights=transformer_weights,
verbose=verbose,
)
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and
concatenate results.
Parameters
----------
X : array-like, sparse matrix or dataframe,
shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like, sparse matrix or dataframe,
shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
self._validate_transformers()
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, X, y, weight,
**fit_params)
for _, trans, weight in self._iter())
if not result:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*result)
if self.keep_original:
Xs = list(itertools.chain([X], Xs))
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
elif all(isinstance(f, (pd.DataFrame, pd.Series)) for f in Xs):
if self.ignore_index:
Xs = [f.reset_index(drop=True) for f in Xs]
Xs = pd.concat(Xs, axis=1, copy=self.copy)
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate
results.
Parameters
----------
X : array-like, sparse matrix or dataframe,
shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like, sparse matrix or dataframe,
shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, X, None, weight)
for _, trans, weight in self._iter())
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
if self.keep_original:
Xs = list(itertools.chain([X], Xs))
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
elif all(isinstance(f, (pd.DataFrame, pd.Series)) for f in Xs):
if self.ignore_index:
Xs = [f.reset_index(drop=True) for f in Xs]
Xs = pd.concat(Xs, axis=1, copy=self.copy)
else:
Xs = np.hstack(Xs)
return Xs
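# Hypothetical usage sketch (added for illustration, not part of the original module);
# assumes custom transformers that each return a pandas DataFrame, so the result of
# the union is itself a DataFrame:
#
#   union = DataFrameFeatureUnion(
#       [('dates', DateFeatures()), ('text', TextFeatures())],  # illustrative names
#       keep_original=True, ignore_index=True)
#   df_out = union.fit_transform(df)   # original columns plus both feature blocks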
def timing_decorator(
est,
name,
method_name,
sink=print,
):
"""Decorator that wraps the indicated method of the estimator into
a wrapper that measures time.
By default, the outputs are just printed to the console. They take a
form that allows the user to parse each line as a dict or json.
est : sklearn.BaseEstimator
An sklearn estimator that is part of the profiled pipeline
steps.
name : str
Name to be displayed; by default, the name given in the `steps`
parameter of the pipeline.
method_name : str
Method to be profiled; either one of 'fit', 'transform',
'fit_transform', 'predict', 'predict_proba'.
sink : callable (default=print)
A callable that the profiling message is sent to; e.g. the print
function or a logger.
"""
func = getattr(est, method_name)
@wraps(func)
def wrapper(*args, **kwargs):
"""Measure time of method call and send message to sink."""
tic = time.time()
result = func(*args[1:], **kwargs)
toc = time.time()
s_name = '"name": {:<30}'.format('"' + name[:28] + '"')
s_method = '"method": {:<20}'.format('"' + method_name[:18] + '"')
s_dura = '"duration": {:>12.3f}'.format(toc - tic)
s_shape_tmpl = '"shape": {:<}'
try:
shape = result.shape
shape_x = '"' + 'x'.join(map(str, shape)) + '"'
s_shape = s_shape_tmpl.format(shape_x)
except AttributeError:
s_shape = s_shape_tmpl.format('"-"')
out = f'{s_name}, {s_method}, {s_dura}, {s_shape}'
sink("{" + out + "}")
return result
# pylint: disable=protected-access
wrapper._has_timing = True
return wrapper
def _add_timed_sequence(steps, sink):
"""For each step in steps, decorate its relevant methods."""
seq = tosequence(steps)
method_names = ('fit', 'transform', 'fit_transform', 'predict',
'predict_proba')
for name, step in seq:
for method_name in method_names:
old_func = getattr(step, method_name, None)
# pylint: disable=protected-access
if not old_func or hasattr(old_func, '_has_timing'):
continue
new_func = timing_decorator(step, name, method_name, sink)
setattr(
step,
new_func.__name__,
types.MethodType(new_func, step),
)
return seq
def _shed_timed_sequence(steps):
"""For each step in steps, remove the decorator."""
method_names = ('fit', 'transform', 'fit_transform', 'predict',
'predict_proba')
for _, step in steps:
for method_name in method_names:
if not hasattr(step, method_name):
continue
decorated = getattr(step, method_name)
closure = decorated.__closure__
if closure:
meth = closure[0].cell_contents
setattr(step, meth.__name__, meth)
class TimedPipeline(Pipeline):
"""Timed pipeline of transforms with a final estimator.
Note: In contrast to sklearn.pipeline.Pipeline, this additionally
prints information about how long each fit, transformation, and
prediction step took. Although sklearn's Pipeline has a verbose
argument since 0.21 which also prints how long transformation
steps took, the functionality is not exactly the
same. E.g. TimedPipeline also prints results from prediction and
allows to pass in a custom sink for the logs.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
memory : Instance of sklearn.external.joblib.Memory or string, optional \
(default=None)
Used to cache the fitted transformers of the pipeline. By
default, no caching is performed. If a string is given, it is
the path to the caching directory. Enabling caching triggers a
clone of the transformers before fitting. Therefore, the
transformer instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
sink : callable (default=print)
The target where the string messages are sent to. Is print by
default but could, for example, be switched to a logger.
verbose : boolean, optional(default=False)
If True, the time elapsed while fitting each transformer will
be printed as it is completed. Note: This is sklearn
functionality, not dstoolbox.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
"""
def __init__(self, steps, memory=None, verbose=False, sink=print):
# pylint: disable=super-init-not-called
self.steps = _add_timed_sequence(steps, sink)
self.sink = sink
self.memory = memory
self.verbose = verbose
self._validate_steps()
def __setstate__(self, state):
state['steps'] = _add_timed_sequence(state['steps'], state['sink'])
self.__dict__.update(state)
def shed_timing(self):
"""Call this if you want to get rid of timing messages."""
_shed_timed_sequence(self.steps)
def add_timing(self):
"""Call this if you want to re-apply timing messages (after
having called `shed_timing`).
"""
self.steps = _add_timed_sequence(self.steps, self.sink)
| true |
ab1853170e4604f7fcb87c888f16e537cf8dca5a
|
Python
|
youhusky/Facebook_Prepare
|
/121. Best Time to Buy and Sell Stock.py
|
UTF-8
| 1,059 | 3.828125 | 4 |
[
"MIT"
] |
permissive
|
# Say you have an array for which the ith element is the price of a given stock on day i.
# If you were only permitted to complete at most one transaction (ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.
# Example 1:
# Input: [7, 1, 5, 3, 6, 4]
# Output: 5
# max. difference = 6-1 = 5 (not 7-1 = 6, as selling price needs to be larger than buying price)
# Example 2:
# Input: [7, 6, 4, 3, 1]
# Output: 0
# In this case, no transaction is done, i.e. max profit = 0.
# DP question
class Solution(object):
def maxProfit(self, prices):
"""
O(n)
:type prices: List[int]
:rtype: int
"""
if not prices:
return 0
# global max
res = 0
# global min
sofar_min = prices[0]
# init
for each in range(len(prices)):
if prices[each] < sofar_min:
sofar_min = prices[each]
else:
res = max(res, prices[each] - sofar_min)
return res
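# Hypothetical usage check (added for illustration, not part of the original file):
if __name__ == '__main__':
    s = Solution()
    assert s.maxProfit([7, 1, 5, 3, 6, 4]) == 5  # buy at 1, sell at 6
    assert s.maxProfit([7, 6, 4, 3, 1]) == 0     # prices only fall, so no transaction
    print('maxProfit examples passed')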
| true |
f5dcd0cfa3cb196dac384bffe47f32a5eb744b9c
|
Python
|
witalomonteiro/compilador_de_Jogos
|
/forca.py
|
UTF-8
| 2,207 | 3.828125 | 4 |
[] |
no_license
|
import random
def jogar():
print("\n***************************************")
print("***** Bem-Vindo ao Jogo da Forca ******")
print("***************************************")
palavras = carregar_palavras("palavras.txt")
palavra_secreta = sortear_palavra(palavras)
palavra_dica = criar_dica(palavra_secreta)
solicitar_letra_palpite(palavra_secreta, palavra_dica)
# Functions
def carregar_palavras (nome_arquivo):
palavras = []
with open(nome_arquivo, "r", encoding="utf-8") as arquivo:
for linha in arquivo:
palavras.append(linha.strip().lower())
return palavras
def sortear_palavra(palavras):
random.seed()
palavra_secreta = random.choice(palavras)
return palavra_secreta
def criar_dica(palavra_secreta):
palavra_dica = len(palavra_secreta) * "-"
return palavra_dica
def solicitar_letra_palpite(palavra_secreta, palavra_dica):
tentativas = 3
print("\nDica: Frutas")
for rodada in range(1, tentativas + 1):
print(f"\nTentativa {rodada} de {tentativas}")
# print(palavra_secreta)
print(f"Palavra Secreta: {palavra_dica}")
letra_palpite = input("Digite seu palpite: ").strip().lower()
palavra_dica = validar_letra_palpite(palavra_secreta, palavra_dica, letra_palpite)
validar_palavra_secreta(palavra_secreta, palavra_dica)
def validar_letra_palpite(palavra_secreta, palavra_dica, letra_palpite):
if letra_palpite in palavra_secreta:
palavra_dica = list(palavra_dica)
for id, letra in enumerate(palavra_secreta):
if letra_palpite == letra:
# print(id)
palavra_dica[id] = letra_palpite
palavra_dica = "".join(palavra_dica)
else:
print(f"Letra não encontrada!")
return palavra_dica
def validar_palavra_secreta(palavra_secreta, palavra_dica):
palavra_palpite = input(f"\nQual é a palavra secreta '{palavra_dica}' ? ").strip().lower()
if palavra_palpite == palavra_secreta:
print("Parabéns! Você Acertou!")
else:
print("Errou! Fim de Jogo!")
if (__name__ == "__main__"):
jogar()
| true |
e1086b3f54e9f5d9f3be317fb56e419d9cba19e2
|
Python
|
ag300g/code_review_20180930
|
/4/R2F/submission.py
|
UTF-8
| 14,016 | 2.875 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
'''
Sample submission for 2nd round competition.
'''
import pandas as pd
import numpy as np
# import all modules been used
class UserPolicy:
def __init__(self, initial_inventory, sku_cost):
self.inv = initial_inventory
self.costs = sku_cost
self.extra_shipping_cost_per_unit = 0.01
self.fixed_replenish_cost_per_order = 0.1
self.sku_limit = np.asarray([300, 300, 300, 300, 300])
self.capacity_limit = np.asarray([4000, 5000, 6000, 2000, 1000])
self.abandon_rate = np.asarray([1. / 100, 7. / 100, 10. / 100, 9. / 100, 8. / 100])
self.decision_record = []
def daily_decision(self, t):
'''
daily decision of inventory allocation
input values:
t, decision date
return values:
inventory decision, 2-D numpy array, shape (6,1000), type integer
'''
# Your algorithms here
# simple rule: no replenishment or transshipment
capacity_limit = self.capacity_limit
abandon_rate = self.abandon_rate
df_inv = self.inv.sort_values(['dc_id', 'item_sku_id'])
dc_list = list(range(6))
array_inv = np.asarray(
[df_inv.loc[df_inv.dc_id == dcid].stock_quantity.values for dcid in dc_list]) # 将初始库存数据整理成6*1000矩阵
sort_abandon_rate = np.argsort(-self.abandon_rate) # 对abandon rate按升序排列
costs = self.costs.set_index(['item_sku_id'])
costs = costs.sort_index().values
stockout_cost = costs[:, 0]
holding_cost = costs[:, 1]
mean_history = pd.read_csv("mean_test_cv10.csv").set_index(['dc_id', 'item_sku_id', 'date'])
mean_history = mean_history[["quantity"]].unstack(level=-1)
mean_history = mean_history.values # 读入预测的销量均值数据,并将其整理成6000*1矩阵,此数据后续将用于对我们预测的销量数据进行修正
mean_history1 = pd.read_csv("meantwo_test_cv29.csv").set_index(['dc_id', 'item_sku_id', 'date'])
mean_history1 = mean_history1[["quantity"]].unstack(level=-1)
mean_history1 = mean_history1.values # 读入预测的前31天的销量均值,并将其整理成6000*1矩阵,此数据后续将用于对我们预测的销量数据进行修正
mean_history2 = pd.read_csv("meantwo2_test_cv23.csv").set_index(['dc_id', 'item_sku_id', 'date'])
mean_history2 = mean_history2[["quantity"]].unstack(level=-1)
mean_history2 = mean_history2.values # 读入预测的后30天的销量均值,并将其整理成6000*1矩阵,此数据后续将用于对我们预测的销量数据进行修正
sku_demand = pd.read_csv("lgb_test_cv049.csv").set_index(['dc_id', 'item_sku_id', 'date'])
sku_demand = sku_demand[["quantity"]].unstack(level=-1)
sku_demand = sku_demand.values
array_sku_mean = sku_demand.reshape(6, 1000, 61) # 读入预测的61天的销量数据,并将其整理成6*1000*61的矩阵,后续会对该数据进行修正
a_std = np.std(array_sku_mean, axis=2)
a_std = a_std.reshape(6, 1000, 1)
a_std = a_std.repeat(61, axis=2) # 计算预测的销量数据的标准差
std_all = pd.read_csv("std.csv").values # 读入计算的每个商品在所有历史数据上的标准差,此数据后续将用于对我们预测的销量数据进行修正
std_all1 = std_all[:, :] + 1 - 1
std_abn = std_all > 100 # 将标准差>100的商品记录为std_abn
std_all[std_all > 30] = 30 # 在std_all中将大于30的数据记录为30,因为有些商品在历史数据上波动较大,但是在我们的预测时间范围内不一定会波动很大
std_all = std_all[:, 1:].reshape(6, 1000, 1).repeat(61, axis=2)
std_f = np.maximum(std_all / 17, a_std) # 取修正之后的预测数据与历史数据的较大者
std_f[std_f > 10] = 10 # 在std_f中将大于10的数据记录为10,此数据后续将用于对我们预测的销量数据进行修正
history = mean_history.reshape(6, 1000, 1).repeat(61, axis=2)
history1 = mean_history1.reshape(6, 1000, 1).repeat(31, axis=2)
history2 = mean_history2.reshape(6, 1000, 1).repeat(30, axis=2)
history0 = np.zeros((6, 1000, 61))
history0[:, :, :31] = np.maximum(history[:, :, :31], history1)
history0[:, :, 31:] = np.maximum(history[:, :, 31:], history2) # history0记录了我们预测的销量均值数据信息
array_sku_mean = 1.55 * np.maximum(array_sku_mean, history0) + 4 * std_f # 根据预测的均值数据以及计算得到的标准差数据对我们的销量数据进行了修正
sort_abandon_rate = np.argsort(-abandon_rate) # 对abandon rate排序,得到的排序会作为调拨时FDC的优先级顺序
inv_mean = np.zeros((6, 1000))
sku_surplus = np.zeros(1000) # 定义RDC可调商品数量矩阵
sku_shortage = np.zeros((5, 1000)) # 定义FDC需求商品数量矩阵
        # start the allocation (transshipment) from the RDC to the FDCs
        if t < 59:
            alpha = 3
        else:
            alpha = 62 - t # for t <= 59 we initially consider shipping each FDC 4 days of demand; at t = 60 about 3 days, at t = 61 about 2 days
        rdc_alpha = 2 # the RDC keeps twice its own same-day demand; the rest can be shipped to the FDCs
        inventory_decision = np.zeros((6, 1000)).astype(int) # allocation (transshipment) matrix
end_day = min(t + alpha, 61)
sku_surplus = (array_inv[0, :] - np.minimum(array_inv[0, :], rdc_alpha * array_sku_mean[0, :, t - 1])).astype(
int) # 每天给RDC留下满足自己的需求量的2倍之后剩余的数量,这一部分是可以用来给FDC调拨的量
inv_mean = np.minimum(array_inv[:, :], np.sum(array_sku_mean[:, :, t - 1:end_day], axis=2))
sku_shortage = np.rint(np.sum(array_sku_mean[1:, :, t - 1:end_day], axis=2) - inv_mean[1:,
:]) # 计算在现有库存量的基础上,若要满足我们初步考虑的FDC的需求量,每个FDC需要RDC调拨的商品数量
not_sat = sku_surplus - np.sum(sku_shortage, axis=0) < 0 # 记录下RDC中可调拨量不能满足5个FDC缺货量之和的SKU
sku_shortage[:, not_sat] = sku_shortage[:, not_sat] * np.tile(
sku_surplus[not_sat] / np.sum(sku_shortage[:, not_sat], axis=0),
(5, 1)) # 对于RDC中可调拨量足以满足5个FDC缺货量之和的SKU,按照FDC缺货量进行调拨;对于RDC中可调拨量不能满足5个FDC缺货量之和的SKU,按照缺货比例进行调拨
        # adjust the shortages in case the RDC stock cannot cover them
sku_shortage_4day = np.rint(
np.sum(array_sku_mean[1:, :, t - 1:t + 3], axis=2) - inv_mean[1:, :]) # case1:对于每个FDC每个SKU考虑4天的需求
not_sat = sku_surplus - np.sum(sku_shortage_4day, axis=0) < 0
sku_shortage[:, not_sat] = sku_shortage_4day[:, not_sat] * np.tile(
sku_surplus[not_sat] / np.sum(sku_shortage_4day[:, not_sat], axis=0), (5, 1))
sku_shortage_3day = np.rint(
np.sum(array_sku_mean[1:, :, t - 1:t + 2], axis=2) - inv_mean[1:, :]) # case1:对于每个FDC每个SKU考虑3天的需求
sku_shortage_3day[sku_shortage_3day < 0] = 0
not_sat = sku_surplus - np.sum(sku_shortage_3day, axis=0) < 0
sku_shortage[:, not_sat] = sku_shortage_3day[:, not_sat] * np.tile(
sku_surplus[not_sat] / np.sum(sku_shortage_3day[:, not_sat], axis=0), (5, 1))
sku_shortage_2day = np.rint(
np.sum(array_sku_mean[1:, :, t - 1:t + 1], axis=2) - inv_mean[1:, :]) # case1:对于每个FDC每个SKU考虑2天的需求
sku_shortage_2day[sku_shortage_2day < 0] = 0
not_sat = sku_surplus - np.sum(sku_shortage_2day, axis=0) < 0
sku_shortage[:, not_sat] = sku_shortage_2day[:, not_sat] * np.tile(
sku_surplus[not_sat] / np.sum(sku_shortage_2day[:, not_sat], axis=0), (5, 1))
sku_shortage_1day = np.rint(array_sku_mean[1:, :, t - 1] - inv_mean[1:, :]) # case1:对于每个FDC每个SKU考虑1天的需求
sku_shortage_1day[sku_shortage_1day < 0] = 0
not_sat = sku_surplus - np.sum(sku_shortage_1day, axis=0) < 0
sku_shortage[:, not_sat] = sku_shortage_1day[:, not_sat]
        # perform the allocation following the priority ordering computed above
for i in sort_abandon_rate: # 按照abandon_rate由高到低的顺序进行调货
sku_shortage[i, :] = np.minimum(sku_shortage[i, :], sku_surplus)
sku_shortage_1day = np.minimum(sku_shortage, sku_shortage_1day)
sku_shortage_2day = np.minimum(sku_shortage, sku_shortage_2day)
importance = (sku_shortage > 0) * (stockout_cost.reshape(1000, 1).repeat(5, axis=1).T) + \
7.5 * (stockout_cost.reshape(1000, 1).repeat(5, axis=1).T) * sku_shortage_1day + \
0.05 * sku_shortage_2day # 按照缺货量以及缺货成本计算每个FDC每个SKU的importance
importance[i, :] = importance[i, :] * (sku_shortage[i, :] > 0)
sort_importance = np.argsort(-importance[i, :]) # 对FDCi每种商品的重要性进行降序排列,这个顺序就是我们调拨的顺序
sku_cum = sku_shortage[i, sort_importance].cumsum() # 对FDCi按照sort_importance进行商品数量的累加
cum_more_than_cap = sku_cum <= capacity_limit[i] # 记录是否超过FDCi可调拨商品总量限制
kind_limit = min(sum(cum_more_than_cap), 300) # 记录在不超过FDCi商品总量限制以及商品种类限制的条件下能调拨的商品种类
inventory_decision[i + 1, sort_importance[:kind_limit]] = sku_shortage[
i, sort_importance[:kind_limit]] # 按照商品的shortage进行调拨
sku_surplus = sku_surplus - inventory_decision[i + 1, :] # 更新RDC可调拨商品数量
importance = 0.001 * stockout_cost - holding_cost # 按照缺货成本以及持有成本计算SKU的importance
sort_importance = np.argsort(-importance).astype(int) # 按照importance对商品进行排序
if t < 55:
demand_mean_3 = np.sum(array_sku_mean[:, :, t + 6:t + 7], axis=2) # 计算RDC以及5个FDC在第t+6天对每个商品的需求总量
demand_mean_hi = np.sum(array_sku_mean[:, :, t + 6:t + 9], axis=2) # 计算RDC以及5个FDC在第t+6,t+7,t+8天对每个商品的需求总量
demand_mean_hi1 = np.sum(array_sku_mean[:, :, t + 6:t + 10],
axis=2) # 计算RDC以及5个FDC在第t+6,t+7,t+8,t+9天对每个商品的需求总量
demand_mean_hi2 = np.sum(array_sku_mean[:, :, t + 6:t + 11],
axis=2) # 计算RDC以及5个FDC在第t+6,t+7,t+8,t+9,t+10天对每个商品的需求总量
demand_mean_3[:, sort_importance[:900]] = demand_mean_hi[:,
sort_importance[:900]] # 对于优先级位于800-900的商品我们考虑补够3天的需求量
demand_mean_3[:, sort_importance[:800]] = demand_mean_hi1[:,
sort_importance[:800]] # 对于优先级位于400-800的商品我们考虑补够4天的需求量
demand_mean_3[:, sort_importance[:400]] = demand_mean_hi2[:,
sort_importance[:400]] # 对于优先级最高的400种商品我们考虑补够5天的需求量
demand_mean_3[std_abn[:, 1:]] = demand_mean_3[std_abn[:, 1:]] + std_all1[:, 1:][
std_abn[:, 1:]] / 10 # 对于历史上标准差>100的商品的补货量进行调整,考虑对这些商品多补一些
demand_whole_3 = np.sum(demand_mean_3, axis=0) # 6个dc需求之和
demand_mean_7 = np.sum(array_sku_mean[:, :, t - 1:t + 6], axis=2) # 7天需求总量6*1000
demand_surplus = array_inv - demand_mean_7
left = demand_surplus[1:, :] + 1 - 1
left[left < 0] = 0 # 记录FDC中已有的库存是否能够满足未来7天的需求,不够的记为0
demand_surplus[1:, :][demand_surplus[1:, :] > 0] = 0 # 若FDC中已有的库存能够满足未来7天的需求则记为0
if t > 1:
start_time = max(t - 1 - 7, 0)
end_time = t
trans_sum = np.zeros((1, 1000))
for i in range(start_time, end_time):
trans_sum = trans_sum + self.decision_record[i - 1][0, :] # 过去‘8’天中的补货总量
demand_surplus_7 = trans_sum + np.sum(demand_surplus, axis=0)
else:
demand_surplus_7 = np.sum(demand_surplus, axis=0)
if t <= 4:
demand_mean_3 = np.sum(array_sku_mean[:, :, t + 6:t + 8], axis=2) # 前4天考虑补2天的需求量
demand_mean_3[std_abn[:, 1:]] = demand_mean_3[std_abn[:, 1:]] + std_all1[:, 1:][std_abn[:, 1:]] / 7
# demand_mean_7=np.sum(array_sku_mean[:,:,t-1:t+6],axis=2)#7天需求总量
demand_mean_3[1:, :] = demand_mean_3[1:, :] - left
demand_mean_3[demand_mean_3 < 0] = 0
demand_whole_3 = np.sum(demand_mean_3, axis=0) # 6个dc需求之和
demand_surplus_7[demand_surplus_7 < 0] = 0
inventory_decision[0, :] = np.rint(demand_whole_3 - np.minimum(demand_whole_3, demand_surplus_7))
self.decision_record.append(inventory_decision)
return inventory_decision
def info_update(self, end_day_inventory, t):
'''
input values: inventory information at the end of day t
'''
self.inv = end_day_inventory
def some_other_functions():
pass
| true |
e23aafd5ed87ad7d4bb97b28ed3078feba19bf83
|
Python
|
skywalker0803r/python101-lite
|
/FBCrawler/fb.py
|
UTF-8
| 491 | 2.625 | 3 |
[] |
no_license
|
import requests
import json
page_id = 'PAGE_ID' # id of the fan page to crawl
access_token = 'YOUR_ACCESS_TOKEN' # first register an app at https://developers.facebook.com/, then request the Page Public Content Access permission; after that you can get a token from https://developers.facebook.com/tools/explorer
limit = 5 # limit the number of records
response = requests.get('https://graph.facebook.com/v3.2/{}?limit={}&fields=id,posts&access_token={}'.format(page_id, limit, access_token))
print(response.json())
| true |
ec4da3ddff5e4c924afa18a8c1d26d286f528405
|
Python
|
Gnahue/sanFranciscoBiclas
|
/prediccion/modelo/algoritmo/build_trees.py
|
UTF-8
| 2,540 | 3.328125 | 3 |
[] |
no_license
|
from classes import Tree
from serialization import serialize_tree
from data_import import get_train
from predictions import get_tree_prediction
from predictions import write_csv
import time
import datetime
from data_import import get_test
def build_RF_trees(n, train, target, n_random_columns, max_depth, sample_size):
for i in range(0, n):
        ts = time.time()
        print ('CREANDO ARBOL ' + str(i))
        print (datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S'))
tree = Tree(train.sample(sample_size), target, n_random_columns, max_depth)
serialize_tree(tree, (str(i) + 'RF_3_5.pkl'))
print ('<------------------------------------ARBOL NUEVO = ' + str(i) + ' ------------------------------------>')
print ('SE CREO EN: ' + str((time.time()-ts) / 60) + ' MINUTOS')
def build_RF_trees_prediction(n, train, target, n_random_columns, max_depth, sample_size):
test = get_test()
for i in range(0, n):
print('________________________________________________________')
print ('--------------> CREANDO ARBOL ' + str(i) +'...')
ts = time.time()
print ('--------------> INICIO: ' + str(datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')))
tree = Tree(train.sample(sample_size), target, n_random_columns, max_depth)
print ('--------------> ARBOL NUEVO: ' + str(i))
ts1 = time.time()
print ('--------------> SE CREO EN: ' + str(round((ts1 - ts) / 60,2)) + ' MINUTOS ')
print ('--------------> CALCULANDO PREDICCION...')
        write_csv(get_tree_prediction(test, tree).items(), str(i) + 'prediction.csv') # this is the naming pattern for the result csv files
print ('--------------> SE CALCULO PREDICCION EN: ' + str(round((time.time() - ts1) / 60,2)) + ' MINUTOS')
print ('--------------> DEMORO UN TOTAL DE: ' + str(round((time.time() - ts) / 60,2)) + ' MINUTOS')
def build_bagging_trees(n, train, target, max_depth, sample_size):
    # uses all columns for the split = bagging
return build_RF_trees(n, train, target, (len(train.columns) - 1), max_depth, sample_size)
def main():
    n = 500 # number of trees to build
    train = get_train()
    n_random_columns = 3
    max_depth = 5 # study which depth works best
    sample_size = int(round(len(train) / 4)) # sample with replacement
# build_bagging_trees(n, train, 'duration', max_depth, 5000)
build_RF_trees_prediction(n, train, 'duration', n_random_columns, max_depth, sample_size)
main()
| true |
03c42c93a4b3af472fd907dc881a4b238427ca9f
|
Python
|
crash-bandic00t/python_dev
|
/1_fourth/basics_python/lesson9/task5.py
|
UTF-8
| 3,963 | 3.9375 | 4 |
[] |
no_license
|
"""
Реализуйте базовый класс Car.
при создании класса должны быть переданы атрибуты: color (str), name (str).
реализовать в классе методы: go(speed), stop(), turn(direction),
которые должны изменять состояние машины -
для хранения этих свойств вам понадобятся дополнительные атрибуты - придумайте какие.
добавьте метод is_police() - который возвращает True/False,
в зависимости от того является ли этот автомобиль полицейским (см.дальше)
Сделайте несколько производных классов: TownCar, SportCar, WorkCar, PoliceCar;
Добавьте в базовый класс метод get_status(), который должен возвращать
в виде строки название, цвет, текущую скорость автомобиля и
направление движения (в случае если автомобиль едет), для полицейских
автомобилей перед названием автомобиля должно идти слово POLICE;
Для классов TownCar и WorkCar в методе get_status() рядом со
значением скорости должна выводиться фраза "ПРЕВЫШЕНИЕ!",
если скорость превышает 60 (TownCar) и 40 (WorkCar).
Создайте по одному экземпляру каждого производного класса.
В цикле из 10 итераций, для каждого автомобиля сделайте одно из
случайных действий: go, stop, turn со случайными параметрами.
После каждого действия показывайте статус автомобиля.
"""
from random import randint, choice
class Car:
speed_limit = 0
speed = 0
turn = 'straight'
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return f'{self.name} {self.color}'
def go(self, speed):
self.speed = speed
def stop(self):
self.speed = 0
self.turn = 'straight'
def turns(self, direction):
self.turn = direction
def speed_check(self):
if self.speed_limit != 0:
if self.speed > self.speed_limit:
return True
return False
def get_status(self):
if self.speed == 0:
result = f'{self} {self.speed}'
elif self.speed_check():
result = f'{self} {self.speed} ПРЕВЫШЕНИЕ! {self.turn}'
elif not self.speed_check():
result = f'{self} {self.speed} {self.turn}'
return result
class TownCar(Car):
speed_limit = 60
class WorkCar(Car):
speed_limit = 40
class PoliceCar(Car):
def __str__(self):
return f'POLICE {self.name} {self.color}'
class SportCar(Car):
pass
town = TownCar('Nissan', 'blue')
work = WorkCar('UAZ', 'black')
pol = PoliceCar('Ford', 'white')
sport = SportCar('Ferrari', 'red')
car_list = [town, work, pol, sport]
turn_list = ['straight', 'right', 'left', 'backward']
for i in range(10):
rand_func = randint(0, 2)
if rand_func == 0:
        print('Changing speed')
for j in car_list:
j.go(randint(1,100))
print(j.get_status())
if rand_func == 1:
        print('Stopping')
for j in car_list:
j.stop()
print(j.get_status())
if rand_func == 2:
        print('Turning')
for j in car_list:
j.turns(choice(turn_list))
print(j.get_status())
print('-----------')
| true |
4f7ae9321026c5ed138ec8d20f15e92422232c1c
|
Python
|
cener-1999/thinkPython2
|
/p8/practice.py
|
UTF-8
| 1,918 | 3.703125 | 4 |
[] |
no_license
|
def overturn(string):
index=len(string)-1
while index >=0:
print(string[index])
index=index-1
#overturn('hello_world')
# oh my, this is way too smart!
def fun_for(string):
for letter in string:
print(letter)
#fun_for('so good')
def duckname(prefixes,suffix):
for letter in prefixes:
if(letter=='Q'or letter=='O'):
print(letter+'u'+suffix)
else:
print(letter+suffix)
#duckname('JKLMNOQP','ack')
#print('hello_world'[:])
def find1(string,letter,origin,endpoint):
    if (origin>=endpoint or endpoint-1 >len(string)):
        print('wrong input!')
        return -1
    else:
        index=origin-1
        while index<endpoint-1:
            if(string[index]==letter):
                break
            index=index+1
        if(index<=endpoint-1 and (letter==string[index])):
            return 'we find'+" '"+letter+"' "+'at '+str(index+1)
        else:
            return "we can't find"+" '"+letter+"' "+'in '+string
#print(find1('hello_world','l',1,8))
def find2(string,letter,origin,endpoint):
    if (origin>=endpoint or endpoint-1 >len(string)):
        return -1
    else:
        index=origin-1
        while index<endpoint-1:
            if(string[index]==letter):
                break
            index=index+1
        if(index<=endpoint-1 and (letter==string[index])):
            return index+1
        else:
            return 0
#print(find2('hello_world','l',1,8))
def count1(string,goal):
count=0
for letter in string:
if(letter==goal):
count=count+1
return count
#print(count1('abbaacaaa','a'))
def count2(string,goal):
count=0
index=0
while(True):
index=find2(string,goal,index+1,len(string)+1)
if(index>0):
count=count+1
else:
break
return count
print(count2('abbaaca','a'))
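# --- Editor's aside (not part of the original exercises) ---
# For comparison only: the built-in string methods cover the same ground as the
# hand-written helpers above (find2 is 1-based, str.find is 0-based).
print('hello_world'.find('l', 0, 7))   # -> 2, cf. find2('hello_world','l',1,8) -> 3
print('abbaaca'.count('a'))            # -> 4, same as count2('abbaaca','a')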
| true |
3b5e976d515249b69084cdccce7e5e7a45993855
|
Python
|
Hadirback/python_algorithms_and_data_structures
|
/homework_2_pads/task_1_python.py
|
UTF-8
| 2,172 | 4.25 | 4 |
[] |
no_license
|
'''
1. Write a program that adds, subtracts, multiplies or divides two numbers.
The numbers and the operation sign are entered by the user. After performing the calculation the program
does not terminate, but asks for new data to calculate. The program must terminate when the character
'0' is entered as the operation sign. If the user enters an invalid sign (not '0', '+', '-', '*', '/'),
the program must report an error and ask for the operation sign again. It must also tell the user
that division by zero is impossible if zero was entered as the divisor.
'''
while True:
    operation = input('Enter the operation sign (+ - / *). Enter 0 to quit: ')
if operation == '0':
break
if len(operation) != 1 or (operation != '+' and operation != '-' \
and operation != '/' and operation != '*'):
        print('Unknown operation! Enter the numbers and the operation again!')
continue
    num_1 = int(input('Enter number 1: '))
    num_2 = int(input('Enter number 2: '))
if num_2 == 0 and operation == '/':
        print('Division by zero is not allowed! Enter the numbers and the operation again!')
continue
result_operation = None
if operation == '+':
result_operation = num_1 + num_2
elif operation == '-':
result_operation = num_1 - num_2
elif operation == '*':
result_operation = num_1 * num_2
elif operation == '/':
result_operation = num_1 / num_2
    print(f'Result: {num_1} {operation} {num_2} = {result_operation}')
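# --- Editor's aside: a hedged variation, not part of the original homework ---
# The if/elif chain above can also be written with an operator lookup table;
# a minimal sketch that assumes the same validation as the loop above.
import operator
OPS = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}
def calculate(num_1, num_2, operation):
    # look up the operator function and apply it to the two numbers
    return OPS[operation](num_1, num_2)
# calculate(6, 3, '/') -> 2.0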
| true |
17003c92290b879aa8e3c6e6665ea996770b98ee
|
Python
|
naidenovaleksei/kutulu
|
/world/world.py
|
UTF-8
| 1,764 | 2.59375 | 3 |
[] |
no_license
|
CELL_EMPTY = '.'
CELL_WALL = '#'
CELL_SPAWN = 'w'
SPAWNING = 0
WANDERING = 1
SANITY_LOSS_LONELY = 3
SANITY_LOSS_GROUP = 1
SANITY_LOSS_SPOOKED = 20
WANDERER_SPAWN_TIME = 3
WANDERER_LIFE_TIME = 40
class KutuluWorld():
def __init__(self, fname='map.txt'):
with open(fname, 'r') as f:
self.map_grid = [ x.strip() for x in f.readlines() ]
self.spawnpoints = []
for x in range(self.width()):
for y in range(self.height()):
if self.cell(x,y) == CELL_SPAWN:
self.spawnpoints.append((x,y))
def cell(self, x, y):
return self.map_grid[y][x]
def is_empty(self, x, y):
return self.cell(x,y) in (CELL_EMPTY, CELL_SPAWN)
def cell_type(self, x, y):
return self.cell(x,y)
def width(self):
return len(self.map_grid[0])
def height(self):
return len(self.map_grid)
def distance( self, x1, y1, x2, y2 ):
return abs(x1 - x2) + abs(y1 - y2)
def approve_move(self, old_x, old_y, new_x, new_y):
if self.distance(old_x, old_y, new_x, new_y) != 1:
return False
return self.is_empty(new_x, new_y)
def get_all_spawnpoints(self):
return self.spawnpoints
def get_furthest_spawnpoints(self, coords):
def get_min_distance(spawnpoint):
return min(self.distance(x,y,*spawnpoint) for x,y in coords)
dists = list(map(get_min_distance, self.spawnpoints))
        max_dist = max(dists)  # furthest = largest distance to the nearest coordinate
        furthest_spawnpoints = [spawnpoint for spawnpoint, dist in zip(self.spawnpoints, dists) if dist == max_dist]
return furthest_spawnpoints
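# --- Editor's aside: hedged usage sketch, not part of the original module ---
# The 3x3 grid below is invented purely to illustrate the expected map file format
# (walls '#', empty cells '.', spawn points 'w').
if __name__ == '__main__':
    import os
    import tempfile
    demo_map = "###\n#w#\n###\n"
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write(demo_map)
        demo_path = f.name
    world = KutuluWorld(demo_path)
    print(world.width(), world.height(), world.get_all_spawnpoints())  # 3 3 [(1, 1)]
    os.unlink(demo_path)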
| true |
5d9fe1ac6f9db4a4bf5174d0df9b968e06af5ace
|
Python
|
jdotpy/watchtower
|
/watchtower/bin/worker.py
|
UTF-8
| 854 | 2.625 | 3 |
[] |
no_license
|
#!/usr/bin/env python
from watchtower.web import Watchtower
from watchtower.utils import import_class
from datetime import datetime
from pprint import pprint
import time
import sys
class Worker():
def __init__(self, app):
self.app = app
def run(self):
while True:
print('Doing iteration')
for check in self.app.checks:
result = check.run()
self.app.storage.log_result(check.name, datetime.now(), result)
print('Check {} got result: {}'.format(
check, result
))
time.sleep(30)
pprint(self.app.storage.summary([check.name for check in self.app.checks]))
if __name__ == "__main__":
app = Watchtower()
w = Worker(app)
if '--reset' in sys.argv:
app.storage.reset()
w.run()
| true |
a2f6bed2c6f5ee60b1279ccca5780ca2a3fa85f3
|
Python
|
Ninjalemur/PortfolioSim
|
/tests/test_simulator_init.py
|
UTF-8
| 13,087 | 2.796875 | 3 |
[] |
no_license
|
from portfoliosim import __version__
import portfoliosim as ps
import pandas as pd
def test_version():
assert __version__ == '0.1.0'
def test_simulator_check_desired_income_type():
"""
ensure that Simulator flags non float desired income correctly
Only things castable to float should be accepted
"""
try:
simulation_cofig = {
'starting_portfolio_value': 1000000.0,
"desired_annual_income": 'a',
"inflation": 1.027,
"min_income_multiplier": 0.75,
"max_withdrawal_rate" : 0.02
}
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when desired_annual_income in simulation_cofig cannot be coerced to float'
except ValueError as ve:
assert str(ve) == "desired_annual_income should be castable to float. received 'a' of type <class 'str'>"
def test_simulator_check_inflation_type():
"""
ensure that Simulator flags non float inflation correctly
Only things castable to float should be accepted
"""
try:
simulation_cofig = {
'starting_portfolio_value': 1000000.0,
"desired_annual_income": 100000,
"inflation": 'a',
"min_income_multiplier": 0.75,
"max_withdrawal_rate" : 0.02
}
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when inflation in simulation_cofig cannot be coerced to float'
except ValueError as ve:
assert str(ve) == "inflation should be castable to float. received 'a' of type <class 'str'>"
def test_simulator_check_min_income_multiplier_type():
"""
ensure that Simulator flags non float inflation correctly
Only things castable to float should be accepted
"""
try:
simulation_cofig = {
'starting_portfolio_value': 1000000.0,
"desired_annual_income": 100000,
"inflation": 1.026,
"min_income_multiplier": 'a',
"max_withdrawal_rate" : 0.02
}
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when min_income_multiplier in simulation_cofig cannot be coerced to float'
except ValueError as ve:
assert str(ve) == "min_income_multiplier should be castable to float. received 'a' of type <class 'str'>"
def test_simulator_check_starting_portfolio_value_type():
"""
ensure that Simulator flags non float starting portfolio value correctly
Only things castable to float should be accepted
"""
try:
simulation_cofig = {
'starting_portfolio_value': 'a',
"desired_annual_income": 100000,
"inflation": 1.027,
"min_income_multiplier": 0.75,
"max_withdrawal_rate" : 0.02
}
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when desired_annual_income in simulation_cofig cannot be coerced to float'
except ValueError as ve:
assert str(ve) == "starting_portfolio_value should be castable to float. received 'a' of type <class 'str'>"
def test_simulator_check_simulation_length_type():
"""
ensure that Simulator flags non int length correctly
Only things castable to int should be accepted
"""
try:
simulation_cofig = {
'starting_portfolio_value': 1000000.0,
"desired_annual_income": 100000,
"inflation": 1.026,
"min_income_multiplier": 0.75,
"simulation_length_years" : 'a',
"max_withdrawal_rate" : 0.02
}
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when simulation_length_years in simulation_cofig cannot be coerced to int'
except ValueError as ve:
assert str(ve) == "simulation_length_years should be castable to int. received 'a' of type <class 'str'>"
def test_simulator_generate_income_schedule():
"""
ensure that Simulator income generator calculates income schedule correctly
"""
expected_schedule = pd.DataFrame({
'year': pd.Series([1,2,3],dtype='int'),
'desired_income': pd.Series([100000.0,101000.0,102010.0],dtype='float'),
'min_income':pd.Series([50000.0,50500.0,51005.0],dtype='float')
})
simulation_cofig = {
'starting_portfolio_value': 1000000.0,
"desired_annual_income": 100000,
"inflation": 1.01,
"min_income_multiplier": 0.5,
"simulation_length_years" : 3,
"max_withdrawal_rate" : 0.02
}
x = ps.Simulator(**simulation_cofig)
pd.testing.assert_frame_equal(expected_schedule,x._get_income_schedule())
def test_simulator_check_starting_portfolio_above_zero():
"""
ensure that Simulator flags starting_portfolio_value not larger than 0
Only starting_portfolio_value greater than 0 should be accepted
"""
simulation_cofig = {
"desired_annual_income": 100000,
"inflation": 1.026,
"min_income_multiplier": 0.75,
"simulation_length_years" : 30,
"max_withdrawal_rate" : 0.02
}
for i in [0,-1]:
simulation_cofig['starting_portfolio_value'] = i
try:
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when starting_portfolio_value is not greater than 0'
except ValueError as ve:
assert str(ve) == f"starting_portfolio_value should be greater than zero. received '{i}'"
def test_simulator_check_desired_annual_income_above_zero():
"""
ensure that Simulator flags desired_annual_income not larger than 0
Only desired_annual_income greater than 0 should be accepted
"""
simulation_cofig = {
"starting_portfolio_value": 1000000,
"inflation": 1.026,
"min_income_multiplier": 0.75,
"simulation_length_years" : 30,
"max_withdrawal_rate" : 0.02
}
for i in [0,-1]:
simulation_cofig['desired_annual_income'] = i
try:
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when desired_annual_income is not greater than 0'
except ValueError as ve:
assert str(ve) == f"desired_annual_income should be greater than zero. received '{i}'"
def test_simulator_check_inflation_above_zero():
"""
ensure that Simulator flags inflation not above zero
    Only inflation above zero should be accepted
"""
simulation_cofig = {
"starting_portfolio_value": 1000000,
"desired_annual_income": 100000,
"min_income_multiplier": 0.75,
"simulation_length_years" : 30,
"max_withdrawal_rate" : 0.02
}
for i in [0,-1]:
simulation_cofig['inflation'] = i
try:
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when inflation is not greater than 0'
except ValueError as ve:
assert str(ve) == f"inflation should be greater than zero. received '{i}'"
def test_simulator_check_min_income_between_zero_and_one_inclusive():
"""
ensure that Simulator flags min_income_multiplier not between 0 and 1 inclusive
Only min_income_multiplier between 0 and 1 inclusive should be accepted
"""
simulation_cofig = {
"starting_portfolio_value": 1000000,
"desired_annual_income": 100000,
"inflation": 1.026,
"simulation_length_years" : 30,
"max_withdrawal_rate" : 0.02
}
for i in [-0.4,1.1]:
simulation_cofig['min_income_multiplier'] = i
try:
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when min_income_multiplier is not between 0 and 1 inclusive'
except ValueError as ve:
assert str(ve) == f"min_income_multiplier should be between 0 and 1 inclusive. received '{i}'"
def test_simulator_check_max_withdrawal_more_than_zero_and_smaller_or_equals_one():
"""
ensure that Simulator flags max_withdrawal greater than zero, and less than or equal to one
Only max_withdrawal 0 < x <= 1 should be accepted
"""
simulation_cofig = {
"starting_portfolio_value": 1000000,
"desired_annual_income": 100000,
"inflation": 1.026,
"simulation_length_years" : 30,
"min_income_multiplier" : 0.5
}
for i in [-0.1,0,1.1]:
simulation_cofig['max_withdrawal_rate'] = i
try:
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when max_withdrawal_rate is not 0 < x <= 1'
except ValueError as ve:
assert str(ve) == f"max_withdrawal_rate should be greater than zero and less than or equal to one. received '{i}'"
def test_simulator_check_simulation_length_years_above_zero():
"""
ensure that Simulator flags simulation_length_years not above zero
Only simulation_length_years above zero allowed
"""
simulation_cofig = {
"starting_portfolio_value": 1000000,
"desired_annual_income": 100000,
"min_income_multiplier": 0.75,
"inflation" : 1.025,
"max_withdrawal_rate" : 0.02
}
for i in [0,-1]:
simulation_cofig['simulation_length_years'] = i
try:
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when simulation_length_years is not greater than 0'
except ValueError as ve:
assert str(ve) == f"simulation_length_years should be greater than zero. received '{i}'"
def test_simulator_check_for_valid_assets_in_portfolio():
"""
ensure that Simulator flags portfolio asset classes other than stocks, bonds, cash, gold
Only asset classes stocks, bonds, cash, gold allowed
"""
simulation_cofig = {
"starting_portfolio_value": 1000000,
"desired_annual_income": 100000,
"min_income_multiplier": 0.75,
"inflation" : 1.025,
"max_withdrawal_rate" : 0.02,
"portfolio_allocation" : {
'cats' : 0.6,
'gold' : 0.4
}
}
try:
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when portfolio_allocation contains keys other than stocks, bonds, cash, gold'
except TypeError as ve:
assert str(ve) == f"portfolio assets should only be stocks, bonds, cash, gold. received 'cats'"
def test_simulator_check_for_portfolio_allocation_type():
"""
ensure that Simulator flags portfolio allocations that cannot be converted to float
Only values that can be converted to float are allowed
"""
simulation_cofig = {
'starting_portfolio_value': 1000000.0,
"desired_annual_income": 10000,
"inflation": 1.027,
"min_income_multiplier": 0.75,
"max_withdrawal_rate" : 0.02,
"portfolio_allocation" : {
'stocks' : 'a',
'gold' : 0.4
}
}
try:
x = ps.Simulator(**simulation_cofig)
        assert False, 'ValueError should be raised when portfolio_allocation value in simulation_cofig cannot be coerced to float'
except ValueError as ve:
assert str(ve) == "portfolio_allocation for stocks should be castable to float. received 'a' of type <class 'str'>"
def test_simulator_check_for_portfolio_allocation_at_least_zero():
    """
    ensure that Simulator flags portfolio allocations that are below zero
    Only values that are at least zero are allowed
    """
simulation_cofig = {
'starting_portfolio_value': 1000000.0,
"desired_annual_income": 10000,
"inflation": 1.027,
"min_income_multiplier": 0.75,
"max_withdrawal_rate" : 0.02,
"portfolio_allocation" : {
'stocks' : -0.5,
'gold' : 0.4
}
}
try:
x = ps.Simulator(**simulation_cofig)
        assert False, 'ValueError should be raised when portfolio_allocation values in simulation_cofig are below zero'
except ValueError as ve:
assert str(ve) == "portfolio_allocation for stocks should be at least zero. received '-0.5'"
def test_simulator_check_cash_buffer_years_at_least_zero():
"""
ensure that Simulator flags cash_buffer_years not at least 0
Only cash_buffer_years at least 0 should be accepted
"""
simulation_cofig = {
"starting_portfolio_value": 1000000,
"desired_annual_income": 100000,
"inflation": 1.026,
"min_income_multiplier": 0.75,
"simulation_length_years" : 30,
"max_withdrawal_rate" : 0.02,
'cash_buffer_years': -1
}
try:
x = ps.Simulator(**simulation_cofig)
assert False, 'ValueError should be raised when cash_buffer_years is not at least 0'
except ValueError as ve:
assert str(ve) == "cash_buffer_years should be at least zero. received '-1'"
| true |
01f8d61febf6baa63df67fceb31f6196b9fb5cf1
|
Python
|
akashshegde11/python-practice
|
/Introduction/dict_3.py
|
UTF-8
| 269 | 4.65625 | 5 |
[] |
no_license
|
# Accessing elements from a dictionary
dict1 = {1: 'Geeks', 'name': 'for', 3: 'Geeks'}
print("Accessing element using a key: ")
print(dict1['name'])
print("Accessing element using a key: ")
print(dict1[1])
print("Accessing element using get: ")
print(dict1.get(3))
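# Editor's aside (not in the original snippet): get() also accepts a default value,
# which avoids a KeyError when the key is missing.
print("Accessing a missing key with a default: ")
print(dict1.get('missing', 'not found'))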
| true |
c7a01905abccd90f3ebb92753d03fb8c85138b83
|
Python
|
tushgup/python-basics
|
/solutions.py
|
UTF-8
| 2,266 | 4.0625 | 4 |
[] |
no_license
|
# 1. Count no of letters and digits
countL = 0;
countD = 0;
for c in "Test123":
if (c.isalpha()):
countL = countL + 1;
else :
countD = countD + 1;
print("No of letters: ", countL);
print("No of digits: ", countD);
# 2. Remove punctuation
import string
s = "It's a good day"
for c in s:
if c not in string.punctuation:
print(c, end = '')
# 3. Sort alphabets in string
s = "bdca"
b = sorted(s)
c = ''.join(b)
print(c)
# 4. no of each vowel
s = "aabcdee"
vowel = "aeiou"
v = {}.fromkeys(vowel, 0)
for c in s:
if c in v:
v[c] = v[c] + 1
print(v)
# 5. palindrome string
s = "abba"
s1 = s[::-1]
if (s == s1):
print("String is palindrome")
else :
print("String is not palindrome")
# 6. tuple operations
tupleem = ()
print(tupleem)
tuple1 = (1, "test", 1.3)
print(tuple1)
print(tuple1[0: 2])
tuple2 = tuple1 + tuple('b')
print(tuple2)
l = list(tuple2)
print(l)
# 7. tuple to str
tuple1 = ('t', 'u', 's', 'h')
str1 = ''.join(tuple1)
print(str1)
# 8. del tuple element
tuple1 = ('t', 'u', 's', 'h')
l = list(tuple1)
l.remove('u')
t1 = tuple(l)
print(t1)
# 9. check whether item exists in tuple
tuple1 = ('t', 'u', 's', 'h')
for c in tuple1:
    if c == 'h':
print("Element found")
else :
print("Not found")
# 10. change last value tuple
tuple1 = ('t', 'u', 's', 'h')
l = list(tuple1)
l[-1] = 'a'
t1 = tuple(l)
print(t1)
# 11. string concat
x = "hello"
y = "world"
res = x + " " + y
print(res)
# 12. set operations
x = set([1, 2, 3, 4])
y = set([7, 8, 3, 4])
x.remove(3)
print(x)
print(x.union(y))
print(x.intersection(y))
print(x.difference(y))
# 13. array
from array import array
x = array('I', [1, 2, 3, 4])
for i in x:
print(i)
# 14. list unique elements
x = [1, 2, 3, 1, 4, 2]
y = set(x)
print(y)
# 15. array insertion
x = []
for i in range(3):
x.append([])
for j in range(3):
val = int(input("Enter value"))
x[i].append(val)
print(x)
# 16. 2 d array multiplication
import numpy as np
x = [
[1, 2, 3],
[4, 5, 6],
[8, 9, 10]
]
y = [
[3, 2, 1],
[5, 4, 3, ],
[1, 7, 4]
]
res = np.multiply(x, y)
print(res)
# 17. factors of number
x = 10
for i in range(1, x + 1):
if x % i == 0:
print(i)
# 18. Find HCF / GCD
from math import gcd
x = gcd(20, 8)
print(x)
| true |
971f417417ee4cd01a8a195ec24d33ff6ad9f066
|
Python
|
Erotemic/ubelt
|
/ubelt/util_zip.py
|
UTF-8
| 15,415 | 3.3125 | 3 |
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
"""
Abstractions for working with zipfiles and archives
This may be renamed to util_archive in the future.
The :func:`ubelt.split_archive` works with paths that reference a file inside
of an archive (e.g. a zipfile). It splits it into two parts, the full path to
the archive and then the path to the file inside of the archive. By convention
these are separated with either a pathsep or a colon.
The :func:`ubelt.zopen` works to open a file that lives inside of an archive
without the user needing to worry about extracting it first. When possible it
will read it directly from the archive, but in some cases it may extract it to
a temporary directory first.
"""
import io
import os
from os.path import exists, join
from ubelt.util_mixins import NiceRepr
__all__ = ['zopen', 'split_archive']
def split_archive(fpath, ext='.zip'):
"""
If fpath specifies a file inside a zipfile, it breaks it into two parts the
path to the zipfile and the internal path in the zipfile.
Args:
fpath (str | PathLike): path that specifies a path inside of an archive
ext (str): archive extension
Returns:
Tuple[str, str | None]
Example:
>>> split_archive('/a/b/foo.txt')
>>> split_archive('/a/b/foo.zip/bar.txt')
>>> split_archive('/a/b/foo.zip/baz/biz.zip/bar.py')
>>> split_archive('archive.zip')
>>> import ubelt as ub
>>> split_archive(ub.Path('/a/b/foo.zip/baz/biz.zip/bar.py'))
>>> split_archive('/a/b/foo.zip/baz.pt/bar.zip/bar.zip', '.pt')
TODO:
Fix got/want for win32
(None, None)
('/a/b/foo.zip', 'bar.txt')
('/a/b/foo.zip/baz/biz.zip', 'bar.py')
('archive.zip', None)
('/a/b/foo.zip/baz/biz.zip', 'bar.py')
('/a/b/foo.zip/baz.pt', 'bar.zip/bar.zip')
"""
import re
fpath = os.fspath(fpath)
# fpath = os.fspath(fpath)
pat = '({}[{}/:])'.format(re.escape(ext), re.escape(os.path.sep))
# pat = r'(\'' + ext + '[' + re.escape(os.path.sep) + '/:])'
parts = re.split(pat, fpath, flags=re.IGNORECASE)
if len(parts) > 2:
archivepath = ''.join(parts[:-1])[:-1]
internal = parts[-1]
elif len(parts) == 1:
archivepath = parts[0]
if not archivepath.endswith(ext):
archivepath = None
internal = None
else: # nocover
raise AssertionError('impossible state')
return archivepath, internal
class zopen(NiceRepr):
"""
An abstraction of the normal :func:`open` function that can also handle
reading data directly inside of zipfiles.
This is a file-object like interface [FileObj] --- i.e. it supports the
read and write methods to an underlying resource.
Can open a file normally or open a file within a zip file (readonly).
Tries to read from memory only, but will extract to a tempfile if necessary.
Just treat the zipfile like a directory,
e.g. /path/to/myzip.zip/compressed/path.txt OR?
e.g. /path/to/myzip.zip:compressed/path.txt
References:
.. [FileObj] https://docs.python.org/3/glossary.html#term-file-object
TODO:
- [ ] Fast way to open a base zipfile, query what is inside, and
then choose a file to further zopen (and passing along the same
open zipfile reference maybe?).
- [ ] Write mode in some restricted setting?
Example:
>>> from ubelt.util_zip import * # NOQA
>>> import pickle
>>> import ubelt as ub
>>> dpath = ub.Path.appdir('ubelt/tests/util_zip').ensuredir()
>>> dpath = ub.Path(dpath)
>>> data_fpath = dpath / 'test.pkl'
>>> data = {'demo': 'data'}
>>> with open(str(data_fpath), 'wb') as file:
>>> pickle.dump(data, file)
>>> # Write data
>>> import zipfile
>>> zip_fpath = dpath / 'test_zip.archive'
>>> stl_w_zfile = zipfile.ZipFile(os.fspath(zip_fpath), mode='w')
>>> stl_w_zfile.write(os.fspath(data_fpath), os.fspath(data_fpath.relative_to(dpath)))
>>> stl_w_zfile.close()
>>> stl_r_zfile = zipfile.ZipFile(os.fspath(zip_fpath), mode='r')
>>> stl_r_zfile.namelist()
>>> stl_r_zfile.close()
>>> # Test zopen
>>> self = zopen(zip_fpath / 'test.pkl', mode='rb', ext='.archive')
>>> print(self._split_archive())
>>> print(self.namelist())
>>> self.close()
>>> self = zopen(zip_fpath / 'test.pkl', mode='rb', ext='.archive')
>>> recon1 = pickle.loads(self.read())
>>> self.close()
>>> self = zopen(zip_fpath / 'test.pkl', mode='rb', ext='.archive')
>>> recon2 = pickle.load(self)
>>> self.close()
>>> assert recon1 == recon2
>>> assert recon1 is not recon2
Example:
>>> # Test we can load json data from a zipfile
>>> from ubelt.util_zip import * # NOQA
>>> import ubelt as ub
>>> import json
>>> import zipfile
>>> dpath = ub.Path.appdir('ubelt/tests/util_zip').ensuredir()
>>> infopath = join(dpath, 'info.json')
>>> ub.writeto(infopath, '{"x": "1"}')
>>> zippath = join(dpath, 'infozip.zip')
>>> internal = 'folder/info.json'
>>> with zipfile.ZipFile(zippath, 'w') as myzip:
>>> myzip.write(infopath, internal)
>>> fpath = zippath + '/' + internal
>>> # Test context manager
>>> with zopen(fpath, 'r') as self:
>>> info2 = json.load(self)
>>> assert info2['x'] == '1'
>>> # Test outside of context manager
>>> self = zopen(fpath, 'r')
>>> print(self._split_archive())
>>> info2 = json.load(self)
>>> assert info2['x'] == '1'
>>> # Test nice repr (with zfile)
>>> print('self = {!r}'.format(self))
>>> self.close()
Example:
>>> # Coverage tests --- move to unit-test
>>> from ubelt.util_zip import * # NOQA
>>> import ubelt as ub
>>> import json
>>> import zipfile
>>> dpath = ub.Path.appdir('ubelt/tests/util_zip').ensuredir()
>>> textpath = join(dpath, 'seekable_test.txt')
>>> text = chr(10).join(['line{}'.format(i) for i in range(10)])
>>> ub.writeto(textpath, text)
>>> zippath = join(dpath, 'seekable_test.zip')
>>> internal = 'folder/seekable_test.txt'
>>> with zipfile.ZipFile(zippath, 'w') as myzip:
>>> myzip.write(textpath, internal)
>>> ub.delete(textpath)
>>> fpath = zippath + '/' + internal
>>> # Test seekable
>>> self_seekable = zopen(fpath, 'r', seekable=True)
>>> assert self_seekable.seekable()
>>> self_seekable.seek(8)
>>> assert self_seekable.readline() == 'ne1' + chr(10)
>>> assert self_seekable.readline() == 'line2' + chr(10)
>>> self_seekable.seek(8)
>>> assert self_seekable.readline() == 'ne1' + chr(10)
>>> assert self_seekable.readline() == 'line2' + chr(10)
>>> # Test non-seekable?
>>> # Sometimes non-seekable files are still seekable
>>> maybe_seekable = zopen(fpath, 'r', seekable=False)
>>> if maybe_seekable.seekable():
>>> maybe_seekable.seek(8)
>>> assert maybe_seekable.readline() == 'ne1' + chr(10)
>>> assert maybe_seekable.readline() == 'line2' + chr(10)
>>> maybe_seekable.seek(8)
>>> assert maybe_seekable.readline() == 'ne1' + chr(10)
>>> assert maybe_seekable.readline() == 'line2' + chr(10)
Example:
>>> # More coverage tests --- move to unit-test
>>> from ubelt.util_zip import * # NOQA
>>> import ubelt as ub
>>> import pytest
>>> dpath = ub.Path.appdir('ubelt/tests/util_zip').ensuredir()
>>> with pytest.raises(OSError):
>>> self = zopen('', 'r')
        >>> # Test open non-zip existing file
>>> existing_fpath = join(dpath, 'exists.json')
>>> ub.writeto(existing_fpath, '{"x": "1"}')
>>> self = zopen(existing_fpath, 'r')
>>> assert self.read() == '{"x": "1"}'
>>> # Test dir
>>> dir(self)
>>> # Test nice
>>> print(self)
>>> print('self = {!r}'.format(self))
>>> self.close()
>>> # Test open non-zip non-existing file
>>> nonexisting_fpath = join(dpath, 'does-not-exist.txt')
>>> ub.delete(nonexisting_fpath)
>>> with pytest.raises(OSError):
>>> self = zopen(nonexisting_fpath, 'r')
>>> with pytest.raises(NotImplementedError):
>>> self = zopen(nonexisting_fpath, 'w')
>>> # Test nice-repr
>>> self = zopen(existing_fpath, 'r')
>>> print('self = {!r}'.format(self))
>>> # pathological
>>> self = zopen(existing_fpath, 'r')
>>> self._handle = None
>>> dir(self)
"""
def __init__(self, fpath, mode='r', seekable=False, ext='.zip'):
"""
Args:
fpath (str | PathLike):
path to a file, or a special path that denotes both a
path to a zipfile and a path to a archived file inside of
the zipfile.
mode (str):
Currently only "r" - readonly mode is supported
seekable (bool):
If True, attempts to force "seekability" of the underlying
file-object, for compressed files this will first extract
the file to a temporary location on disk. If False, any underlying
compressed file will be opened directly which may result in the
object being non-seekable.
ext (str):
                The extension of the zipfile. Modify this if a non-standard
                extension is used (e.g. for torch packages).
"""
self.fpath = fpath
self.ext = ext
self.name = fpath
self.mode = mode
self._seekable = seekable
self._zfpath = None # points to the base zipfile (if appropriate)
self._temp_dpath = None # for temporary extraction
self._zfile_read = None # underlying opened zipfile object
# The _handle pointer should be a file-like object that this zopen
# object impersonate, by forwarding most every getattr call to it.
self._handle = None
self._open()
@property
def zfile(self):
"""
Access the underlying archive file
"""
if self._zfile_read is None:
import zipfile
archivefile, internal = self._split_archive()
myzip = zipfile.ZipFile(archivefile, 'r')
self._zfile_read = myzip
return self._zfile_read
def namelist(self):
"""
Lists the contents of this zipfile
"""
myzip = self.zfile
namelist = myzip.namelist()
return namelist
def __nice__(self):
if self._zfpath is None:
return 'handle={}, mode={}'.format(str(self._handle), self.mode)
else:
return 'handle={} in zipfpath={}, mode={}'.format(self._handle, self._zfpath, self.mode)
def __getattr__(self, key):
# Expose attributes of wrapped handle
if hasattr(self._handle, key):
assert self._handle is not self
return getattr(self._handle, key)
raise AttributeError(key)
def __dir__(self):
# Expose attributes of wrapped handle
zopen_attributes = {
'namelist',
'zfile',
}
keyset = set(dir(super(zopen, self)))
keyset.update(set(self.__dict__.keys()))
if self._handle is not None:
keyset.update(set(dir(self._handle)))
return sorted(keyset | zopen_attributes)
def _cleanup(self):
# print('self._cleanup = {!r}'.format(self._cleanup))
if self._handle is not None:
if not getattr(self, 'closed', True):
closemethod = getattr(self, 'close', None)
if closemethod is not None: # nocover
closemethod()
closemethod = None
self._handle = None
if self._temp_dpath and exists(self._temp_dpath):
# os.unlink(self._temp_dpath)
import ubelt as ub
ub.delete(self._temp_dpath)
def __del__(self):
self._cleanup()
def _split_archive(self):
archivefile, internal = split_archive(self.fpath, self.ext)
return archivefile, internal
def _open(self):
"""
This logic sets the "_handle" to the appropriate backend object
such that zopen can behave like a standard IO object.
In read-only mode:
* If fpath is a normal file, _handle is the standard `open` object
* If fpath is a seekable zipfile, _handle is an IOWrapper pointing
to the internal data
* If fpath is a non-seekable zipfile, the data is extracted behind
the scenes and a standard `open` object to the extracted file
is given.
In write mode:
            * NotImplemented
"""
if 'r' not in self.mode:
raise NotImplementedError('Only read mode is supported for now')
_handle = None
fpath = os.fspath(self.fpath)
if exists(fpath):
_handle = open(fpath, self.mode)
elif self.ext + '/' in fpath or self.ext + os.path.sep in fpath:
archivefile, internal = self._split_archive()
myzip = self.zfile
if self._seekable:
import tempfile
# If we need data to be seekable, then we must extract it to a
# temporary file first.
self._temp_dpath = tempfile.mkdtemp(prefix='zopen_')
temp_fpath = join(self._temp_dpath, internal)
myzip.extract(internal, self._temp_dpath)
_handle = open(temp_fpath, self.mode)
else:
# Try to load data directly from the zipfile
_handle = myzip.open(internal, 'r')
if self.mode == 'rb':
data = _handle.read()
_handle = io.BytesIO(data)
elif self.mode == 'r':
# FIXME: does not always work. handle seems to be closed
# too soon in the case util.zopen(module.__file__).read()
_handle = io.TextIOWrapper(_handle)
else:
raise KeyError(self.mode)
self._zfpath = archivefile
if _handle is None:
raise IOError('file {!r} does not exist'.format(fpath))
self._handle = _handle
def __enter__(self):
return self
def __exit__(self, ex_type, ex_value, ex_traceback):
"""
Args:
ex_type (Type[BaseException] | None):
ex_value (BaseException | None):
ex_traceback (TracebackType | None):
Returns:
bool | None
"""
self.close()
# TODO: Allow for navigating inside of the zipfile
# TODO: opening a member should not force disk decompression unless we
# really need to do real seeks. If we are just streaming the first few
# bytes, then a standard handle will work fine.
| true |
495f79c3333bf6f087449a0744eaef4bb66dc010
|
Python
|
twarogm/pp1
|
/01-TypesAndVariables/Exercises/01-27.py
|
UTF-8
| 149 | 3.390625 | 3 |
[] |
no_license
|
import math
a = int (input("wprowadz 1 liczbe naturalna"))
b = int (input("Wprowadz 2 liczbe naturalna"))
nwd = math.gcd(a,b)
print(f"NWD to {nwd}")
| true |
249baf437ad150a920d4466ee85f85dd3b555112
|
Python
|
zachdj/ultimate-tic-tac-toe
|
/services/SettingsService.py
|
UTF-8
| 616 | 2.578125 | 3 |
[
"MIT"
] |
permissive
|
"""
The Settings singleton keeps track of application-wide settings
"""
# definition of themes
default_theme = {
"path_prefix": "default",
"id": 0,
"name": "Default",
"primary": (117, 64, 160),
"secondary": (61, 189, 73),
"tertiary": (150, 150, 150),
"widget_background": (63, 63, 63),
"widget_highlight": (100, 100, 100),
"font": (200, 220, 220)
}
themes = [default_theme]
# actual settings
sfx_volume = 100
music_volume = 100
theme = default_theme
# accessors
def set_theme(selected_theme):
global theme
theme = selected_theme
def get_themes():
return themes
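# --- Editor's aside: hedged usage sketch, not part of the original module ---
# Switching the active theme through the accessors defined above.
if __name__ == '__main__':
    set_theme(get_themes()[0])
    print(theme['name'])  # -> 'Default'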
| true |
8433b6f712c83d9455b425a5d3d288dde00b18aa
|
Python
|
john-odonnell/csc_212
|
/labs/lab11/lab11.py
|
UTF-8
| 7,704 | 3.8125 | 4 |
[] |
no_license
|
import sys
import unittest
class Node:
def __init__(self, key):
self.left: Node = None
self.right: Node = None
self.key: str = key
self.count: int = 1
class BST:
""" Binary Search Tree.
"""
def __init__(self):
self.root: Node = None
self.unique_words: int = 0
self.total_words: int = 0
self.most_frequent: Node = None
def insert(self, key: str):
""" Inserts the given key into the BST.
"""
key = toLower(removePunctuation(key))
if len(key) < 1:
return
new = Node(key)
        if self.root is None:
            self.root = new
            self.most_frequent = new
            self.unique_words += 1
            self.total_words += 1
else:
placed = False
temp = self.root
while not placed:
if key < temp.key:
if temp.left is None:
temp.left = new
placed = True
self.unique_words += 1
self.total_words += 1
else:
temp = temp.left
elif key > temp.key:
if temp.right is None:
temp.right = new
placed = True
self.unique_words += 1
self.total_words += 1
else:
temp = temp.right
elif key == temp.key:
temp.count += 1
placed = True
self.total_words += 1
if placed:
if new.count > self.most_frequent.count:
self.most_frequent = new
def remove(self, key: str):
""" Remove the given key from the BST.
"""
self._remove(self.root, key, False)
def _remove(self, node: Node, key: int, found: bool):
if node.key != key and not found:
if key > node.key:
self._remove(node.right, key, found)
if node.right.key == key:
node.right = None
return
elif key < node.key:
self._remove(node.left, key, found)
if node.left.key == key:
node.left = None
return
if node.key == key:
found = True
children = self._numberOfChildren(node)
if children == 0:
node = None
elif children == 1:
if node.right is not None:
node.key = node.right.key
node.left = node.right.left
node.right = node.right.right
elif node.left is not None:
node.key = node.left.key
node.right = node.left.right
node.left = node.left.left
elif children == 2:
this_node = node.left
if this_node.right is not None:
while this_node.right.right is not None:
this_node = this_node.right
if this_node.right is None:
node.key = this_node.key
node.left = this_node.left
else:
node.key = this_node.right.key
if this_node.right.left is not None:
this_node.right = this_node.right.left
else:
this_node.right = None
return
def _numberOfChildren(self, node: Node):
if node.left is not None and node.right is not None:
return 2
elif node.left is None and node.right is None:
return 0
else:
return 1
def populate(self, A: list):
""" Creates a tree from the elements in A.
"""
for i in range(len(A)):
self.insert(A[i])
def inorder(self):
""" Performs an inorder traversal of the BST, printing each element.
"""
self._inorder(self.root)
def _inorder(self, node):
if node is None:
return
else:
self._inorder(node.left)
print(node.key)
self._inorder(node.right)
return
def preorder(self):
""" Performs a preorder traversal of the BST, printing each element.
"""
self._preorder(self.root)
def _preorder(self, node):
if node is None:
return
else:
print(node.key)
self._preorder(node.left)
self._preorder(node.right)
def postorder(self):
""" Performs a postorder traversal of the BST, printing each element.
"""
self._postorder(self.root)
def _postorder(self, node):
if node is None:
return
else:
self._postorder(node.left)
self._postorder(node.right)
print(node.key)
def search(self, key: int) -> bool:
""" Returns whether or not the given key is in the BST.
"""
return self._search(key, self.root)
def _search(self, key: int, node) -> bool:
if node is None:
return False
elif node.key == key:
return True
else:
if key < node.key:
return self._search(key, node.left)
elif key > node.key:
return self._search(key, node.right)
def height(self) -> int:
""" Returns the height of the BST.
"""
return self._height(self.root, 0)
def _height(self, node: Node, max_height: int) -> int:
if node.left is None and node.right is None:
return max_height
elif self._numberOfChildren(node) == 2:
left_height = self._height(node.left, max_height + 1)
right_height = self._height(node.right, max_height + 1)
if left_height > right_height:
return left_height
else:
return right_height
elif self._numberOfChildren(node) == 1:
if node.left is not None:
return self._height(node.left, max_height + 1)
elif node.right is not None:
return self._height(node.right, max_height + 1)
def removePunctuation(s: str) -> str:
""" Removes non-alphanumeric characters.
"""
new = ""
for i in range(len(s)):
if s[i].isalnum():
new += s[i]
return new
def toLower(s: str) -> str:
""" Convert the string to lowercase, in place.
"""
return s.lower()
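# --- Editor's aside (not part of the original lab) ---
# A quick, self-contained sanity check of the tree; the word list is made up.
def _demo_bst():
    tree = BST()
    tree.populate(['the', 'quick', 'the', 'fox'])
    return tree.search('quick')  # -> True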
class Tests(unittest.TestCase):
def test_removepun(self):
tree = BST()
tree.insert("goodday....")
self.assertEqual(tree.root.key, "goodday")
def main(filename):
""" Main.
"""
# Open the file, and store its contents as a list,
# where each element is a line from the file
with open(filename, 'r') as f:
# with open("pride.txt", "r") as f:
story = f.readlines()
book = BST()
for line in story:
line = line.strip().split()
for word in line:
book.insert(word)
print("Height . " + str(book.height()))
print("Total Words . " + str(book.total_words))
print("Unique Words . " + str(book.unique_words))
print("Most frequent . " + str(book.most_frequent.key) + " - " + str(book.most_frequent.count))
return
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('filename')
args = parser.parse_args()
main(filename=args.filename)
# main()
| true |