text stringlengths 38 1.54M |
|---|
# Demonstration: input() returns str, so "+" concatenates rather than adds.
first_text = input("Enter number 1")  # e.g. "5"
second_text = input("Enter number 2")  # e.g. "7"
# For the example inputs this prints "57": string concatenation, not arithmetic.
print(first_text + second_text)
|
'''
面向对象思想
'''
'''
软件编程实质:
软件编程就是将我们的思想转变成计算机能够识别语言的一个过程
什么是面向过程?
自上而下顺序执行,逐步求精
其程序结构是按功能划分为若干个基本模块,这是树状结构
各模块之间的关系尽可能简单,在功能上相对独立
每一模块内部均是由顺序,选择和循环三种基本结构
其模块化思想的具体方法是使用子程序
程序流程在写程序时就已决定
什么是面向对象?
把数据及对数据的操作方法放在一起,作为一个实体--对象
对同类对象抽象出其共性,形成类。
类中的大多数数据,只能用本类的方法进行处理
类通过一个简单的外部结构与外界发生关系,对象通过消息进行通信。
程序流程由用户在使用中决定
理解面向对象
面向对象是相对面向过程而言
面向对象和面向过程都是一种思想
面向过程
强调的是功能行为
关注的是解决问题需要哪些步骤
面向对象
将功能封装进对象,强调具备了功能的对象
关注的是解决问题需要哪些对象
面向对象是基于面向过程的
面向对象的特点
是一种符合人们思考习惯的思想
可以将复杂的事情简单化
将程序员从执行者换成了指挥者
完成需求时
先要去找具有所需的功能的对象来用
如果该对象不存在,那么创建一个具有所需功能的对象
类与对象的关系
使用计算机语言就是不断的在描述现实生活中的事物
Python中描述事物通过类的形式体现,类似具体事物概念上的定义
对象即是该类事物实实在在存在的个体
类的定义
生活中描述事物无非就是描述事物的名称/属性和行为
如:人的身高,体重等属性,说话,走路等行为
Python中用类来描述事物也是如此
属性:对应类中的成员变量
行为:对应类中的成员方法
定义类其实在定义类中的成员(成员变量和成员方法)
拥有相同(或者类似)属性和行为的对象都可以
类的设计
只关心3样东西
事物名称(类名):人(person)
属性:身高(height),年龄(age)
行为(功能):跑(run),打架(fight)
''' |
# coding=utf-8
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
class CourseViewTestCase(TestCase):
    """Tests for the course management view."""

    def setUp(self):
        # Fresh client per test; resolve the URL once for reuse.
        self.client = Client()
        self.url = reverse('app:course:manage')

    #def tearDown(self):
    #    pass

    def test_course_ok(self):
        """GET on the manage URL returns 200 and renders the course index."""
        response = self.client.get(self.url)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is the supported spelling with identical behaviour.
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'course/index.html')

    def test_course_error(self):
        # TODO: implement the error-path test.
        pass
#!/usr/bin/python
#-*- coding:utf-8 -*-
import xlrd
class Dingdan(object):
    """Loads order-related test data rows from Excel workbooks.

    Each public method returns the rows of the first sheet of its workbook
    as a list of row-value lists, skipping the header row (row 0).
    """

    @staticmethod
    def _read_rows(path):
        # Shared loader for all four data sets: open the workbook, take the
        # first sheet, and return every row except the header. The four
        # original methods were byte-for-byte duplicates of this logic.
        sheet = xlrd.open_workbook(path).sheets()[0]
        return [sheet.row_values(i) for i in range(1, sheet.nrows)]

    def chaxun_shuju(self):
        """Rows for the 'query order' data set."""
        return self._read_rows(r'C:\Users\kong\Desktop\python学习\接口框架练习\data\dingdan_chaxundingdan.xlsx')

    def yanqi_dingdan(self):
        """Rows for the 'cancel/postpone order' data set."""
        return self._read_rows(r'C:\Users\kong\Desktop\python学习\接口框架练习\data\dingdan_quxiaoyanqidingdan.xlsx')

    def dingdan_mingxi(self):
        """Rows for the 'order detail' data set."""
        return self._read_rows(r'C:\Users\kong\Desktop\软件测试\接口测试\接口测试实践\别克测试用例\订单明细\订单明细变量.xlsx')

    def peijian_mingxi(self):
        """Rows for the 'parts detail' data set."""
        return self._read_rows(r'C:\Users\kong\Desktop\python学习\接口框架练习\data\dingdan_peijianmingxi.xlsx')
|
import time, json, requests, os, sys
from ConfigParser import ConfigParser
from lxml import etree
from datetime import datetime
import pystache
from lib import (
get_bbox, getstate, getosc, point_in_box, point_in_poly,
hasbuildingtag, getaddresstags, hasaddresschange, loadChangeset,
addchangeset, html_tmpl, text_tmpl
)
# Resolve all relative paths against this script's own directory.
dir_path = os.path.dirname(os.path.abspath(__file__))
#
# Configure for use. See config.ini for details.
#
config = ConfigParser()
config.read(os.path.join(dir_path, 'config.ini'))
#
# Environment variables override config file.
#
if 'AREA_GEOJSON' in os.environ:
    config.set('area', 'geojson', os.environ['AREA_GEOJSON'])
if 'MAILGUN_DOMAIN' in os.environ:
    config.set('mailgun', 'domain', os.environ['MAILGUN_DOMAIN'])
if 'MAILGUN_API_KEY' in os.environ:
    config.set('mailgun', 'api_key', os.environ['MAILGUN_API_KEY'])
if 'EMAIL_RECIPIENTS' in os.environ:
    config.set('email', 'recipients', os.environ['EMAIL_RECIPIENTS'])
#
# Get started with the area of interest (AOI).
#
aoi_href = config.get('area', 'geojson')
aoi_file = os.path.join(dir_path, aoi_href)
if os.path.exists(aoi_file):
    # normal file, available locally
    aoi = json.load(open(aoi_file))
else:
    # possible remote file, try to request it
    aoi = requests.get(aoi_href).json()
# First ring of the first feature's polygon, plus its bounding box for a
# cheap pre-test before the exact point-in-polygon check.
aoi_poly = aoi['features'][0]['geometry']['coordinates'][0]
aoi_box = get_bbox(aoi_poly)
sys.stderr.write('getting state\n')
osc_file = getosc()
sys.stderr.write('reading file\n')
# nids: ids of nodes inside the AOI; changesets: per-changeset collected info.
nids = set()
changesets = {}
stats = {}
stats['buildings'] = 0
stats['addresses'] = 0
sys.stderr.write('finding points\n')
# Find nodes that fall within specified area
context = iter(etree.iterparse(osc_file, events=('start', 'end')))
# NOTE: .next() is the Python 2 iterator protocol; this file targets Python 2.
event, root = context.next()
for event, n in context:
    if event == 'start':
        if n.tag == 'node':
            lon = float(n.get('lon', 0))
            lat = float(n.get('lat', 0))
            # Bounding-box check first (cheap), exact polygon test second.
            if point_in_box(lon, lat, aoi_box) and point_in_poly(lon, lat, aoi_poly):
                cid = n.get('changeset')
                nid = n.get('id', -1)
                nids.add(nid)
                ntags = n.findall(".//tag[@k]")
                addr_tags = getaddresstags(ntags)
                version = int(n.get('version'))
                # Capture address changes
                if version != 1:
                    # Existing node: record only if its address actually changed.
                    if hasaddresschange(nid, addr_tags, version, 'node'):
                        addchangeset(n, cid, changesets)
                        changesets[cid]['nids'].add(nid)
                        changesets[cid]['addr_chg_nd'].add(nid)
                        stats['addresses'] += 1
                elif len(addr_tags):
                    # Brand-new node (version 1) that carries address tags.
                    addchangeset(n, cid, changesets)
                    changesets[cid]['nids'].add(nid)
                    changesets[cid]['addr_chg_nd'].add(nid)
                    stats['addresses'] += 1
    # Free the parsed elements to keep memory bounded while streaming.
    n.clear()
    root.clear()
sys.stderr.write('finding changesets\n')
# Find ways that contain nodes that were previously determined to fall within specified area
context = iter(etree.iterparse(osc_file, events=('start', 'end')))
event, root = context.next()
for event, w in context:
    if event == 'start':
        if w.tag == 'way':
            relevant = False
            cid = w.get('changeset')
            wid = w.get('id', -1)
            # Only if the way has 'building' tag
            if hasbuildingtag(w):
                for nd in w.iterfind('./nd'):
                    # A way is relevant if any of its node refs is in the AOI.
                    if nd.get('ref', -2) in nids:
                        relevant = True
                        addchangeset(w, cid, changesets)
                        nid = nd.get('ref', -2)
                        changesets[cid]['nids'].add(nid)
                        changesets[cid]['wids'].add(wid)
            if relevant:
                stats['buildings'] += 1
                wtags = w.findall(".//tag[@k]")
                version = int(w.get('version'))
                addr_tags = getaddresstags(wtags)
                # Capture address changes
                if version != 1:
                    # Existing way: record only if its address actually changed.
                    if hasaddresschange(wid, addr_tags, version, 'way'):
                        changesets[cid]['addr_chg_way'].add(wid)
                        stats['addresses'] += 1
                elif len(addr_tags):
                    # Brand-new way (version 1) that carries address tags.
                    changesets[cid]['addr_chg_way'].add(wid)
                    stats['addresses'] += 1
    # Free the parsed elements to keep memory bounded while streaming.
    w.clear()
    root.clear()
# In Python 2 map() returns a list, so len() and slicing below work directly.
changesets = map(loadChangeset, changesets.values())
stats['total'] = len(changesets)
# Cap the report size for performance.
# NOTE(review): the slice keeps 999 entries although the message says the
# first 1000 are displayed — possible off-by-one; confirm intent.
if len(changesets) > 1000:
    changesets = changesets[:999]
    stats['limit_exceed'] = 'Note: For performance reasons only the first 1000 changesets are displayed.'
now = datetime.now()
# Render the HTML and plain-text report bodies from the mustache templates.
html_version = pystache.render(html_tmpl, {
    'changesets': changesets,
    'stats': stats,
    'date': now.strftime("%B %d, %Y")
})
text_version = pystache.render(text_tmpl, {
    'changesets': changesets,
    'stats': stats,
    'date': now.strftime("%B %d, %Y")
})
# Send the report through the Mailgun HTTP API.
resp = requests.post(('https://api.mailgun.net/v2/%s/messages' % config.get('mailgun', 'domain')),
    auth = ('api', config.get('mailgun', 'api_key')),
    data = {
        'from': 'Change Within <changewithin@%s>' % config.get('mailgun', 'domain'),
        'to': config.get('email', 'recipients').split(),
        'subject': 'OSM building and address changes %s' % now.strftime("%B %d, %Y"),
        'text': text_version,
        "html": html_version,
    })
# Keep a local copy of the HTML report, then delete the downloaded .osc file.
f_out = open('osm_change_report_%s.html' % now.strftime("%m-%d-%y"), 'w')
f_out.write(html_version.encode('utf-8'))
f_out.close()
os.unlink(osc_file)
# print html_version
# print resp, resp.text
|
import threading
import socket
import base64
from typing import TYPE_CHECKING
from PyQt5.QtCore import QObject, pyqtSignal, pyqtProperty, pyqtSlot
from electrum.i18n import _
from electrum.plugin import hook
from electrum.bip32 import xpub_type, BIP32Node
from electrum.util import UserFacingException
from electrum import keystore
from electrum.gui.qml.qewallet import QEWallet
from electrum.gui.qml.plugins import PluginQObject
from .trustedcoin import (TrustedCoinPlugin, server, ErrorConnectingServer,
MOBILE_DISCLAIMER, get_user_id, get_signing_xpub,
TrustedCoinException, make_xpub)
if TYPE_CHECKING:
from electrum.gui.qml import ElectrumQmlApplication
from electrum.wallet import Abstract_Wallet
class Plugin(TrustedCoinPlugin):
    """QML (mobile) front-end glue for the TrustedCoin two-factor-auth plugin.

    Network calls to the TrustedCoin server run on daemon threads and report
    their results back to QML via Qt signals on QSignalObject.
    """

    class QSignalObject(PluginQObject):
        """Qt bridge object exposing plugin state and signals to QML."""

        canSignWithoutServerChanged = pyqtSignal()
        # Set when the wallet holds both private keys (server not needed).
        _canSignWithoutServer = False
        termsAndConditionsRetrieved = pyqtSignal([str], arguments=['message'])
        termsAndConditionsError = pyqtSignal([str], arguments=['message'])
        otpError = pyqtSignal([str], arguments=['message'])
        otpSuccess = pyqtSignal()
        disclaimerChanged = pyqtSignal()
        keystoreChanged = pyqtSignal()
        otpSecretChanged = pyqtSignal()
        _otpSecret = ''
        shortIdChanged = pyqtSignal()
        _shortId = ''
        billingModelChanged = pyqtSignal()
        _billingModel = []
        _remoteKeyState = ''
        remoteKeyStateChanged = pyqtSignal()
        remoteKeyError = pyqtSignal([str], arguments=['message'])
        requestOtp = pyqtSignal()

        def __init__(self, plugin, parent):
            super().__init__(plugin, parent)

        @pyqtProperty(str, notify=disclaimerChanged)
        def disclaimer(self):
            # Disclaimer paragraphs joined into one displayable string.
            return '\n\n'.join(MOBILE_DISCLAIMER)

        @pyqtProperty(bool, notify=canSignWithoutServerChanged)
        def canSignWithoutServer(self):
            return self._canSignWithoutServer

        @pyqtProperty('QVariantMap', notify=keystoreChanged)
        def keystore(self):
            return self._keystore

        @pyqtProperty(str, notify=otpSecretChanged)
        def otpSecret(self):
            return self._otpSecret

        @pyqtProperty(str, notify=shortIdChanged)
        def shortId(self):
            return self._shortId

        @pyqtSlot(str)
        def otpSubmit(self, otp):
            # NOTE(review): this method uses self._plugin while others use
            # self.plugin — presumably both resolve to the same object via
            # PluginQObject; confirm.
            self._plugin.on_otp(otp)

        @pyqtProperty(str, notify=remoteKeyStateChanged)
        def remoteKeyState(self):
            return self._remoteKeyState

        @remoteKeyState.setter
        def remoteKeyState(self, new_state):
            # Only emit the change signal on an actual state transition.
            if self._remoteKeyState != new_state:
                self._remoteKeyState = new_state
                self.remoteKeyStateChanged.emit()

        @pyqtProperty('QVariantList', notify=billingModelChanged)
        def billingModel(self):
            return self._billingModel

        def updateBillingInfo(self, wallet):
            """Rebuild the billing options list from the wallet's price table."""
            billingModel = []
            price_per_tx = wallet.price_per_tx
            for k, v in sorted(price_per_tx.items()):
                if k == 1:
                    # Skip the pay-per-single-transaction entry.
                    continue
                item = {
                    'text': 'Pay every %d transactions' % k,
                    'value': k,
                    'sats_per_tx': v/k
                }
                billingModel.append(item)
            self._billingModel = billingModel
            self.billingModelChanged.emit()

        @pyqtSlot()
        def fetchTermsAndConditions(self):
            """Fetch the terms of service on a daemon thread; report via signals."""
            def fetch_task():
                try:
                    self.plugin.logger.debug('TOS')
                    tos = server.get_terms_of_service()
                except ErrorConnectingServer as e:
                    self.termsAndConditionsError.emit(_('Error connecting to server'))
                except Exception as e:
                    self.termsAndConditionsError.emit('%s: %s' % (_('Error'), repr(e)))
                else:
                    self.termsAndConditionsRetrieved.emit(tos)
                finally:
                    # Always clear the busy flag, success or failure.
                    self._busy = False
                    self.busyChanged.emit()
            self._busy = True
            self.busyChanged.emit()
            t = threading.Thread(target=fetch_task)
            t.daemon = True
            t.start()

        @pyqtSlot(str)
        def createKeystore(self, email):
            """Create local keys, then register with the TrustedCoin server
            on a background thread to obtain the OTP secret."""
            self.remoteKeyState = ''
            self._otpSecret = ''
            self.otpSecretChanged.emit()
            xprv1, xpub1, xprv2, xpub2, xpub3, short_id = self.plugin.create_keys()
            def create_remote_key_task():
                try:
                    self.plugin.logger.debug('create remote key')
                    r = server.create(xpub1, xpub2, email)
                    otp_secret = r['otp_secret']
                    _xpub3 = r['xpubkey_cosigner']
                    _id = r['id']
                except (socket.error, ErrorConnectingServer) as e:
                    self.remoteKeyState = 'error'
                    self.remoteKeyError.emit(f'Network error: {str(e)}')
                except TrustedCoinException as e:
                    if e.status_code == 409:
                        # Server already knows this wallet.
                        self.remoteKeyState = 'wallet_known'
                        self._shortId = short_id
                        self.shortIdChanged.emit()
                    else:
                        self.remoteKeyState = 'error'
                        self.logger.warning(str(e))
                        self.remoteKeyError.emit(f'Service error: {str(e)}')
                except (KeyError,TypeError) as e: # catch any assumptions
                    self.remoteKeyState = 'error'
                    self.remoteKeyError.emit(f'Error: {str(e)}')
                    self.logger.error(str(e))
                else:
                    # Sanity-check the server reply against local derivation.
                    if short_id != _id:
                        self.remoteKeyState = 'error'
                        self.logger.error("unexpected trustedcoin short_id: expected {}, received {}".format(short_id, _id))
                        self.remoteKeyError.emit('Unexpected short_id')
                        return
                    if xpub3 != _xpub3:
                        self.remoteKeyState = 'error'
                        self.logger.error("unexpected trustedcoin xpub3: expected {}, received {}".format(xpub3, _xpub3))
                        self.remoteKeyError.emit('Unexpected trustedcoin xpub3')
                        return
                    self.remoteKeyState = 'new'
                    self._otpSecret = otp_secret
                    self.otpSecretChanged.emit()
                    self._shortId = short_id
                    self.shortIdChanged.emit()
                finally:
                    self._busy = False
                    self.busyChanged.emit()
            self._busy = True
            self.busyChanged.emit()
            t = threading.Thread(target=create_remote_key_task)
            t.daemon = True
            t.start()

        @pyqtSlot()
        def resetOtpSecret(self):
            """Re-request the OTP secret by signing a server challenge with
            both private keys; runs on a background thread."""
            self.remoteKeyState = ''
            xprv1, xpub1, xprv2, xpub2, xpub3, short_id = self.plugin.create_keys()
            def reset_otp_task():
                try:
                    # TODO: move reset request to UI agnostic plugin section
                    self.plugin.logger.debug('reset_otp')
                    r = server.get_challenge(short_id)
                    challenge = r.get('challenge')
                    message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
                    def f(xprv):
                        # Sign the challenge with the key at derivation (0, 0).
                        rootnode = BIP32Node.from_xkey(xprv)
                        key = rootnode.subkey_at_private_derivation((0, 0)).eckey
                        sig = key.sign_message(message, True)
                        return base64.b64encode(sig).decode()
                    signatures = [f(x) for x in [xprv1, xprv2]]
                    r = server.reset_auth(short_id, challenge, signatures)
                    otp_secret = r.get('otp_secret')
                except (socket.error, ErrorConnectingServer) as e:
                    self.remoteKeyState = 'error'
                    self.remoteKeyError.emit(f'Network error: {str(e)}')
                except Exception as e:
                    self.remoteKeyState = 'error'
                    self.remoteKeyError.emit(f'Error: {str(e)}')
                else:
                    self.remoteKeyState = 'reset'
                    self._otpSecret = otp_secret
                    self.otpSecretChanged.emit()
                finally:
                    self._busy = False
                    self.busyChanged.emit()
            self._busy = True
            self.busyChanged.emit()
            t = threading.Thread(target=reset_otp_task, daemon=True)
            t.start()

        @pyqtSlot(str, int)
        def checkOtp(self, short_id, otp):
            """Verify a one-time password against the server (background)."""
            def check_otp_task():
                try:
                    self.plugin.logger.debug(f'check OTP, shortId={short_id}, otp={otp}')
                    server.auth(short_id, otp)
                except TrustedCoinException as e:
                    if e.status_code == 400: # invalid OTP
                        self.plugin.logger.debug('Invalid one-time password.')
                        self.otpError.emit(_('Invalid one-time password.'))
                    else:
                        self.plugin.logger.error(str(e))
                        self.otpError.emit(f'Service error: {str(e)}')
                except Exception as e:
                    self.plugin.logger.error(str(e))
                    self.otpError.emit(f'Error: {str(e)}')
                else:
                    self.plugin.logger.debug('OTP verify success')
                    self.otpSuccess.emit()
                finally:
                    self._busy = False
                    self.busyChanged.emit()
            self._busy = True
            self.busyChanged.emit()
            t = threading.Thread(target=check_otp_task, daemon=True)
            t.start()

    def __init__(self, *args):
        super().__init__(*args)

    @hook
    def load_wallet(self, wallet: 'Abstract_Wallet'):
        """Hook: enable the plugin for 2FA wallets when one is loaded."""
        if not isinstance(wallet, self.wallet_class):
            return
        self.logger.debug(f'plugin enabled for wallet "{str(wallet)}"')
        if wallet.can_sign_without_server():
            # Wallet was restored from seed and holds both private keys.
            self.so._canSignWithoutServer = True
            self.so.canSignWithoutServerChanged.emit()
            msg = ' '.join([
                _('This wallet was restored from seed, and it contains two master private keys.'),
                _('Therefore, two-factor authentication is disabled.')
            ])
            self.logger.info(msg)
        self.start_request_thread(wallet)

    @hook
    def init_qml(self, app: 'ElectrumQmlApplication'):
        """Hook: wire the plugin into the QML application at startup."""
        self.logger.debug(f'init_qml hook called, gui={str(type(app))}')
        self._app = app
        # important: QSignalObject needs to be parented, as keeping a ref
        # in the plugin is not enough to avoid gc
        self.so = Plugin.QSignalObject(self, self._app)
        # extend wizard
        self.extend_wizard()

    # wizard support functions
    def extend_wizard(self):
        """Merge the trustedcoin wizard pages into the new-wallet wizard."""
        wizard = self._app.daemon.newWalletWizard
        self.logger.debug(repr(wizard))
        views = {
            'trustedcoin_start': {
                'gui': '../../../../plugins/trustedcoin/qml/Disclaimer',
                'next': 'trustedcoin_choose_seed'
            },
            'trustedcoin_choose_seed': {
                'gui': '../../../../plugins/trustedcoin/qml/ChooseSeed',
                'next': lambda d: 'trustedcoin_create_seed' if d['keystore_type'] == 'createseed'
                        else 'trustedcoin_have_seed'
            },
            'trustedcoin_create_seed': {
                'gui': 'WCCreateSeed',
                'next': 'trustedcoin_confirm_seed'
            },
            'trustedcoin_confirm_seed': {
                'gui': 'WCConfirmSeed',
                'next': 'trustedcoin_tos_email'
            },
            'trustedcoin_have_seed': {
                'gui': 'WCHaveSeed',
                'next': 'trustedcoin_keep_disable'
            },
            'trustedcoin_keep_disable': {
                'gui': '../../../../plugins/trustedcoin/qml/KeepDisable',
                'next': lambda d: 'trustedcoin_tos_email' if d['trustedcoin_keepordisable'] != 'disable'
                        else 'wallet_password',
                'accept': self.recovery_disable,
                'last': lambda v,d: wizard.is_single_password() and d['trustedcoin_keepordisable'] == 'disable'
            },
            'trustedcoin_tos_email': {
                'gui': '../../../../plugins/trustedcoin/qml/Terms',
                'next': 'trustedcoin_show_confirm_otp'
            },
            'trustedcoin_show_confirm_otp': {
                'gui': '../../../../plugins/trustedcoin/qml/ShowConfirmOTP',
                'accept': self.on_accept_otp_secret,
                'next': 'wallet_password',
                'last': lambda v,d: wizard.is_single_password()
            }
        }
        wizard.navmap_merge(views)

    # combined create_keystore and create_remote_key pre
    def create_keys(self):
        """Derive the two local key pairs from the wizard seed and compute
        the deterministic third (co-signer) xpub plus the short user id."""
        wizard = self._app.daemon.newWalletWizard
        wizard_data = wizard._current.wizard_data
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(wizard_data['seed'], wizard_data['seed_extra_words'])
        # NOTE: at this point, old style wizard creates a wallet file (w. password if set) and
        # stores the keystores and wizard state, in order to separate offline seed creation
        # and online retrieval of the OTP secret. For mobile, we don't do this, but
        # for desktop the wizard should support this usecase.
        data = {'x1/': {'xpub': xpub1}, 'x2/': {'xpub': xpub2}}
        # Generate third key deterministically.
        long_user_id, short_id = get_user_id(data)
        xtype = xpub_type(xpub1)
        xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
        return (xprv1,xpub1,xprv2,xpub2,xpub3,short_id)

    def on_accept_otp_secret(self, wizard_data):
        """Wizard accept: 2FA enabled — x2 stays watch-only (server co-signs)."""
        self.logger.debug('OTP secret accepted, creating keystores')
        xprv1,xpub1,xprv2,xpub2,xpub3,short_id = self.create_keys()
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xpub(xpub2)
        k3 = keystore.from_xpub(xpub3)
        wizard_data['x1/'] = k1.dump()
        wizard_data['x2/'] = k2.dump()
        wizard_data['x3/'] = k3.dump()

    def recovery_disable(self, wizard_data):
        """Wizard accept: 2FA disabled — both private keys stored locally."""
        if wizard_data['trustedcoin_keepordisable'] != 'disable':
            return
        self.logger.debug('2fa disabled, creating keystores')
        xprv1,xpub1,xprv2,xpub2,xpub3,short_id = self.create_keys()
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xprv(xprv2)
        k3 = keystore.from_xpub(xpub3)
        wizard_data['x1/'] = k1.dump()
        wizard_data['x2/'] = k2.dump()
        wizard_data['x3/'] = k3.dump()

    # running wallet functions
    def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
        """Ask the QML wallet UI for an OTP; on_otp receives the answer."""
        self.logger.debug('prompt_user_for_otp')
        self.on_success = on_success
        self.on_failure = on_failure if on_failure else lambda x: self.logger.error(x)
        self.wallet = wallet
        self.tx = tx
        qewallet = QEWallet.getInstanceFor(wallet)
        qewallet.request_otp(self.on_otp)

    def on_otp(self, otp):
        """Callback with the user-entered OTP; completes or fails the tx signing."""
        if not otp:
            self.on_failure(_('No auth code'))
            return
        self.logger.debug(f'on_otp {otp} for tx {repr(self.tx)}')
        try:
            self.wallet.on_otp(self.tx, otp)
        except UserFacingException as e:
            self.on_failure(_('Invalid one-time password.'))
        except TrustedCoinException as e:
            if e.status_code == 400: # invalid OTP
                self.on_failure(_('Invalid one-time password.'))
            else:
                self.on_failure(_('Service Error') + ':\n' + str(e))
        except Exception as e:
            self.on_failure(_('Error') + ':\n' + str(e))
        else:
            self.on_success(self.tx)

    def billing_info_retrieved(self, wallet):
        """Push refreshed billing info to the QML wallet and signal object."""
        self.logger.info('billing_info_retrieved')
        qewallet = QEWallet.getInstanceFor(wallet)
        qewallet.billingInfoChanged.emit()
        self.so.updateBillingInfo(wallet)
|
from skimage.io import imread, imsave
import matplotlib.pyplot as plt
import numpy as np
import os
import warnings
def read_rich_labels(path):
    """
    Parse <path>/rich_labels.txt.

    Each line is expected to be '<image name> <latitude> <longitude>'.

    Returns a dictionary:
        key: file name
        value: a tuple of floats (<latitude>, <longitude>)
    """
    location_dict = {}
    with open(os.path.join(path, 'rich_labels.txt')) as f:
        content = f.readlines()
    for line in content:
        linecontent = line.split()
        # make sure each line is structured as follows:<image name> <latitude> <longitude>
        assert len(linecontent) >= 3, "Unexpectedly short line in rich_labels.txt: " + line
        if len(linecontent) > 3:
            warnings.warn('Unexpected line in rich_labels.txt: ' + line +
                          '\n Using first three words: ' + str(linecontent), stacklevel=0)
        try:
            lat, lon = float(linecontent[1]), float(linecontent[2])
            location_dict[linecontent[0]] = (lat, lon)
            # Sanity check that lat/long are not flipped. For North America
            # latitude is positive (~25..70) and longitude negative, so a
            # correct pair has lat > lon. The original asserted lat <= lon,
            # which raised on *correct* data (and escaped the ValueError
            # handler); warn instead of crashing, with the right direction.
            if lat < lon:
                warnings.warn("Possibly flipped lat/long in rich_labels.txt: " +
                              str(linecontent[1:3]), stacklevel=0)
        except ValueError:
            warnings.warn("Unexpected lat/long in rich_labels.txt: " +
                          str(linecontent[1:3]), stacklevel=0)
    return location_dict
def read_image_files(path):
    """
    Load every .png image found directly in *path*.

    Returns a dictionary:
        key: file name (string)
        value: (256, 256, 3) array (integer)
    """
    img_dict = {}
    for fname in os.listdir(path):
        extension = os.path.splitext(fname)[1].lower()
        assert extension != '.jpg', "Make sure you do not save the pictures as .jpg files,"
        if extension != '.png':
            continue
        image = imread(os.path.join(path, fname))
        img_dict[fname] = image
        # Images should already be cropped/resized to 256x256 RGB.
        if image.shape != (256, 256, 3):
            warnings.warn("Unexpected image size: " + str(image.shape), stacklevel=0)
    return img_dict
def plot_20_images(img_dict, title_dict):
    """
    Show up to 20 images from img_dict in a 4x5 grid, titled from title_dict.
    """
    plt.figure(figsize=(8, 8))
    names = list(img_dict.keys())
    for slot in range(20):
        plt.subplot(4, 5, slot + 1)
        if slot >= len(names):
            continue  # fewer than 20 images: leave the remaining axes empty
        name = names[slot]
        plt.imshow(img_dict[name])
        try:
            plt.title(title_dict[name])
        except KeyError:
            warnings.warn("Key missing from title dictionary for " + name, stacklevel=0)
    plt.tight_layout()
# Load both image sets with their lat/long labels and preview each set.
acc_imgs = read_image_files('./data/acc/')
acc_locs = read_rich_labels('./data/acc/')
plot_20_images(acc_imgs, acc_locs)
inacc_imgs = read_image_files('./data/inacc/')
inacc_locs = read_rich_labels('./data/inacc/')
plot_20_images(inacc_imgs, inacc_locs)
plt.show()
|
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn import tree
import warnings
import sys
import cv2
from skimage import io, color, img_as_ubyte
from skimage.feature import greycomatrix, greycoprops
from sklearn.metrics.cluster import entropy
from scipy.stats import skew
warnings.filterwarnings('ignore')
#droppedFile = sys.argv[1]
#print droppedFile
#filenya = droppedFile
#pdb.set_trace()
# Test images to classify.
# NOTE(review): the original indentation was lost; training/prediction are
# assumed to run inside the per-image loop (each image gets its own print).
images = ['./TEST/BlueTangFish.jpeg','./TEST/ClownFish.jpg',
          './TEST/PariTutul.jpg','./TEST/YellowButterflyFish.jpg']
for x in images:
    image = cv2.imread(x)
    red, green, blue = cv2.split(image)
    image2 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Threshold first, then compute Hu moments (7 shape features).
    ret,image3 = cv2.threshold(image2,127,255,cv2.THRESH_BINARY)
    #humoment = cv2.HuMoments(cv2.moments(image2)).flatten()
    humoment = cv2.HuMoments(cv2.moments(image3)).flatten()
    # Texture features via GLCM: 3 distances x 4 angles x 4 properties = 48.
    grayImg = img_as_ubyte(color.rgb2gray(image))
    distances = [1, 2, 3]
    angles = [0, np.pi/4, np.pi/2, 3*np.pi/4]
    properties = ['energy', 'dissimilarity', 'contrast', 'homogeneity']
    glcm = greycomatrix(grayImg,
                        distances=distances,
                        angles=angles,
                        symmetric=True,
                        normed=True)
    feats = np.hstack([greycoprops(glcm, prop).ravel() for prop in properties])
    # Colour features (9): mean, std and variance per BGR channel.
    red, green, blue = cv2.split(image)
    fr, frsd, varr = np.mean(red), np.std(red), np.var(red)
    fg, fgsd, varg = np.mean(green), np.std(green), np.var(green)
    fb, fbsd, varb = np.mean(blue), np.std(blue), np.var(blue)
    ciriwarna = np.array([fr, frsd, fg, fgsd,fb, fbsd, varr, varg, varb])
    feats = np.concatenate((feats, ciriwarna), axis=0)
    feats = np.concatenate((feats, humoment), axis=0)
    datafitur = list(feats)
    # Train a decision tree on the reference data, then classify this image.
    # NOTE(review): the CSV is re-read and the tree re-trained every
    # iteration; hoisting this out of the loop would be cheaper — confirm.
    data = pd.read_csv('./DataHewanLaut.csv')
    y = data.KELAS
    X = data.drop('KELAS', axis=1)
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(X, y)
    X_test = np.array(datafitur).reshape(1, -1)
    print(X_test)
    y_predict = clf.predict(X_test)
    print(y_predict)
    #y_predict2 = clf.predict_proba(X_test)
    #proba = max(y_predict2[0,i] for i in range(1))*100
    #print "Probrabilitas = " + str(proba) + "%"
|
from __future__ import unicode_literals, print_function, absolute_import
from marshmallow import MarshalResult
from marshmallow import Schema, ValidationError
from restie.exceptions import InvalidArgumentsError
from werkzeug.datastructures import CombinedMultiDict, MultiDict
from .base import MethodsDecoratorEntrypoint
from .collector import ApiMethodsCollector
class MarshalWith(MethodsDecoratorEntrypoint):
    """Validates incoming request data against a marshmallow schema.

    The combined args/form/kwargs payload is dumped and validated; the
    result is attached to the request as ``request.valid``.
    """
    collector = ApiMethodsCollector()

    def __init__(self, schema, strict=True, **kwargs):
        assert issubclass(schema, Schema), 'Invalid schema!'
        self.schema = schema(strict=strict, **kwargs)

    def setup(self):
        # Make this validator discoverable through the shared collector.
        self.collector.register_provider(self)

    def stop(self):
        self.collector.unregister_provider(self)
        super(MarshalWith, self).stop()

    def validate(self, request, **kwargs):
        """Return (dumped data, validation errors) for the combined payload."""
        combined = CombinedMultiDict([
            request.args.copy(),
            request.form.copy(),
            MultiDict(kwargs.copy())
        ])
        dumped = self.schema.dump(combined)
        return dumped.data, self.schema.validate(combined)

    def process_request(self, request, *args, **kwargs):
        """Validate the request, raising InvalidArgumentsError on failure."""
        try:
            data, errors = self.validate(request, **kwargs)
            if errors:
                raise InvalidArgumentsError(errors)
        except ValidationError as e:
            raise InvalidArgumentsError(e.messages)
        setattr(request, 'valid', MarshalResult(data, errors))
        return request
|
# Problem ID: ENTEXAM
# Problem Name: Entrance Exam
for _ in range(int(input())):
    n, k, e, m = map(int, input().split())
    # Totals of the other n-1 students across their exams so far.
    others = [sum(map(int, input().split())) for _ in range(n - 1)]
    sergey = sum(map(int, input().split()))
    others.sort(reverse=True)
    # Sergey must strictly beat the k-th best of the other students.
    needed = others[k - 1] + 1 - sergey
    if needed <= m:
        print(max(needed, 0))
    else:
        print("Impossible")
|
'''
定制类
看到类似__slots__这种形如__xxx__的变量或者函数名就要注意,这些在Python中是有特殊用途的。
'''
'''
__str__
'''
# Without __str__, print() falls back to the default object description.
class Student(object):
    def __init__(self, name):
        self.name = name

print(Student('Mic'))

# Redefinition (tutorial style): __str__ gives a readable description.
class Student(object):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return 'Student object (name:%s)' % self.name

print(Student('Mic'))
# __str__() returns the string shown to users, while __repr__() returns the
# string for developers — that is, __repr__() exists for debugging.
s = Student('Michael')
print(s)
print(s.__repr__())
'''
__iter__
如果一个类想被用于for ... in循环,类似list或tuple那样,就必须实现一个__iter__()方法,该方法返回一个迭代对象
然后,Python的for循环就会不断调用该迭代对象的__next__()方法拿到循环的下一个值,直到遇到StopIteration错误时退出循环。
'''
# 以斐波那契数列为例,写一个Fib类,可以作用于for循环:
# Fibonacci example: a class usable directly in a for loop via __iter__/__next__.
class Fib(object):
    def __init__(self):
        # a, b hold the current and next values of the sequence.
        self.a,self.b = 0,1

    def __iter__(self):
        # The instance is its own iterator.
        return self

    def __next__(self):
        self.a,self.b = self.b,self.a+self.b
        if self.a > 100000:
            # End the for loop once values exceed 100000.
            raise StopIteration()
        return self.a

for n in Fib():
    print(n)
'''
__getitem__
Fib实例虽然能作用于for循环,看起来和list有点像,但是,把它当成list来使用还是不行
print(Fib()[5])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'Fib' object does not support indexing
要表现得像list那样按照下标取出元素,需要实现__getitem__()方法:
'''
class Fib(object):
    """Fibonacci sequence exposed via index access: fib[0] == 1, fib[1] == 1, ..."""
    def __getitem__(self, n):
        current, following = 1, 1
        for _ in range(n):
            current, following = following, current + following
        return current

f = Fib()
print(f[0])
print(f[10])
print(f[20])
print(f[30])
print(f[40])
print(f[50])
# 切片功能
# __getitem__()传入的参数可能是一个int,也可能是一个切片对象slice,所以要做判断:
class Fib(object):
    """Fibonacci sequence supporting both indexing and slicing.

    fib[n] returns the n-th value (fib[0] == 1); fib[a:b] returns the values
    at positions a..b-1, and fib[a:b:s] additionally honours the slice step
    (the original silently ignored it). Negative indices are not supported.
    """
    def __getitem__(self, n):
        if isinstance(n, int):
            a, b = 1, 1
            for _ in range(n):
                a, b = b, a + b
            return a
        if isinstance(n, slice):
            # A missing start means "from the beginning".
            start = 0 if n.start is None else n.start
            stop = n.stop
            values = []
            a, b = 1, 1
            for x in range(stop):
                if x >= start:
                    values.append(a)
                a, b = b, a + b
            # Apply the slice step, which the original implementation ignored.
            if n.step is not None:
                return values[::n.step]
            return values

f = Fib()
print(f[0:10])
print(f[:20])
'''
__getattr__
正常情况下,当我们调用类的方法或属性时,如果不存在,就会报错。
'''
# Normally a missing attribute raises AttributeError; defining __getattr__
# lets the class answer for attributes not found the usual way.
class Student(object):
    """Returns 99 for 'score'; any other missing attribute echoes its own name."""
    def __init__(self):
        self.name = 'Michael'

    def __getattr__(self, attr):
        return 99 if attr == 'score' else attr

s = Student()
print(s.name)
print(s.score)
print(s.asd)
# 链式调用
class Chain(object):
def __init__(self, path = ''):
self._path = path
def __getattr__(self,path):
return Chain('%s/%s' % (self._path,path))
def __str__(self):
return self._path
__repr__ = __str__
print(Chain().status.user.timeline.list)
'''
__call__
一个对象实例可以有自己的属性和方法,当我们调用实例方法时,我们用instance.method()来调用。
'''
# Any class that defines __call__() makes its instances directly callable.
class Student(object):
    def __init__(self, name):
        self.name = name

    def __call__(self):
        print('My name is %s.' % self.name)

student = Student('Michael')
student()
Student('jue')()

# Is a variable an object or a function? The more useful question is whether
# it can be called: callable() answers that for functions, builtins, and
# instances of classes that define __call__().
print(callable(Student('')))
print(callable(max))
print(callable([1,2,3]))
print(callable(None))
print(callable('str'))
|
import sys
import configparser
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from time import sleep
# Read credentials from the local config file.
# SafeConfigParser was a deprecated alias (removed in Python 3.12);
# ConfigParser is the supported name with the same behaviour.
inifile = configparser.ConfigParser()
inifile.read('/Users/TK/project/auto_point/config.ini')
mail = inifile.get('voicenote', 'id')
passwd = inifile.get('voicenote', 'pass')
login_url = "http://www.voicenote.jp/"
# Requires a chromedriver binary matching the installed Chrome version.
driver = webdriver.Chrome('/Users/TK/project/auto_point/chromedriver')
driver.get(login_url)
# Locate the login form; bail out cleanly if the page layout changed.
try:
    form = driver.find_element_by_tag_name('form')
except NoSuchElementException:
    driver.quit()
    sys.exit()
# Fill in the credentials, then submit via the login button.
for tag in form.find_elements_by_tag_name('input'):
    name = tag.get_attribute('name')
    if name == "login_email":
        tag.send_keys(mail)
    elif name == "login_pass":
        tag.send_keys(passwd)
    elif name == "login":
        tag.submit()
        break
links = []
first = True
# Walk the paginated questionnaire list, collecting every entry link.
while True:
    ul_tag = driver.find_element_by_css_selector("ul.enquete-list")
    li_tags = ul_tag.find_elements_by_tag_name("li")
    for tag in li_tags:
        try:
            a_tag = tag.find_element_by_tag_name("a")
            links.append(str(a_tag.get_attribute('href')))
        except NoSuchElementException:
            break
    try:
        div_tag = driver.find_element_by_css_selector("div.pagination")
        if first == True:
            # Presumably on the first page the only pagination link points
            # to page 2 — TODO confirm against the live markup.
            a_tag = div_tag.find_element_by_tag_name("a")
            driver.get(str(a_tag.get_attribute('href')))
            first = False
            sleep(1)
            continue
    except NoSuchElementException:
        # No pagination element at all: single page, done.
        break
    # On later pages the second link appears to be "next"; a single link
    # means this is the last page.
    a_tags = div_tag.find_elements_by_tag_name("a")
    if len(a_tags) <= 1:
        break
    driver.get(str(a_tags[1].get_attribute('href')))
    sleep(1)
# Visit each questionnaire and fill in every answer row.
for link in links:
    print(link)
    driver.get(link)
    sleep(1)
    while True:
        try:
            form = driver.find_element_by_tag_name("form")
            tbody = form.find_element_by_tag_name("tbody")
            tr_tags = tbody.find_elements_by_tag_name("tr")
        except NoSuchElementException:
            break
        for tag in tr_tags:
            # Free-text answer: fill in a placeholder response.
            try:
                text = tag.find_element_by_tag_name("textarea")
                text.send_keys("特になし")
                continue
            except NoSuchElementException:
                pass
            # Drop-down: pick the first real option (index 1).
            try:
                select = tag.find_element_by_tag_name("select")
                option = select.find_elements_by_tag_name("option")[1]
                select_element = Select(select)
                value = str(option.get_attribute('value'))
                select_element.select_by_value(value)
                continue
            except NoSuchElementException:
                pass
            # NOTE(review): `input` and `type` shadow builtins here; kept
            # as-is since this is a documentation-only pass.
            try:
                input = tag.find_element_by_tag_name("input")
            except NoSuchElementException:
                continue
            type = input.get_attribute("type")
            if type == "radio" or type == "checkbox":
                # Click via JS to avoid element-not-interactable issues.
                driver.execute_script("arguments[0].click();", input)
                continue
        # Multi-page questionnaire: follow the "次" (next) link if present,
        # otherwise confirm and submit the completion form.
        page = form.find_elements_by_css_selector("div.pagination")
        if len(page) > 0:
            a_tags = page[0].find_elements_by_tag_name("a")
            for a_tag in a_tags:
                if a_tag.text.find("次") > -1:
                    driver.execute_script("arguments[0].click();", a_tag)
                    sleep(1)
                    continue
        else:
            conf = form.find_element_by_css_selector("input#confirm")
            driver.execute_script("arguments[0].click();", conf)
            sleep(1)
            form_tags = driver.find_elements_by_tag_name("form")
            for form in form_tags:
                name = form.get_attribute("name")
                if name == "form_complete":
                    input = form.find_element_by_css_selector("input#complete")
                    driver.execute_script("arguments[0].click();", input)
                    sleep(1)
                    break
driver.quit()
from gen3.tools.metadata.ingest_manifest import async_ingest_metadata_manifest
from gen3.tools.metadata.ingest_manifest import async_query_urls_from_indexd
from gen3.tools.metadata.verify_manifest import async_verify_metadata_manifest
|
#!/usr/bin/python
def outlierCleaner(predictions, ages, net_worths):
    """
    Clean away the 10% of points that have the largest
    residual errors (difference between the prediction
    and the actual net worth).

    Returns a list of tuples named cleaned_data where
    each tuple is of the form (age, net_worth, error),
    sorted by ascending error.
    """
    errors = abs(predictions - net_worths)
    # zip() returns a lazy iterator in Python 3 (the original called .sort()
    # on it, which only worked in Python 2); materialize and order the
    # triples by residual error with sorted() instead.
    paired = sorted(zip(ages, net_worths, errors), key=lambda tup: tup[2])
    # Keep only the 90% of points with the smallest errors.
    keep = int(len(net_worths) * 0.9)
    return paired[:keep]
"""
Дан список чисел. Определите, сколько в этом списке элементов, которые больше двух своих соседей (слева и справа),
и выведите количество таких элементов. Крайние элементы списка никогда не учитываются,
поскольку у них недостаточно соседей.
"""
# # Вариант 1
# from random import randint
#
# lst = [randint(1, 20) for _ in range(20)]
# print('Our list:', lst)
# summ = 0
# for idx in range(1, len(lst) - 1):
# if lst[idx] > lst[idx - 1] and lst[idx] > lst[idx + 1]:
# summ += 1
# print('Number of Greatest Neighbours: "{}".'.format(summ))
# # Вариант 2
from random import randint
lst = [randint(1, 20) for _ in range(20)]
print('Our list:', lst)
print('Number of Greatest Neighbours: "{}".'.format(len([element for idx, element in enumerate(lst[1:-1]) if element > lst[idx] and element > lst[idx + 2]])))
|
#################################################################### MODULE COMMENTS ############################################################################
#The following class is a python object that takes in the libraries: Nunmpy, Pandas, Sys and Random. #
#The python Object "DataProcessor" that is created below is a python object designed to take in a pandas dataframe and formats the data such that is can be #
#Run into a Naive Bayes learning algorithm. The data processing function can discretize an entire dataset, and remove missing attribute values from a dataset #
#The removal of missing attribute values is done first by identifying the percentage of rows that have missing data, if that percentage is less than 5% then we #
#Drop all of rows that have a missing value. A similar tactic is pursued for columns missing data, if the percentage of columns missing data is less than 5% #
#Then we drop the entire column. If the value is greater than 5 % then we randomly generate a new value to replace the missing attribute in the data set #
#################################################################### MODULE COMMENTS ############################################################################
import pandas as pd
import numpy as np
import sys
import random
import copy
import math
import TrainingAlgorithm
class DataProcessor:
    """Prepare a raw pandas DataFrame for the Naive Bayes learner.

    Two responsibilities:
      * discretize continuous (float) columns into ``bin_count`` equal-width bins
      * repair missing attribute values ("?" or NaN): drop sparse rows/columns
        when only a few are affected, otherwise generate random replacements
    """

    def __init__(self, bin_count):
        # Number of equal-width bins used when discretizing float columns.
        self.bin_count = bin_count
        # Rows/columns are dropped outright when fewer than this percent
        # of them contain missing values.
        self.PercentBeforeDrop = 10.00
        # Positional indices of rows found to contain a missing value.
        self.MissingRowIndexList = set()
        # Names of columns found to contain a missing value.
        self.MissingColumnNameList = set()
        # Columns with more unique values than this are treated as continuous
        # by has_continuous_values().
        # NOTE(review): this attribute was read but never assigned before
        # (AttributeError at runtime); 10 is a guess -- confirm a sensible
        # threshold for the datasets in use.
        self.discrete_threshold = 10

    def StartProcess(self, df: pd.DataFrame) -> pd.DataFrame:
        """Return a cleaned copy of *df*: float columns binned, missing values fixed.

        The last column is assumed to be the class label and is never transformed.
        """
        df1 = copy.deepcopy(df)
        count = 0
        for i in range(len(df.columns)):
            if count == len(df.columns) - 1:
                break  # never transform the classifier column
            # Float columns are continuous and must be discretized.
            # NOTE(review): sampling the first row's type mirrors the original
            # logic; an int column could be misclassified if the row is
            # upcast -- confirm against the datasets in use.
            if type(df1.iloc[0].iloc[i]) == np.float64:
                df1 = self.discretize(df1, i)
                count += 1
                continue
            if self.has_missing_attrs(df1):
                df1 = self.fix_missing_attrs(df1)
            count += 1
        return df1

    # NIU (not in use)
    def RandomRollInts(self, df: pd.DataFrame, col=0) -> pd.DataFrame:
        """Fill every missing cell with a random int drawn uniformly from the
        observed [min, max] range of column *col*.

        ``col`` defaults to 0 now: fix_missing_attrs() called this method
        without a column argument, which raised TypeError before.
        """
        # Scan for the observed min/max, skipping missing markers.  The
        # original seeded both from row 1 (which itself could be missing).
        Min = None
        Max = None
        for i in range(self.CountTotalRows(df)):
            value = df.iat[i, col]
            if self.IsMissingAttribute(value):
                continue
            if Max is None or value > Max:
                Max = value
            if Min is None or value < Min:
                Min = value
        for row in range(self.CountTotalRows(df)):
            for c in range(self.NumberOfColumns(df)):
                # .iat writes in place; the original's chained df.iloc[...]
                # indexing wrote to a copy and also had row/col swapped.
                if self.IsMissingAttribute(df.iat[row, c]):
                    df.iat[row, c] = random.randint(Min, Max)
        return df

    def RandomRollVotes(self, df: pd.DataFrame) -> pd.DataFrame:
        """Replace each missing vote with a fair coin flip between 'y' and 'n'."""
        for i in range(len(df)):
            # Skip the classification column (last one).
            for j in range(len(df.columns) - 1):
                if self.IsMissingAttribute(df.iat[i, j]):
                    roll = random.randint(0, 99) + 1
                    # .iat avoids chained assignment, which silently writes
                    # to a temporary copy instead of the DataFrame.
                    df.iat[i, j] = 'y' if roll > 50 else 'n'
        return df

    def Occurence(self, Column, df: pd.DataFrame, Value) -> int:
        """Count how many times *Value* appears in positional column *Column*."""
        count = 0
        for i in range(len(df)):
            if df.iat[i, Column] == Value:
                count += 1
        return count

    # NIU (not in use)
    def StatsFillInInts(self, df: pd.DataFrame) -> pd.DataFrame:
        """Fill each missing vote with 'y'/'n', weighted by the column's
        observed y/n distribution."""
        for row in range(self.CountTotalRows(df)):
            for col in range(self.NumberOfColumns(df)):
                if not self.IsMissingAttribute(df.iat[row, col]):
                    continue
                # Bugfix: was ``self.occurence`` (wrong case -> AttributeError).
                yay = self.Occurence(col, df, 'y')
                nay = self.Occurence(col, df, 'n')
                PercentYay = yay / len(df)
                PercentNay = nay / len(df)
                if PercentYay > PercentNay:
                    majority, minority, weight = 'y', 'n', PercentYay
                else:
                    majority, minority, weight = 'n', 'y', PercentNay
                # Bugfixes: bare ``random()`` called the module object, and
                # the comparison was inverted (the majority value was picked
                # with the *minority* probability).
                if random.random() < weight:
                    df.iat[row, col] = majority
                else:
                    df.iat[row, col] = minority
        return df

    def has_missing_attrs(self, df: pd.DataFrame) -> bool:
        """True when any cell of *df* holds a missing marker ("?" or NaN)."""
        for row in range(self.CountTotalRows(df)):
            for col in range(self.NumberOfColumns(df)):
                if self.IsMissingAttribute(df.iat[row, col]):
                    return True
        return False

    def KillRow(self, df: pd.DataFrame, index) -> pd.DataFrame:
        """Return *df* with the row at positional *index* dropped.

        Bugfix: the original referenced ``df.Index`` (AttributeError);
        pandas exposes the row labels as ``df.index``.
        """
        return df.drop(df.index[index])

    def IsMissingAttribute(self, attribute) -> bool:
        """True when *attribute* is a missing-value marker: "?" or NaN."""
        # Bugfix: the original compared ``attribute == np.nan`` which is
        # always False (NaN never compares equal); use pd.isna instead.
        return attribute == "?" or pd.isna(attribute)

    def KillRows(self, df: pd.DataFrame) -> pd.DataFrame:
        """Drop every row previously recorded in MissingRowIndexList."""
        if self.MissingRowIndexList:
            # Drop all rows in a single call.  The original dropped them one
            # by one via df.index[i], which is wrong once an earlier drop has
            # shifted the remaining positional indices.
            df = df.drop(df.index[sorted(self.MissingRowIndexList)])
        # Clear the bookkeeping so the same rows are not dropped twice.
        self.MissingRowIndexList = set()
        df.reset_index(drop=True, inplace=True)
        return df

    def KillColumns(self, df: pd.DataFrame) -> pd.DataFrame:
        """Drop every column previously recorded in MissingColumnNameList."""
        for name in self.MissingColumnNameList:
            df = df.drop(name, axis=1)
        # Clear the bookkeeping so the same columns are not dropped twice.
        self.MissingColumnNameList = set()
        return df

    def fix_missing_attrs(self, df: pd.DataFrame) -> pd.DataFrame:
        """Repair missing values.

        Strategy: if < PercentBeforeDrop% of rows are affected, drop those
        rows; else if < PercentBeforeDrop% of columns are affected, drop the
        columns; otherwise roll random replacement values in place.
        """
        PercentRowsMissing = self.PercentRowsMissingValue(df)
        PercentColumnsMissingData = self.PercentColumnsMissingData(df)
        if PercentRowsMissing < self.PercentBeforeDrop:
            return self.KillRows(df)
        elif PercentColumnsMissingData < self.PercentBeforeDrop:
            return self.KillColumns(df)
        else:
            if self.has_missing_attrs(df) == False:
                return df
            # Sample one cell to decide whether this is a y/n (string)
            # dataset or a numeric one, then roll replacements accordingly.
            types = type(df.iat[1, 1])
            if types == str:
                df = self.RandomRollVotes(df)
            else:
                # Bugfix: the original omitted the column argument entirely
                # (TypeError); RandomRollInts now defaults col to 0.
                df = self.RandomRollInts(df)
            return df

    def has_continuous_values(self, df: pd.DataFrame) -> bool:
        """True when any column has more unique values than discrete_threshold."""
        for col in df:
            if df[col].nunique() > self.discrete_threshold:
                return True
        return False

    def discretize(self, df: pd.DataFrame, col) -> pd.DataFrame:
        """Replace continuous values in positional column *col* with 1-based
        equal-width bin numbers (1 .. bin_count)."""
        # Scan for min/max, skipping missing markers.  Bugfix: the original
        # used if/elif with sentinel seeds, so a monotonically increasing
        # column never updated Min (leaving it at 100000 and producing a
        # negative bin width).
        Min = None
        Max = None
        for i in range(self.CountTotalRows(df)):
            value = df.iat[i, col]
            if self.IsMissingAttribute(value):
                continue
            if Max is None or value > Max:
                Max = value
            if Min is None or value < Min:
                Min = value
        Delta = Max - Min
        BinRange = Delta / self.bin_count
        # Bin upper bounds: Min + BinRange, Min + 2*BinRange, ..., Max.
        Bins = [Min + (i + 1) * BinRange for i in range(self.bin_count)]
        for row in range(self.CountTotalRows(df)):
            value = df.iat[row, col]
            for i in range(len(Bins)):
                # The last bin is a catch-all so the maximum value gets binned.
                if i == len(Bins) - 1 or value < Bins[i]:
                    df.iat[row, col] = i + 1
                    print("Value " + str(value) + " binned to value " + str(i + 1))
                    break
        return df

    def CountTotalRows(self, df: pd.DataFrame) -> int:
        """Return the number of rows in *df*."""
        return len(df)

    def CountRowsMissingValues(self, df: pd.DataFrame) -> int:
        """Count missing cells in *df*, recording affected row positions.

        NOTE(review): the returned count is the number of missing *cells*,
        not distinct rows, matching the original behaviour.
        """
        MissingValues = 0
        TotalNumRows = self.CountTotalRows(df)
        TotalNumCols = self.NumberOfColumns(df)
        for i in range(TotalNumRows):
            for j in range(TotalNumCols):
                if self.IsMissingAttribute(df.iat[i, j]):
                    MissingValues += 1
                    # Remember the row so KillRows() can drop it later.
                    self.MissingRowIndexList.add(i)
        return MissingValues

    def PercentRowsMissingValue(self, df: pd.DataFrame) -> float:
        """Percentage of rows (cells, see CountRowsMissingValues) with missing data."""
        TotalNumRows = self.CountTotalRows(df)
        TotalMissingRows = self.CountRowsMissingValues(df)
        return (TotalMissingRows / TotalNumRows) * 100

    def ColumnMissingData(self, df: pd.DataFrame) -> int:
        """Count columns containing at least one missing value; record their names."""
        Count = 0
        TotalNumberColumns = self.NumberOfColumns(df)
        TotalNumberRows = self.CountTotalRows(df)
        for j in range(TotalNumberColumns):
            for i in range(TotalNumberRows):
                if self.IsMissingAttribute(df.iat[i, j]):
                    Count += 1
                    # Remember the column so KillColumns() can drop it later.
                    self.MissingColumnNameList.add(df.columns[j])
                    break  # one hit is enough for this column
        return Count

    def NumberOfColumns(self, df: pd.DataFrame) -> int:
        """Return the number of columns in *df*."""
        return len(df.columns)

    def PercentColumnsMissingData(self, df: pd.DataFrame) -> float:
        """Percentage of columns with at least one missing value."""
        TotalNumberColumns = self.NumberOfColumns(df)
        TotalMissingColumns = self.ColumnMissingData(df)
        return (TotalMissingColumns / TotalNumberColumns) * 100

    def PrintAllData(self, df: pd.DataFrame) -> None:
        """Debug helper: print every cell of *df* to stdout."""
        for i in range(len(df)):
            for j in range(len(df.columns)):
                print(df.iat[i, j])
# Unit testing of the object created above; runs only when this file is
# executed directly, never on import.
if __name__ == '__main__':
    NumberBins = 32
    # (input path, output file stem, message printed after noise shuffling)
    datasets = [
        ('Vote_Data/Votes.data', 'PreProcessedVoting', 'vote dataset Noisey'),
        ('Iris_Data/iris.data', 'PreProcessedIris', 'Iris Dataset Noisey'),
        ('Glass_Data/glass.data', 'PreProcessedGlass', 'glass Dataset Noisey'),
        ('Breast_Cancer_Data/cancer.data', 'PreProcessedCancer', 'cancer dataset Noisey'),
        ('Soybean_Data/soybean.data', 'PreProcessedSoybean', 'soy Dataset Noisey'),
    ]
    # Load every raw dataset first, then process each with its own object.
    frames = [pd.read_csv(path, index_col=False) for path, _, _ in datasets]
    print("DataFrames have been created")
    processors = [DataProcessor(NumberBins) for _ in datasets]
    print("Pre Processor Objects Built ")
    frames = [proc.StartProcess(frame) for proc, frame in zip(processors, frames)]
    print("Printing processed data to Files...")
    for frame, (_, stem, _) in zip(frames, datasets):
        frame.to_csv(stem + '.csv', index=False, index_label=False)
    # Produce a shuffled ("noisy") variant of every processed dataset.
    Ta = TrainingAlgorithm.TrainingAlgorithm()
    print("Starting Noise")
    noisy = []
    for frame, (_, _, message) in zip(frames, datasets):
        noisy.append(Ta.ShuffleData(copy.deepcopy(frame)))
        print(message)
    print("\n")
    print("Printing Noisey Data to Files...")
    for frame, (_, stem, _) in zip(noisy, datasets):
        frame.to_csv(stem + '_Noise' + '.csv', index=False, index_label=False)
    print("Processing is complete ")
    print("File creation is complete ")
|
import pytest
from TestData.Configuration import Config
from Tests.BaseTestSuite import BaseTestSuite
from PagesFactory import PagesFactory
# Skip every test in this module unless both LOGIN and PASSWORD are
# configured; additionally tag them all with the "login" marker.
pytestmark = [pytest.mark.skipif((Config.LOGIN is None or Config.PASSWORD is None), reason='LOGIN and Password required'),
              pytest.mark.login]
class TestLoginSuite(BaseTestSuite):
    """Login/logout scenarios (skipped unless credentials are configured)."""

    def login(self, email, password):
        # Open the main page, bring up the login form and submit credentials.
        factory = PagesFactory(self.driver)
        login_page = factory.main.open_page().toolbox.press_login_button()
        login_page.login(email, password)

    def test_login_logout(self):
        # Sign in, verify the authorized page, then log out and verify again.
        self.login(Config.LOGIN, Config.PASSWORD)
        authorized = PagesFactory(self.driver).find_work_authorized
        assert authorized.is_url_opened()
        assert authorized.toolbox.logout().is_url_opened()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Xie Yanbo <xieyanbo@gmail.com>
# This software is licensed under the New BSD License. See the LICENSE
# file in the top distribution directory for the full license text.
"""A debug library and REPL for RobotFramework.
"""
from __future__ import print_function
import cmd
import os
import re
import sys
import tempfile
from functools import wraps
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.history import FileHistory
from prompt_toolkit.interface import AbortAction
from prompt_toolkit.shortcuts import print_tokens, prompt
from prompt_toolkit.styles import style_from_dict
from pygments.token import Token
from robot import run_cli
from robot.api import logger
from robot.errors import ExecutionFailed, HandlerExecutionFailed, ExecutionPassed, ExecutionFailures
from robot.libdocpkg.model import LibraryDoc
from robot.libdocpkg.robotbuilder import KeywordDocBuilder, LibraryDocBuilder
from robot.libraries import STDLIBS
from robot.libraries.BuiltIn import BuiltIn
from robot.running.namespace import IMPORTER
from robot.running.context import EXECUTION_CONTEXTS
from robot.running.signalhandler import STOP_SIGNAL_MONITOR
from robot.variables import is_var
import inspect
import math
from robot.running.model import TestCase, TestSuite, UserKeyword, Keyword, ResourceFile
__version__ = '1.1.4'
HISTORY_PATH = os.environ.get('RFDEBUG_HISTORY', '~/.rfdebug_history')
KEYWORD_SEP = re.compile(' +|\t')
def get_command_line_encoding():
    """Best-effort detection of the console output encoding (default utf-8)."""
    try:
        detected = sys.stdout.encoding
    except AttributeError:
        # stdout has been replaced by an object without an encoding attribute.
        detected = sys.__stdout__.encoding
    return detected or 'utf-8'
COMMAND_LINE_ENCODING = get_command_line_encoding()
class HelpMeta(type):
    """Metaclass that auto-generates a ``help_<cmd>`` method for every
    ``do_<cmd>`` command, printing ``self.get_help_string(<cmd>)``.

    Bugfixes vs. the original:
      * attach the generated function itself -- the original assigned the
        *builtin* ``help`` instead of ``auto_help``
      * bind the command name as a default argument -- the late-binding
        closure made every helper show help for the last command iterated
      * use ``setattr`` on the class -- mutating ``attrs`` after the class
        object exists has no effect
      * name the method ``help_<cmd>`` (without the ``do_`` prefix), which
        is what ``cmd.Cmd`` looks up for ``help <cmd>``
    """

    def __init__(cls, name, bases, attrs):
        for key, value in attrs.items():
            if key.startswith('do_') and hasattr(value, '__call__'):
                cmd_name = key[3:]  # strip the 'do_' prefix

                def auto_help(self, _cmd_name=cmd_name):
                    print(self.get_help_string(_cmd_name))

                auto_help.__name__ = 'help_' + cmd_name
                setattr(cls, 'help_' + cmd_name, auto_help)
        type.__init__(cls, name, bases, attrs)
class BaseCmd(cmd.Cmd, object):
    """Basic REPL tool"""
    # Python 2 metaclass hook; on Python 3 this is just an inert class
    # attribute, so the automatic help_* generation only runs on Python 2.
    __metaclass__ = HelpMeta

    def emptyline(self):
        """By default Cmd runs last command if an empty line is entered.
        Disable it."""
        pass

    def do_exit(self, arg):
        """Exit the interpreter. You can also use the Ctrl-D shortcut."""
        return True

    # Ctrl-D sends EOF; treat it exactly like the "exit" command.
    do_EOF = do_exit

    def help_help(self):
        """Help of Help command"""
        print('Show help message.')

    def do_pdb(self, arg):
        """Enter the python debugger pdb. For development only."""
        print('break into python debugger: pdb')
        import pdb
        pdb.set_trace()
def get_libs():
    """Return the libraries robotframework has imported, sorted by name."""
    return sorted(IMPORTER._library_cache._items, key=lambda lib: lib.name)
def get_libs_as_dict():
    """Map library name -> library instance for all imported libraries."""
    return {lib.name: lib for lib in IMPORTER._library_cache._items}
def match_libs(name=''):
    """Return names of imported libraries starting with *name* (case-insensitive);
    with the default empty prefix, every library name matches."""
    prefix = name.lower()
    return [lib.name for lib in get_libs() if lib.name.lower().startswith(prefix)]
def memoize(function):
    """Cache *function* results keyed by its positional arguments."""
    cache = {}

    @wraps(function)
    def wrapper(*args):
        # EAFP: a hit is the common case once the cache is warm.
        try:
            return cache[args]
        except KeyError:
            cache[args] = function(*args)
            return cache[args]

    return wrapper
class ImportedLibraryDocBuilder(LibraryDocBuilder):
    """Build a LibraryDoc directly from an already-imported library object."""

    def build(self, lib):
        doc = LibraryDoc(
            name=lib.name,
            doc=self._get_doc(lib),
            doc_format=lib.doc_format,
        )
        doc.inits = self._get_initializers(lib)
        doc.keywords = KeywordDocBuilder().build_keywords(lib)
        return doc
@memoize
def get_lib_keywords(library):
    """Return name/lib/doc dicts for every keyword of an imported *library*.

    Only the first line of each keyword's documentation is kept.
    """
    lib_doc = ImportedLibraryDocBuilder().build(library)
    return [{'name': keyword.name,
             'lib': library.name,
             'doc': keyword.doc.split('\n')[0]}
            for keyword in lib_doc.keywords]
def get_keywords():
    """Yield keyword info dicts for every imported library."""
    for library in get_libs():
        for keyword_info in get_lib_keywords(library):
            yield keyword_info
# prompt-toolkit styles used by print_output()/print_error(): green headline
# for normal messages, red headline for errors.
NORMAL_STYLE = style_from_dict({
    Token.Head: '#00FF00',
    Token.Message: '#CCCCCC',
})
ERROR_STYLE = style_from_dict({
    Token.Head: '#FF0000',
    Token.Message: '#FFFFFF',
})
def print_output(head, message, style=NORMAL_STYLE):
    """Print a styled "head message" line using prompt-toolkit tokens."""
    print_tokens([(Token.Head, head + ' '),
                  (Token.Message, message),
                  (Token, '\n')],
                 style=style)
def print_error(head, message, style=ERROR_STYLE):
    """Like print_output(), but defaults to the error (red) style."""
    print_output(head, message, style=style)
def parse_keyword(command):
    """Split a plain-text keyword call into [keyword, arg1, arg2, ...].

    Arguments are separated by a tab or two-plus spaces (KEYWORD_SEP).
    """
    if sys.version_info > (3,):
        text = command
    else:
        # Python 2 hands us raw bytes from the console.
        text = command.decode(COMMAND_LINE_ENCODING)
    return KEYWORD_SEP.split(text)
def assign_variable(bi, variable_name, args):
    """Run *args* as a keyword call and store its result in *variable_name*."""
    value = bi.run_keyword(*args)
    bi._variables.__setitem__(variable_name, value)
    return value
def run_keyword(bi, command):
    """Execute one plain-text robotframework keyword line through *bi* (BuiltIn).

    Supports comment lines (leading '#'), variable display (``${var}`` alone),
    variable assignment (``${var} =  Keyword  args``) and plain keyword calls.
    Failures are printed in the error style instead of propagating.
    """
    if not command:
        return
    try:
        parts = parse_keyword(command)
        keyword, args = parts[0], parts[1:]
        if keyword.strip().startswith('#'):
            return  # comment line: nothing to run
        variable_name = keyword.rstrip('= ')
        if is_var(variable_name):
            if args:
                value = assign_variable(bi, variable_name, args)
                print_output('#', '{} = {!r}'.format(variable_name, value))
            else:
                # Bare variable: just echo its current value to the console.
                bi.run_keyword('Log to console', keyword)
        else:
            result = bi.run_keyword(keyword, *args)
            if result:
                print_output('<', repr(result))
    except ExecutionFailed as exc:
        print_error('! keyword:', command)
        print_error('!', exc.message)
    except HandlerExecutionFailed as exc:
        print_error('! keyword:', command)
        print_error('!', exc.full_message)
    except Exception as exc:
        print_error('! keyword:', command)
        print_error('! FAILED:', repr(exc))
class CmdCompleter(Completer):
    """prompt-toolkit completer for the debug shell.

    Completes command/keyword names from *commands* (name, display,
    display_meta triples); argument completion is delegated back to the
    Cmd-style ``complete_<cmd>`` methods on *cmd_repl*.
    """

    def __init__(self, commands, cmd_repl=None):
        self.names = [name for name, _, _ in commands]
        self.displays = {name: display for name, display, _ in commands}
        self.display_metas = {name: meta for name, _, meta in commands}
        self.cmd_repl = cmd_repl

    def get_argument_completions(self, completer, document):
        """Use a Cmd.py completer method to complete the current argument."""
        end = document.cursor_position_col
        line = document.current_line
        last_space = line[:end].rfind(' ')
        begin = last_space + 1 if last_space >= 0 else 0
        for suggestion in completer(line[begin:end], line, begin, end):
            yield Completion(suggestion, begin - end, display=suggestion)

    def get_completions(self, document, complete_event):
        """Yield suggestions for the text before the cursor."""
        text = document.text_before_cursor.lower()
        parts = KEYWORD_SEP.split(text)
        if len(parts) >= 2:
            # Past the command word already: complete its arguments instead.
            completer = getattr(self.cmd_repl,
                                'complete_' + parts[0].strip(),
                                None)
            if completer:
                for completion in self.get_argument_completions(completer,
                                                                document):
                    yield completion
            return
        for name in self.names:
            # Only offer names at the same level as the input: dotted
            # (library-qualified) names for dotted input, plain names for
            # plain input.
            same_level = ('.' in name) == ('.' in text)
            if same_level and name.lower().strip().startswith(text.strip()):
                yield Completion(name,
                                 -len(text),
                                 display=self.displays.get(name, ''),
                                 display_meta=self.display_metas.get(name, ''))
class PtkCmd(BaseCmd):
    """CMD shell using prompt-toolkit"""

    prompt = u'> '
    # Subclasses may set these to render a token-based (styled) prompt
    # instead of the plain text ``prompt`` string.
    get_prompt_tokens = None
    prompt_style = None
    intro = '''\
Only accepted plain text format keyword seperated with two or more spaces.
Type "help" for more information.\
'''

    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Create the shell and attach a persistent history file."""
        BaseCmd.__init__(self, completekey, stdin, stdout)
        self.history = FileHistory(os.path.expanduser(HISTORY_PATH))

    def get_cmd_names(self):
        """Get all command names of CMD shell"""
        pre = 'do_'
        cut = len(pre)
        return [_[cut:] for _ in self.get_names() if _.startswith(pre)]

    def get_help_string(self, command_name):
        """Get help document of command"""
        func = getattr(self, 'do_' + command_name, None)
        if not func:
            return ''
        return func.__doc__

    def get_helps(self):
        """Get all help documents of commands as (name, doc) pairs"""
        return [(name, self.get_help_string(name) or name)
                for name in self.get_cmd_names()]

    def get_completer(self):
        """Get completer instance"""
        commands = [(name, '', doc) for name, doc in self.get_helps()]
        cmd_completer = CmdCompleter(commands, self)
        return cmd_completer

    def pre_loop(self):
        # Hook executed before every prompt; subclasses override it.
        pass

    def cmdloop(self, intro=None):
        """Better command loop supported by prompt_toolkit

        override default cmdloop method
        """
        if intro is not None:
            self.intro = intro
        if self.intro:
            self.stdout.write(str(self.intro) + '\n')
        #self.do_look(None)
        stop = None
        while not stop:
            self.pre_loop()
            if self.cmdqueue:
                # Queued commands take priority over the interactive prompt.
                line = self.cmdqueue.pop(0)
            else:
                kwargs = dict(history=self.history,
                              auto_suggest=AutoSuggestFromHistory(),
                              enable_history_search=True,
                              completer=self.get_completer(),
                              display_completions_in_columns=True,
                              on_abort=AbortAction.RETRY)
                if self.get_prompt_tokens:
                    # Token-based prompt: the text prompt must then be empty.
                    kwargs['get_prompt_tokens'] = self.get_prompt_tokens
                    kwargs['style'] = self.prompt_style
                    prompt_str = u''
                else:
                    prompt_str = self.prompt
                try:
                    line = prompt(prompt_str, **kwargs)
                except EOFError:
                    line = 'EOF'
            line = self.precmd(line)
            stop = self.onecmd(line)
            stop = self.postcmd(stop, line)
        self.postloop()
def get_prompt_tokens(self, cli):
    """Build the prompt-toolkit token list for the shell prompt.

    Defined at module level (with an explicit ``self``) so it can be
    attached to a class as an unbound prompt callback, as DebugCmd does.
    """
    prompt_fragments = [(Token.Prompt, u'> ')]
    return prompt_fragments
class DebugCmd(PtkCmd):
    """Interactive debug shell for robotframework."""

    # Reuse the module-level prompt callback and colour the prompt blue.
    get_prompt_tokens = get_prompt_tokens
    prompt_style = style_from_dict({Token.Prompt: '#0000FF'})

    def __init__(self, completekey='tab', stdin=None, stdout=None, debug_inst=None):
        PtkCmd.__init__(self, completekey, stdin, stdout)
        self.rf_bi = BuiltIn()  # handle to Robot Framework's BuiltIn library
        self.debug_inst = debug_inst  # owning DebugLibrary instance

    def postcmd(self, stop, line):
        """Run after a command; propagate the stop flag unchanged."""
        return stop

    def reset_robotframework_exception(self):
        # Clear a pending Ctrl-C so a single interrupt does not abort every
        # keyword subsequently run from the shell.
        if STOP_SIGNAL_MONITOR._signal_count:
            STOP_SIGNAL_MONITOR._signal_count = 0
            STOP_SIGNAL_MONITOR._running_keyword = True
            logger.info('Reset last exception by DebugLibrary')

    def pre_loop(self):
        self.reset_robotframework_exception()

    def do_next(self, arg):
        # "Step over": resume execution but schedule another Debug keyword
        # right after the next pending step so the shell reopens there.
        from robot.running.model import Keyword
        new_kwd = Keyword(name='Debug')
        self.debug_inst.most_recent_step_runner.steps.insert(1, new_kwd)
        return True

    # STUB
    def do_step(self, arg):
        # "Step into" is unfinished: it only arms debug_trigger so the patched
        # run_steps() injects a Debug keyword at the start of the next runner.
        current_steps = self.debug_inst.most_recent_step_runner.steps
        next_step = current_steps[0]
        from robot.running.model import Keyword
        #new_kwd = Keyword(name='Debug')
        # TODO: insert this keyword into the subsequent step, instead of after it.
        # new_kwd needs to be inserted into next_step.
        self.debug_inst.debug_trigger = True
        return True
        #logger.console("Step is not implemented yet.")

    def do_look(self, depth=math.inf):
        """Print the keyword call stack, deepest frame last.

        ``depth`` limits how many frames are shown.
        NOTE(review): Cmd passes '' when no argument is given, which keeps
        depth at math.inf; calling this directly with the *default* value
        would raise OverflowError on ``int(math.inf)`` — confirm intended use.
        """
        depth = int(depth) if depth else math.inf
        stack = self.debug_inst.keyword_stack
        # import pdb
        # pdb.set_trace()
        libs = get_libs_as_dict()  # NOTE(review): assigned but unused here
        print_stack = []
        source_of_next = None
        for idx, (name, attributes, context, step_runner) in enumerate(stack):
            # The source resolved while processing frame N-1 belongs to frame N.
            source = source_of_next
            namespace = context.namespace
            runner = namespace.get_runner(name)
            from robot.running.librarykeywordrunner import LibraryKeywordRunner
            from robot.running.userkeywordrunner import UserKeywordRunner
            if isinstance(runner, LibraryKeywordRunner):
                source_of_next = runner.library.source
            elif isinstance(runner, UserKeywordRunner):
                user_keywords = namespace._kw_store.user_keywords
                if name in user_keywords.handlers:
                    source_of_next = user_keywords.source
                else:
                    # Keyword must come from an imported resource file.
                    potential_sources = []
                    resources = namespace._kw_store.resources.values()
                    for resource in resources:
                        if name in [handler.longname for handler in resource.handlers]:
                            potential_sources.append(resource.source)
                    if len(potential_sources) > 1:
                        raise NotImplementedError("Have not implemented dealing with multiple resources.")
                    source_of_next = potential_sources[0]
            else:
                raise Exception("Runner passed that is not dealt with.")
            # Top level
            # for step in step_runner.og_steps:
            # print(step)
            current_steps = step_runner.steps
            parent_stack = []
            # All remaining steps should share one parent (suite/test/keyword).
            parent_set = {step.parent for step in current_steps}
            parent_set.discard(None)
            if len(parent_set) > 1:
                print('current_steps has multiple parents.')
                for step in current_steps:
                    print(step, step.parent)
                raise Exception('current_steps has multiple parents.')
            elif len(parent_set) == 0:
                print(parent_set)
                raise Exception('current_steps has no parents somehow')
            parent = next(iter(parent_set))
            # Walk up the model tree to build a human-readable ancestry list.
            while parent is not None:
                if isinstance(parent, TestCase):
                    parent_stack.append('Test Case: {}'.format(parent.name))
                elif isinstance(parent, TestSuite):
                    parent_stack.append('Test Suite: {}, source: {}'.format(parent.name, parent.source))
                elif isinstance(parent, UserKeyword):
                    parent_stack.append('Keyword: {}, source: {}'.format(parent.name, source))
                else:
                    print('uncaught parent type')
                    print(parent)
                    raise Exception('Uncaught parent type')
                if hasattr(parent, 'parent'):
                    parent = parent.parent
                else:
                    parent = None
            # Consumed count = original length minus what is still pending;
            # the innermost frame's current step is still in-flight, hence -1
            # for every frame except the last.
            highlighted_idx = len(step_runner.og_steps) - len(step_runner.steps)
            if idx != len(stack)-1:
                highlighted_idx -= 1
            steps_stack = []
            for idx, step in enumerate(step_runner.og_steps):
                if idx == highlighted_idx:
                    steps_stack.append('    {} <-----'.format(step))
                else:
                    steps_stack.append('    {}'.format(step))
            print_stack.append((source, steps_stack, parent_stack))
        print('----------')
        if depth < len(print_stack):
            print_stack = print_stack[-depth:]
        while len(print_stack) > 0:
            source, steps_stack, parent_stack = print_stack.pop(0)
            while len(parent_stack) > 0:
                string = parent_stack.pop()
                print(string)
            for step in steps_stack:
                print(step)

    def do_help(self, arg):
        """Show help message."""
        if not arg.strip():
            print('''\
Input Robotframework keywords, or commands listed below.
Use "libs" or "l" to see available libraries,
use "keywords" or "k" see the list of library keywords,
use the TAB keyboard key to autocomplete keywords.\
''')
        PtkCmd.do_help(self, arg)

    def get_completer(self):
        """Get completer instance specified for robotframework."""
        # commands
        commands = [(cmd_name,
                     cmd_name,
                     'DEBUG command: {0}'.format(doc))
                    for cmd_name, doc in self.get_helps()]
        # libraries
        for lib in get_libs():
            commands.append((lib.name,
                             lib.name,
                             'Library: {0} {1}'.format(lib.name, lib.version)))
        # keywords
        for keyword in get_keywords():
            # name with library
            name = keyword['lib'] + '.' + keyword['name']
            commands.append((name,
                             keyword['name'],
                             u'Keyword: {0}'.format(keyword['doc'])))
            # name without library
            commands.append((keyword['name'],
                             keyword['name'],
                             u'Keyword[{0}.]: {1}'.format(keyword['lib'],
                                                          keyword['doc'])))
        cmd_completer = CmdCompleter(commands, self)
        return cmd_completer

    def do_selenium(self, arg):
        """Start a selenium webdriver and open url in browser you expect.

        s(elenium)  [<url>]  [<browser>]

        default url is google.com, default browser is firefox.
        """
        command = 'import library SeleniumLibrary'
        print_output('#', command)
        run_keyword(self.rf_bi, command)
        # Set defaults, overriden if args set
        url = 'http://www.google.com/'
        browser = 'firefox'
        if arg:
            args = KEYWORD_SEP.split(arg)
            if len(args) == 2:
                url, browser = args
            else:
                url = arg
        if '://' not in url:
            url = 'http://' + url
        command = 'open browser %s %s' % (url, browser)
        print_output('#', command)
        run_keyword(self.rf_bi, command)

    do_s = do_selenium  # short alias

    def complete_selenium(self, text, line, begin_idx, end_idx):
        """complete selenium command"""
        webdrivers = ['firefox',
                      'chrome',
                      'ie',
                      'opera',
                      'safari',
                      'phantomjs',
                      'remote']
        if len(line.split()) == 3:
            command, url, driver_name = line.lower().split()
            return [d for d in webdrivers if d.startswith(driver_name)]
        elif len(line.split()) == 2 and line.endswith(' '):
            return webdrivers
        return []

    complete_s = complete_selenium

    def default(self, line):
        """Any non-command input is run as a RobotFramework keyword."""
        command = line.strip()
        run_keyword(self.rf_bi, command)

    def do_libs(self, args):
        """Print imported and builtin libraries, with source if `-s` specified.

        l(ibs) [-s]
        """
        print_output('<', 'Imported libraries:')
        for lib in get_libs():
            print_output('   {}'.format(lib.name), lib.version)
            if lib.doc:
                print('       {}'.format(lib.doc.split('\n')[0]))
            if '-s' in args:
                print('       {}'.format(lib.source))
        print_output('<', 'Builtin libraries:')
        for name in sorted(list(STDLIBS)):
            print_output('   ' + name, '')

    do_l = do_libs

    def complete_libs(self, text, line, begin_idx, end_idx):
        """complete libs command"""
        if len(line.split()) == 1 and line.endswith(' '):
            return ['-s']
        return []

    complete_l = complete_libs

    def do_keywords(self, args):
        """Print keywords of libraries, all or starts with <lib_name>

        k(eywords) [<lib_name>]
        """
        lib_name = args
        matched = match_libs(lib_name)
        if not matched:
            print_error('< not found library', lib_name)
            return
        libs = get_libs_as_dict()
        for name in matched:
            lib = libs[name]
            print_output('< Keywords of library', name)
            for keyword in get_lib_keywords(lib):
                print_output('   {}\t'.format(keyword['name']),
                             keyword['doc'])

    do_k = do_keywords

    def complete_keywords(self, text, line, begin_idx, end_idx):
        """complete keywords command"""
        if len(line.split()) == 2:
            command, lib_name = line.split()
            return match_libs(lib_name)
        elif len(line.split()) == 1 and line.endswith(' '):
            return [_.name for _ in get_libs()]
        return []

    complete_k = complete_keywords
class DebugLibrary(object):
    """Debug Library for RobotFramework."""

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LIBRARY_VERSION = __version__
    ROBOT_LISTENER_API_VERSION = 2
    # Class-level state shared with the patched StepRunner.run_steps below:
    keyword_stack = []            # (name, attributes, context, step_runner) per active keyword
    most_recent_step_runner = None  # last StepRunner seen by run_steps
    debug_trigger = False           # set by DebugCmd.do_step to request a stop

    def __init__(self, experimental=False):
        logger.error('initializing debuglib')  # NOTE(review): error-level tracing
        if experimental:
            # Monkey-patch Robot's StepRunner so the shell can inspect/mutate
            # the pending step list (stepping support).
            StepRunner.run_steps = run_steps
        self.ROBOT_LIBRARY_LISTENER = self
        self.step_runner = None
        self.debug_trigger = False

    def start_test(self, name, results):
        # Listener hook: the stack must be balanced between tests.
        if len(self.keyword_stack) != 0:
            raise Exception("Keyword stack not empty at test start")

    def start_keyword(self, name, attributes):
        # Listener hook: record the frame for `look` in the debug shell.
        context = EXECUTION_CONTEXTS.current
        self.keyword_stack.append((name, attributes, context, self.most_recent_step_runner))

    def end_keyword(self, name, attributes):
        self.keyword_stack.pop()

    def end_test(self, name, results):
        if len(self.keyword_stack) != 0:
            raise Exception("Keyword start not empty at test end.")

    @staticmethod
    def _get_closest_step_runner():
        # Walk the Python call stack outward and return the first StepRunner
        # found in any frame's locals.
        f = inspect.currentframe().f_back
        while f is not None:
            loc = f.f_locals
            for v in loc.values():
                if isinstance(v, StepRunner):
                    return v
            f = f.f_back
        raise Exception("No StepRunner found, this should never happen.")

    # 0-indexed
    @staticmethod
    def _get_nth_closest_step_runner(n):
        # Same walk as above, but skip the first n distinct StepRunners.
        srs = set()
        f = inspect.currentframe().f_back
        while f is not None:
            loc = f.f_locals
            for v in loc.values():
                if isinstance(v, StepRunner):
                    if len(srs) == n and v not in srs:
                        return v
                    elif v not in srs:
                        srs.add(v)
                    elif v in srs:
                        pass
            f = f.f_back
        raise Exception("No StepRunner found, this should never happen.")

    def debug(self):
        """Open a interactive shell, run any RobotFramework keywords.

        Keywords seperated by two space or one tab, and Ctrl-D to exit.
        """
        # re-wire stdout so that we can use the cmd module and have readline
        # support
        self.debug_trigger = False
        old_stdout = sys.stdout
        sys.stdout = sys.__stdout__
        print_output('\n>>>>>', 'Enter interactive shell')
        debug_cmd = DebugCmd(debug_inst=self)
        debug_cmd.cmdloop()
        print_output('\n>>>>>', 'Exit shell.')
        # put stdout back where it was
        sys.stdout = old_stdout

    def get_remote_url(self):
        """Get selenium URL for connecting to remote WebDriver."""
        s = BuiltIn().get_library_instance('Selenium2Library')
        url = s._current_browser().command_executor._url
        return url

    def get_session_id(self):
        """Get selenium browser session id."""
        s = BuiltIn().get_library_instance('Selenium2Library')
        job_id = s._current_browser().session_id
        return job_id

    def get_webdriver_remote(self):
        """Print the way connecting to remote selenium server."""
        remote_url = self.get_remote_url()
        session_id = self.get_session_id()
        # One-liner a user can paste into a Python console to attach to the
        # same browser session from outside the test run.
        s = 'from selenium import webdriver;' \
            'd=webdriver.Remote(command_executor="%s",' \
            'desired_capabilities={});' \
            'd.session_id="%s"' % (
                remote_url,
                session_id
            )
        logger.console('''
DEBUG FROM CONSOLE
# geckodriver user please check https://stackoverflow.com/a/37968826/150841
%s
''' % (s))
        logger.info(s)
        return s
# Minimal Robot Framework suite fed to run_cli() by shell(): one test case
# that invokes the ``debug`` keyword to drop straight into the REPL.
# Bytes literal because the temp file is written in binary mode.
# NOTE(review): separators here appear collapsed to single spaces; Robot's
# plain-text format needs two+ spaces between tokens — confirm against the
# original file (this copy looks whitespace-mangled).
TEST_SUITE = b'''*** Settings ***
Library DebugLibrary
** test case **
RFDEBUG REPL
debug
'''
def shell():
    """A standalone robotframework shell."""
    with tempfile.NamedTemporaryFile(prefix='robot-debug-',
                                     suffix='.txt',
                                     delete=False) as suite_file:
        try:
            suite_file.write(TEST_SUITE)
            suite_file.flush()
            # Suppress every log/report artefact unless the user passed
            # explicit command-line options.
            extra_args = sys.argv[1:]
            if not extra_args:
                extra_args = '-l None -x None -o None -L None -r None'.split()
            rc = run_cli(extra_args + [suite_file.name])
            sys.exit(rc)
        finally:
            suite_file.close()
            # pybot raises PermissionError on Windows NT or later when
            # NamedTemporaryFile is used with delete=True, so the suite
            # file is removed separately here.
            if os.path.exists(suite_file.name):
                os.unlink(suite_file.name)
if __name__ == '__main__':
    # Running this module directly starts the standalone robot shell.
    shell()
from robot.running.steprunner import StepRunner
def run_steps(self, steps):
    """Patched replacement for StepRunner.run_steps.

    Installed by DebugLibrary(experimental=True).  Keeps the pending step
    list on ``self`` so the debug shell can inspect and mutate it, and
    injects a Debug keyword first when stepping was armed via
    ``debug_trigger``.
    """
    from robot.api import logger
    logger.error('run_steps')  # NOTE(review): error-level calls look like leftover tracing
    logger.error(steps)
    debugLibrary = BuiltIn().get_library_instance('DebugLibrary')
    debugLibrary.most_recent_step_runner = self
    errors = []
    self.steps = []
    self.og_steps = steps  # untouched original list, displayed by `look`
    from robot.api import logger
    logger.error('run steps before trigger check {}'.format(debugLibrary.debug_trigger))
    if debugLibrary.debug_trigger:
        logger.error('run steps debug trigger')
        # Stop immediately: run the Debug keyword before the real steps.
        new_kwd = Keyword(name='Debug')
        self.steps.append(new_kwd)
        debugLibrary.debug_trigger = False
    for step in steps:
        self.steps.append(step)
    # Consume from the head of self.steps (not the original iterable) so
    # keywords the shell inserts while paused are actually executed.
    while len(self.steps) > 0:
        self.cur_step = self.steps.pop(0)
        try:
            self.run_step(self.cur_step)
        except ExecutionPassed as exception:
            exception.set_earlier_failures(errors)
            raise exception
        except ExecutionFailed as exception:
            errors.extend(exception.get_errors())
            if not exception.can_continue(self._context.in_teardown,
                                          self._templated,
                                          self._context.dry_run):
                break
    if errors:
        raise ExecutionFailures(errors)
|
"""
chapter 6
Lists
pg 123 - 146
"""
# 124
# cmd python
# ctrl + z to exit
"""
# Strings Literals
# Double Quotes
spam = "that is Alice's cat."
# Escape Characters
spam = 'Say hi to Bob\'s mother.'
works!!
"""
# 125
# cmd python
# ctrl + z to exit
"""
print("Hello there!\nHow are you?\nI\'m doing fine. ")
# Raw Strings
print(r'That is Carol\'s cat.')
# Multiline Strings with Triple Quotes
# catnapping.py
print('''Dear alice,
Eve's cat has been arrested for catnapping, cat burglary, and extortion.
Sincerely,
Bob''')
works!!
"""
# 126
"""
print('Dear alice,\n\nEve\'s cat has been arrested for catnapping, cat burglary, and extortion.\n\nSincerely,\nBob')
# Multiline Comments
# ### this is a test Python program.
# written by Al Sweigart al@inventwithpython.com
# this program was designed for Python 3, not Python 2.
# ###
# def spam():
# ### this is a multiline comment to help
# explain what the spam() function does.###
# print('Hello!')
# Indexing and Slicing Strings
# cmd python
# ctrl + z to exit
spam = 'Hello world!'
spam[0]
spam[4]
spam[-1]
spam[0:5]
spam[:5]
spam[6:]
works!! sort of. had to replace the triple " to ###
"""
# 127
# cmd python
# ctrl + z to exit
"""
spam = 'Hello world!'
fizz = spam[0:5]
fizz
# The in and not in Operators with Strings
'Hello' in 'Hello World'
'Hello' in 'Hello'
'HellO' in 'Hello World'
'' in 'spam'
'cats' not in 'cats and dogs'
works!!
"""
# 128
# cmd python
# ctrl + z to exit
"""
# The upper(), lower(), isupper(), and islower() String Methods
spam = 'Hello world!'
spam = spam.upper()
spam
spam = spam.lower()
spam
print('How are you?')
feeling = input()
if feeling.lower() == 'great':
print('I feel great too.')
else:
print('I hope the rest of your day is good.')
works!!
"""
# 129
# cmd python
# ctrl + z to exit
"""
spam = 'Hello world!'
spam.islower()
spam.isupper()
'HELLO'.isupper()
'abc12345'.islower()
'12345'.islower()
'12345'.isupper()
'Hello'.upper()
'Hello'.upper().lower()
'Hello'.upper().lower().upper()
'HELLO'.lower()
'HELLO'.lower().islower()
# The isX String Methods
works!!
"""
# 130
# cmd python
# ctrl + z to exit
"""
'hello'.isalpha()
'hello123'.isalpha()
'hello123'.isalnum()
'hello'.isalnum()
'123'.isdecimal()
' '.isspace()
'This Is Title Case'.istitle()
'This Is Title Case 123'.istitle()
'This Is not Title Case'.istitle()
'This Is NOT Title Case Either'.istitle()
# validateInput.py
while True:
print('Enter your age:')
age = input()
if age.isdecimal():
break
print('Please enter a number for your age.')
while True:
print('Select a new password (letters and numbers only):')
password = input()
if password.isalnum():
break
print('Passwords can only have letters and numbers.')
works!!
"""
# 131
# cmd python
# ctrl + z to exit
"""
# The startswith() and endswith() String Methods
'Hello world!'.startswith('Hello')
'Hello world!'.endswith('world!')
'abc123'.startswith('abcdef')
'abc123'.endswith('12')
'Hello world!'.startswith('Hello world!')
'Hello world!'.endswith('Hello world!')
# The join() and split() String Methods
works!!
"""
# 132
# cmd python
# ctrl + z to exit
"""
', '.join(['cats', 'rats', 'bats'])
' '.join(['My', 'name', 'is', 'Simon'])
'ABC'.join(['My', 'name', 'is', 'Simon'])
'My name is Simon'.split()
'MyABCnameABCisABCSimon'.split('ABC')
'My name is Simon'.split('m')
spam = '''Dear Alice,
How have you been? I am fine.
There is a container in the fridge
that is labeled "Milk Experiment".
Please do not drink it.
Sincerely,
Bob'''
spam.split('\n')
works!!
"""
# 133
# cmd python
# ctrl + z to exit
# Justifying Text with rjust(), ljust(), and center()
"""
'Hello'.rjust(10)
'Hello'.rjust(20)
'Hello World'.rjust(20)
'Hello'.ljust(10)
'Hello'.rjust(20, '*')
'Hello'.ljust(20, '-')
'Hello'.center(20)
'Hello'.center(20, '=')
# picnicTable.py
def printPicnic(itemsDict, leftWidth, rightWidth):
print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))
for k, v in itemsDict.items():
print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))
picnicItems = {'sandwiches': 4, 'apples': 12, 'cups': 4, 'cookies': 8000}
printPicnic(picnicItems, 12, 5)
printPicnic(picnicItems, 20, 6)
works ! !
"""
# 134 - 135
# cmd python
# ctrl + z to exit
"""
# Removing Whitespace with strip(), rstrip(), and lstrip()
spam = ' Hello World '
spam.strip()
spam.lstrip()
spam.rstrip()
spam = 'SpamSpamBaconSpamEggsSpamSpam'
spam.strip('ampS')
# Copying and Pasting Strings with the pyperclip Module
# import pyperclip
# pyperclip.copy('Hello world!')
# pyperclip.paste()
# pyperclip.paste()
sort of works!!
"""
# 137
# cmd python
# ctrl + z to exit
"""
#! python3
# pw.py - An insecure password locker program.
import sys
PASSWORDS = {'email': 'F7minlBDDuvMJuxESSKHFhTxFtjVB6',
'blog': 'VmALvQyKAxiVH5G8v01if1MLZF3sdt',
'luggage': '12345'}
# Step 2: Handle Command Line Arguments
#! python3
# pw.py - An insecure password locker program.
PASSWORDS = {'email': 'F7minlBDDuvMJuxESSKHFhTxFtjVB6',
'blog': 'VmALvQyKAxiVH5G8v01if1MLZF3sdt',
'luggage': '12345'}
if len(sys.argv) < 2:
print('Usage: python pw.py [account] - copy account password')
sys.exit()
account = sys.argv[1] # first command line arg is the account name
works!!
"""
# 137 - 138
# cmd python
# ctrl + z to exit
"""
# Step 3: Copy the Right Password
#! python3
# pw.py - An insecure password locker program.
PASSWORDS = {'email': 'F7minlBDDuvMJuxESSKHFhTxFtjVB6',
'blog': 'VmALvQyKAxiVH5G8v01if1MLZF3sdt',
'luggage': '12345'}
# import sys, pyperclip
if len(sys.argv) < 2:
print('Usage: py pw.py [account] - copy account password')
sys.exit()
account = sys.argv[1] # first command line arg is the account name
if account in PASSWORDS:
pyperclip.copy(PASSWORDS[account])
print('Password for ' + account + ' copied to clipboard.')
else:
print('There is no account named ' + account)
sort of works!!
""" |
# --- Find the Median
def bigger(a, b):
    """Return the larger of a and b (b wins ties, as in the original)."""
    return a if a > b else b
def biggest(a, b, c):
    """Return the largest of the three values."""
    return max(a, b, c)
# --- My solution
def median(a, b, c):
    """Return the middle value of a, b and c.

    Bug fix: the original zeroed out the biggest value and re-ran
    ``biggest``, which silently returns 0 for all-negative inputs
    (e.g. median(-1, -2, -3) gave 0, not -2).  Sorting the three
    values is correct for any comparable numbers.
    """
    return sorted([a, b, c])[1]
# --- Their solution - a cleaner way to eliminate variables
def median1(a, b, c):
    """Return the median by discarding the largest of the three values."""
    top = max(a, b, c)
    if top == a:
        return max(b, c)
    if top == b:
        return max(a, c)
    return max(a, b)
# Demo calls for both median implementations; expected output follows each.
print(median(1,2,3))
#>>> 2
print(median(9,3,6))
#>>> 6
print(median(7,8,7))
#>>> 7
print(median1(1,2,3))
#>>> 2
print(median1(9,3,6))
#>>> 6
print(median1(7,8,7))
#>>> 7
# ---- BLASTOFF -- While Loops
def countdown(n):
    """Print n, n-1, ..., 1, then 'Blastoff'.

    Fix: the original used Python 2 ``print`` statements; parenthesized
    calls behave identically on Python 2 and also run on Python 3.
    """
    while n > 0:
        print(n)
        n = n - 1
    print('Blastoff')


countdown(9)
# --- Problem - Find Last
# Define a procedure, find_last, that takes as input
# two strings, a search string and a target string,
# and returns the last position in the search string
# where the target string appears, or -1 if there
# are no occurrences.
#
# Example: find_last('aaaa', 'a') returns 3
# Make sure your procedure has a return statement.
def find_last(string, target):
    """Return the last index where ``target`` occurs in ``string``, or -1.

    Idiom fix: ``str.rfind`` already reports the right-most occurrence
    (and returns -1 when absent), replacing the hand-rolled forward scan.
    It matches the original for every documented case, including empty
    targets (find_last('222222222', '') == 9).
    """
    return string.rfind(target)
# Demo calls (Python 2 print statements); expected output follows each.
print find_last('aaaa', 'a')
#>>> 3
print find_last('aaaaa', 'aa')
#>>> 3
print find_last('aaaa', 'b')
#>>> -1
print find_last("111111111", "1")
#>>> 8
print find_last("222222222", "")
#>>> 9
print find_last("", "3")
#>>> -1
#print find_last("", "")
#>>> 0
# Define a procedure weekend which takes a string as its input, and
# returns the boolean True if it's 'Saturday' or 'Sunday' and False otherwise.
def weekend(day):
    """Return True when ``day`` is 'Saturday' or 'Sunday', else False."""
    return day in ('Saturday', 'Sunday')
# Demo calls (Python 2 print statements); expected output follows each.
print weekend('Monday')
#>>> False
print weekend('Saturday')
#>>> True
print weekend('July')
#>>> False
# Define a procedure, stamps, which takes as its input a positive integer in
# pence and returns the number of 5p, 2p and 1p stamps (p is pence) required
# to make up that value. The return value should be a tuple of three numbers
# (that is, your return statement should be followed by the number of 5p,
# the number of 2p, and the nuber of 1p stamps).
#
# Your answer should use as few total stamps as possible by first using as
# many 5p stamps as possible, then 2 pence stamps and finally 1p stamps as
# needed to make up the total.
#
# (No fair for USians to just say use a "Forever" stamp and be done with it!)
#
def stamps(change):
    """Return (five_p, two_p, one_p) stamp counts totalling ``change`` pence.

    Greedily uses the largest denominations first, which minimises the
    total number of stamps for the 5/2/1 coin system.

    Improvement: the original subtracted one stamp per loop iteration
    (O(change)); ``divmod`` computes the same greedy counts in O(1) and
    reads as plain arithmetic.
    """
    fivep, change = divmod(change, 5)
    twop, change = divmod(change, 2)
    onep = change
    return (fivep, twop, onep)
# Your code here
# Demo calls (Python 2 print statements); expected output follows each.
print stamps(8)
#>>> (1, 1, 1) # one 5p stamp, one 2p stamp and one 1p stamp
print stamps(5)
#>>> (1, 0, 0) # one 5p stamp, no 2p stamps and no 1p stamps
print stamps(29)
#>>> (5, 2, 0) # five 5p stamps, two 2p stamps and no 1p stamps
print stamps(0)
#>>> (0, 0, 0) # no 5p stamps, no 2p stamps and no 1p stamps
# The range of a set of values is the maximum value minus the minimum
# value. Define a procedure, set_range, which returns the range of three input
# values.
# Hint: the procedure, biggest which you coded in this unit
# might help you with this question. You might also like to find a way to
# code it using some built-in functions.
def set_range(a, b, c):
    """Return the range (max - min) of the three input values.

    Fix: the original bound its result to a local named ``range``,
    shadowing the builtin; the direct expression avoids that and the
    intermediate variables entirely.
    """
    return max(a, b, c) - min(a, b, c)
# Your code here
# Demo calls (Python 2 print statements); expected output follows each.
print set_range(10, 4, 7)
#>>> 6 # since 10 - 4 = 6
print set_range(1.1, 7.4, 18.7)
#>>> 17.6 # since 18.7 - 1.1 = 17.6
import requests
import urlparse
import os
from bs4 import BeautifulSoup
def get_pdf_urls():
    """Scrape the Supreme Court oral argument transcript sites
    to return a list of urls to all of the oral argument transcript pdfs.

    The site urls look like this:
    http://www.supremecourt.gov/oral_arguments/argument_transcript/[YEAR]
    where [YEAR] starts at 2004 and ends at 2014

    The pdf urls look like this:
    http://www.supremecourt.gov/oral_arguments/argument_transcripts/[NAME].pdf
    """
    prefix = "http://www.supremecourt.gov/oral_arguments/argument_transcript/"
    # range(2004, 2015) covers the 2004-2014 terms inclusive.
    base_urls = [prefix + str(num) for num in range(2004, 2015)]
    pdf_urls = []
    for base_url in base_urls:
        l = requests.get(base_url)
        soup = BeautifulSoup(l.content, 'html.parser')
        # Find every oral argument transcript url ('href') in the html.
        for a in soup.find_all('a', href=True):
            if a['href'].startswith('../argument_transcripts/'):
                # NOTE: ``urlparse`` is the Python 2 module (urllib.parse
                # on Python 3); it resolves the relative href against the
                # year page's url.
                pdf_url = urlparse.urljoin(base_url, a['href'])
                pdf_urls.append(pdf_url)
    return pdf_urls
def download_pdfs(pdf_urls):
"""Download each oral argument transcript pdf."""
for pdf_url in pdf_urls:
os.system('wget %s' % pdf_url)
if __name__ == '__main__':
    # Scrape all transcript links first, then download them one by one.
    pdf_urls = get_pdf_urls()
    download_pdfs(pdf_urls)
from django.contrib import admin
from django.test import TestCase

from .models.category import Category
from .models.page import Page
class teste_Category(TestCase):
    """Tests for the Category model."""

    def teste_comment_nulo(self):
        # Bug fix: Django managers have no ``find`` method, and
        # assertEquals(<queryset>, True) could never pass even if they did.
        # Create the category, then assert a matching row exists.
        Category.objects.get_or_create(name="Teste")
        self.assertTrue(Category.objects.filter(name="Teste").exists())
|
import numpy as np
'''动态规划
'''
def minPathSum_M_N(m):
    """Minimum top-left to bottom-right path sum with right/down moves,
    using a full rows x cols DP table (O(rows*cols) space).

    Bug fix: the DP table was hard-coded to ``np.zeros((4, 4))``, so any
    matrix with more than 4 rows or columns raised IndexError (and smaller
    ones silently wasted space).  Allocate it from the input's shape.
    """
    if (len(m) == 0) or (len(m[0]) == 0):
        return 0  # empty matrix has no path
    rows = len(m)
    cols = len(m[0])
    matdp = np.zeros((rows, cols))
    matdp[0][0] = m[0][0]
    # First column / first row have only one way in.
    for i in range(1, rows):
        matdp[i][0] = matdp[i-1][0] + m[i][0]
    for j in range(1, cols):
        matdp[0][j] = matdp[0][j-1] + m[0][j]
    # Each inner cell extends the cheaper of its top/left neighbours.
    for i in range(1, rows):
        for j in range(1, cols):
            matdp[i][j] = min(matdp[i-1][j], matdp[i][j-1]) + m[i][j]
    return matdp[rows-1][cols-1]
def minPathSum_M(m):
    """Minimum path sum using only O(min(rows, cols)) extra space.

    Bug fix: in the rows > cols branch the original nested its loops the
    wrong way round (sweeping columns outermost and rows innermost while
    still indexing ``m[i][j]`` with a stale ``j``), producing wrong sums.
    The branch now mirrors the correct column-wise branch: the rolling
    array spans one row and the matrix is swept row by row.
    """
    if (len(m) == 0) or (len(m[0]) == 0):
        return 0
    rows = len(m)
    cols = len(m[0])
    arr = np.zeros(min(rows, cols))
    arr[0] = m[0][0]
    if rows > cols:
        # Rolling array spans one row; sweep the matrix row by row.
        for j in range(1, cols):
            arr[j] = arr[j-1] + m[0][j]
        for i in range(1, rows):
            arr[0] = arr[0] + m[i][0]
            for j in range(1, cols):
                arr[j] = min(arr[j], arr[j-1]) + m[i][j]
        return arr[-1]
    else:
        # Rolling array spans one column; sweep the matrix column by column.
        for j in range(1, rows):
            arr[j] = arr[j-1] + m[j][0]
        for i in range(1, cols):
            arr[0] = arr[0] + m[0][i]
            for j in range(1, rows):
                arr[j] = min(arr[j-1], arr[j]) + m[j][i]
        return arr[-1]
if __name__ == "__main__":
# mat = np.array([[1, 3, 5, 9], [8, 1, 3, 4], [5, 0, 6, 1], [8, 8, 4, 0]])
mat = np.array([[1, 3, 5, 9], [5, 0, 6, 1], [8, 8, 4, 0]])
# flag = minPathSum_M_N(mat)
flag = minPathSum_M(mat)
print(flag)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from watchdog import Watchdog
if __name__ == "__main__":
Watchdog(["osascript", "-e", """tell application "Safari"
do JavaScript "window.location.reload()" in front document
end tell"""]).run()
|
import sys
import configparser as cp
try:
    from pyspark import SparkContext, SparkConf

    props = cp.RawConfigParser()
    props.read("src/main/Resources/application.ini")
    # env = sys.argv[1]
    # NOTE(review): the execution mode is read from the ini section named by
    # argv[5]; the comment below documents argv[1..4] — confirm argv[5] is
    # the environment name.
    conf = SparkConf().setMaster(props.get(sys.argv[5], 'executionMode')).setAppName("Revenue Per Month")
    sc = SparkContext(conf=conf)
    # input dir, output base dir, local base dir, month, environment
    inputPath = sys.argv[1]
    outPath = sys.argv[2]
    month = sys.argv[3]
    # Validate HDFS paths through Hadoop's Java FileSystem API (py4j gateway).
    Path = sc._gateway.jvm.org.apache.hadoop.fs.Path
    FileSystem = sc._gateway.jvm.org.apache.hadoop.fs.FileSystem
    Configuration = sc._gateway.jvm.org.apache.hadoop.conf.Configuration
    fs = FileSystem.get(Configuration())
    if(fs.exists(Path(inputPath)) == False):
        print("Input path does not exists")
    else:
        # Overwrite semantics: delete any existing output dir first.
        if(fs.exists(Path(outPath))):
            fs.delete(Path(outPath), True)
        # orders: keep (order_id, 1) for orders whose date column contains month.
        orders = inputPath + "/orders"
        ordersFiltered = sc.textFile(orders).\
            filter(lambda order:month in order.split(",")[1]).\
            map(lambda order: (int(order.split(",")[0]), 1))
        # order_items: (order_id, (product_id, subtotal)); join drops items
        # whose order is outside the month, then revenue is summed per product.
        orderItems = inputPath + "/order_items"
        revenueByProductId = sc.textFile(orderItems). \
            map(lambda orderItem:
                (int(orderItem.split(",")[1]),
                 (int(orderItem.split(",")[2]), float(orderItem.split(",")[4])
                  ))
                ). \
            join(ordersFiltered). \
            map(lambda rec: rec[1][0]). \
            reduceByKey(lambda total, ele: total + ele)
        # products live on the local filesystem, not HDFS.
        localPath = sys.argv[4]
        productsFile = open(localPath + "/products/part-00000")
        products = productsFile.read().splitlines()
        # Convert into RDD and extract product_id and product_name
        # Join it with aggregated order_items (product_id, revenue)
        # Get product_name and revenue for each product
        sc.parallelize(products). \
            map(lambda product:
                (int(product.split(",")[0]), product.split(",")[2])). \
            join(revenueByProductId). \
            map(lambda rec: rec[1][0] + "\t" + str(rec[1][1])). \
            saveAsTextFile(outPath)
    print("Successfully imported Spark Modules")
except ImportError as e:
    print("can not import spark modules", e)
    sys.exit(1)
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class DblpItem(scrapy.Item):
    """Scraped DBLP publication record."""
    # define the fields for your item here like:
    ConOrJou = scrapy.Field()      # venue kind — presumably conference vs. journal; confirm in the spider
    ConOrJouName = scrapy.Field()  # venue name
    authors = scrapy.Field()       # author list
    title = scrapy.Field()         # publication title
    category = scrapy.Field()      # category label assigned by the spider
    # info = scrapy.Field()
|
class Solution(object):
    def fourSum(self, nums, target):
        """Return all unique quadruplets in ``nums`` summing to ``target``.

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]

        Sorts the input, fixes the two smallest members with nested loops,
        and closes the remaining pair with two pointers — O(n^3) time.
        Portability fix: ``xrange`` (a NameError on Python 3) is replaced
        with ``range``, which behaves identically here on both versions.
        """
        if not nums or len(nums) < 4:
            return []
        nums.sort()
        res = []
        for i in range(len(nums) - 3):
            # Skip duplicate first elements to avoid repeated quadruplets.
            if i != 0 and nums[i] == nums[i-1]:
                continue
            for j in range(i + 1, len(nums) - 2):
                if j != i + 1 and nums[j] == nums[j-1]:
                    continue
                goal = target - nums[i] - nums[j]
                start, end = j + 1, len(nums) - 1
                while start < end:
                    if nums[start] + nums[end] == goal:
                        res.append([nums[i], nums[j], nums[start], nums[end]])
                        start += 1
                        # Advance past duplicates of the third member.
                        while start < end and nums[start] == nums[start-1]:
                            start += 1
                    elif nums[start] + nums[end] < goal:
                        start += 1
                    else:
                        end -= 1
        return res
|
def main():
    # Brute force: among all triples of distinct items, print the largest
    # total that does not exceed the cap m.  Reads two stdin lines:
    # "n m" then the n item values.
    maxi = 0
    n, m = map(int, input().split())
    lst = list(map(int, input().split()))
    for i in range(n):
        for j in range(i+1, n):
            for k in range(j+1, n):
                if maxi < lst[i]+lst[j]+lst[k] <= m:
                    maxi = lst[i]+lst[j]+lst[k]
    print(maxi)


main()
|
# Python 3.4 program to recursively scan from current folder
# or folder dropped onto file or specificed in command line
# for every file with an extension in the exts list:
# check if file's album tag is "recompressed", if not recompress it, and add that tag
# then rename/overwrite of OVERWRITE = True
#
#
#
# Copy ffmpeg and ffprobe into the same folder as this script.
import sys
import subprocess
import os
from pathlib import Path
import shlex
import re
import time
def escape_argument(arg):
    """Escape one argument for the cmd.exe shell.

    From http://stackoverflow.com/a/29215357/1499289
    See http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx

    First produces a quoted form suitable for CommandLineToArgvW (only
    needed for empty args or args containing quotes/whitespace), then
    escapes cmd.exe metacharacters on top of that.
    """
    needs_quoting = (not arg) or re.search(r'(["\s])', arg)
    if needs_quoting:
        arg = '"{0}"'.format(arg.replace('"', r'\"'))
    return escape_for_cmd_exe(arg)
def escape_for_cmd_exe(arg):
    """Escape a string so cmd.exe passes it through unchanged.

    The input is expected to already be properly escaped for the receiving
    program; this adds a further layer surviving cmd.exe's interpolation.
    Every metacharacter is prefixed with ``^``, removing the ability to
    e.g. use redirects or variables.

    @param arg [String] a single command line argument to escape for cmd.exe
    @return [String] an escaped string suitable to be passed as a program
                     argument to cmd.exe
    """
    special = '()%!^"<>&|'
    pattern = re.compile('[' + re.escape(special) + ']')
    return pattern.sub(lambda match: '^' + match.group(0), arg)
def isRecompressed(inputPath):
    """Return True if the file's album tag equals "recompressed".

    Runs ffprobe on the (already shell-escaped) path and parses the single
    ``TAG:album=value`` line it prints; any file without the tag (or any
    ffprobe failure, which leaves stdout empty) yields False.

    Bug fix: ``subprocess.call`` returns an int exit code, so calling
    ``.communicate()`` on its result raised AttributeError; ``Popen`` is
    required to capture the output.
    """
    cmd = 'ffprobe -hide_banner -of default=noprint_wrappers=1 -show_entries format_tags=album -v quiet '+str(inputPath)
    probe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output, outputError = probe.communicate()
    print(output, outputError)
    output = output.strip().decode('utf-8')
    if len(output) < 1:
        return False
    return (output.split('=')[1] == "recompressed")
def doRecompress(inputPath, outputPath):
    """Recompress inputPath into outputPath with x264 at CRF 23.

    CRF 23 approximately halves the size of iPad 720p videos.  The output
    is tagged with album="recompressed" so isRecompressed() can detect it
    later.  Audio is stream-copied.

    :param inputPath: shell-escaped source path string
    :param outputPath: shell-escaped destination path string
    :return: True if ffmpeg exited with status 0
    """
    cmd = ('ffmpeg -hide_banner -v quiet -i ' + str(inputPath) +
           ' -metadata album="recompressed"'
           ' -c:v libx264 -preset slow -crf 23 -acodec copy ' + str(outputPath))
    # test command that does not recompress:
    # cmd = os.path.join(__location__, "ffmpeg") + ' -hide_banner -v quiet -i ' + str(inputPath) + \
    #       ' -metadata album="recompressed" -c copy ' + str(outputPath)
    print(cmd)
    # BUG FIX: the old code called subprocess.call() (returns an int, so
    # .communicate() raised AttributeError) and caught a bare
    # CalledProcessError name that was never imported and that call()
    # never raises anyway.  subprocess.run() captures both streams and
    # the exit status directly.
    proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    print(proc.stdout, proc.stderr)
    if proc.returncode != 0:
        print("ffmpeg returned error", proc.returncode)
    return proc.returncode == 0
def overwriteFile(inputPath, outputPath):
    """Replace the original file with the recompressed one.

    The original is kept, renamed with an ``.original`` suffix; uncomment
    the os.remove() line (and drop the first rename) to delete it instead.
    """
    original = str(inputPath)
    replacement = str(outputPath)
    # os.remove(original)
    os.rename(original, original + ".original")
    os.rename(replacement, original)
def shrinkFile(inputPath):
    """Recompress a single video file in place.

    Builds a sibling "<name>-shrunk<ext>" output path, skips files whose
    metadata marks them as already recompressed, and on success either
    replaces the original (OVERWRITE = True) or leaves both files.

    :param inputPath: pathlib.Path of the video to process
    """
    # Derive the "-shrunk" output path next to the input file.
    outputPath = inputPath.with_name(
        "{}-shrunk{}".format(inputPath.stem, inputPath.suffix))
    # Quote both paths for the platform's shell.
    quote = escape_argument if os.name == "nt" else shlex.quote
    inputPathEsc = quote(str(inputPath))
    outputPathEsc = quote(str(outputPath))
    # Skip files that already carry the "recompressed" marker tag.
    if isRecompressed(inputPathEsc):
        print("File " + str(inputPath) + " already recompressed")
        return
    success = doRecompress(inputPathEsc, outputPathEsc)
    # Final recheck: only touch the original once the new file really
    # carries the "recompressed" marker tag.
    if success and isRecompressed(outputPathEsc):
        print("New file has recompressed metadata")
        if OVERWRITE:
            overwriteFile(inputPath, outputPath)
# OVERWRITE = True calls overwriteFile, which, by default, just renames
# OVERWRITE = False would leave the original file, and a file named filename-shrunk.ext
OVERWRITE = True
# Directory this script lives in (resolved against the current working dir).
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# If you don't drag a folder or run with an extension, attempt to use folder script is run from
if len(sys.argv) > 1:
    i = sys.argv[1]
else:
    i = __location__
print(i)
# Extensions to process; glob matching is case-sensitive, hence both cases.
exts = ['mov', 'MOV', 'mp4', 'MP4', 'm4v', 'M4V']
# Scan the whole tree repeatedly, sleeping a minute between passes so newly
# added files get picked up.  Runs until interrupted (Ctrl-C).
while True:
    for ext in exts:
        files = sorted(Path(i).glob('**/*.'+ext))
        for file in files:
            shrinkFile(file)
    print("Es wird 1 Minute gewartet")
    time.sleep(60*1)
|
from tools.test_case_generators.raw_file_reader.raw_file_reader \
import RawFileReader
import abc
import sys
class KuugaPseudoInstruction(object):
    """
    An abstract class that represents a Pseudo-Assembly language instruction.
    Crucially it contains a method that allows the instruction to be expanded.
    Either into a set of Pseudo-Instructions that themselves have to be
    expanded, or into a set of SUBLEQ instructions.
    Attributes:
        name: The name of the PseudoInstruction
    """
    # NOTE: abc.abstractproperty has been deprecated since Python 3.3; the
    # supported spelling is @property stacked on @abc.abstractmethod.
    # (Without an ABCMeta metaclass neither form is enforced at
    # instantiation time; both act purely as documentation markers here,
    # which preserves the original behaviour.)
    @property
    @abc.abstractmethod
    def name(self):
        return
    @abc.abstractmethod
    def expand_instruction(self, instruction, start_location):
        """
        An abstract method that creates, from a given instruction, the expansion
        of the instruction into either more instructions that need expanding
        or a set of SUBLEQ instructions. Or some combination of the two
        :param instruction: A list of strings that represent a high level
        instruction.
        :param start_location: The place the instruction occurs in memory,
        so that jumps can be made relative to it.
        :return: A list of sublists that contain either the operands needed
        for SUBLEQ or the instructions to be further expanded.
        """
        return
class AddPseudoInstruction(KuugaPseudoInstruction):
    """
    Adds the contents of two memory locations together, storing the result
    in the first given location.  Descriptions of inherited fields are
    given in the abstract class.
    """
    @property
    def name(self):
        return "ADD"
    def expand_instruction(self, instruction, start_location):
        dest, addend = instruction[1], instruction[2]
        # ADD is three SUBLEQs: negate the addend into the temporary TADD,
        # subtract that negative from the destination (i.e. add the
        # addend), then clear TADD so it can be reused.
        return [
            [addend, "TADD", start_location + 1],
            ["TADD", dest, start_location + 2],
            ["TADD", "TADD", start_location + 3],
        ]
class SubtractPseudoInstruction(KuugaPseudoInstruction):
    """
    Subtracts the contents of two memory locations, storing the result in
    the first given location.  Descriptions of inherited fields are given
    in the abstract class.
    """
    @property
    def name(self):
        return "SUB"
    def expand_instruction(self, instruction, start_location):
        # SUB maps directly onto a single SUBLEQ with the operands in the
        # correct order.
        expansion = [[instruction[2], instruction[1], start_location + 1]]
        return expansion
class NOTPseudoInstruction(KuugaPseudoInstruction):
    """
    Calculates the bitwise NOT of the contents of a given memory location
    and stores the result back in that location.  Descriptions of
    inherited fields are given in the abstract class.
    """
    @property
    def name(self):
        return "NOT"
    def expand_instruction(self, instruction, start_location):
        operand = instruction[1]
        # In two's complement NOT x == -x - 1, so: subtract the operand
        # from the zeroed temporary TNOT, subtract one more, move the
        # result back into place, and clear TNOT for reuse.
        return [
            ["SUB", "TNOT", operand, start_location + 1],
            ["SUB", "TNOT", "ON", start_location + 2],
            ["MOVE", operand, "TNOT", start_location + 3],
            ["TNOT", "TNOT", start_location + 4],
        ]
class MultiplyPseudoInstruction(KuugaPseudoInstruction):
    """
    A concrete implementation of a function that multiplies the contents of two
    memory locations together stores the result in the first memory location.
    Descriptions of inherited fields are given in the abstract class.
    Implemented as repeated addition: the first operand is added to an
    accumulator (TMUL2) once per unit of the second operand.
    """
    @property
    def name(self):
        return "MUL"
    def expand_instruction(self, instruction, start_location):
        # instruction[1]: multiplicand and destination;
        # instruction[2]: multiplier (loop count).
        return [
            # Copy the second operand into a temporary variable TMUL1
            ["COPY", "TMUL1", instruction[2], start_location+1],
            # Subtract 1 from TMUL1 to check if the loop can break or more
            # needs to be added.
            ["ON", "TMUL1", start_location+4],
            # Add the first operand to TMUL2 where the multiplication
            # accumulates
            ["ADD", "TMUL2", instruction[1], start_location+3],
            # Jump back to the loop test.
            ["Z", "Z", start_location+1],
            # Add one last amount because the branch occurs at 0 which is not
            # desirable but unavoidable
            ["ADD", instruction[1], "TMUL2", start_location+5],
            # Zero both the temporary locations.
            ["TMUL1", "TMUL1", start_location+6],
            ["TMUL2", "TMUL2", start_location+7]
        ]
class MOVEPseudoInstruction(KuugaPseudoInstruction):
    """
    Moves the contents of one memory location to another, zeroing the
    destination before the transfer.  Descriptions of inherited fields are
    given in the abstract class.
    """
    @property
    def name(self):
        return "MOVE"
    def expand_instruction(self, instruction, start_location):
        dest, source = instruction[1], instruction[2]
        return [
            # Clear the destination first.
            [dest, dest, start_location + 1],
            # TMOVE := TMOVE - source, i.e. the negative of the source.
            [source, "TMOVE", start_location + 2],
            # dest := 0 - (-source) == the original source value.
            ["TMOVE", dest, start_location + 3],
            # Clear TMOVE for reuse and move on.
            ["TMOVE", "TMOVE", start_location + 4],
        ]
class DividePseudoInstruction(KuugaPseudoInstruction):
    """
    A concrete implementation of a function that divides the contents of the
    first memory location by the second, storing the result in the first
    memory location. Descriptions of inherited fields are given in the abstract
    class.
    Integer division by repeated subtraction: the quotient accumulates in
    TDIV1 and is finally moved into the first operand; the remainder is
    discarded.
    """
    @property
    def name(self):
        return "DIV"
    def expand_instruction(self, instruction, start_location):
        # instruction[1]: dividend and destination; instruction[2]: divisor.
        return [
            # Subtract the second operand from the first, if this produces a
            # positive result then there is more dividing to do, otherwise
            # break to the zero test.
            [instruction[2], instruction[1], start_location+3],
            # Add 1 to TDIV1, a Temporary Variable, the accumulating result
            ["ADD", "TDIV1", "ON", start_location+2],
            # Branch back to the break check.
            ["Z", "Z", start_location],
            # Because the break test above branches on 0 or less then there's
            # the possibility that the last subtraction actually produced a
            # negative result (i.e there's not enough to divide properly and
            # what's left is the remainder). So subtract 0 from the first
            # operand (the one that's been decreasing). If that produces a 0
            # result then one needs to be added, otherwise do nothing.
            [instruction[1], "TDIV2", start_location+5],
            # Move into the final clean up phase.
            ["Z", "Z", start_location+6],
            # Add 1 to TDIV 1 to account for this extra subtraction found above.
            ["ADD", "TDIV1", "ON", start_location+4],
            # Move the result from the temporary variable into the first memory
            # operand.
            ["MOVE", instruction[1], "TDIV1", start_location+7],
            # Zero both the temporary variables.
            ["TDIV1", "TDIV1", start_location+8],
            ["TDIV2", "TDIV2", start_location+9]
        ]
class ShiftLeftPseudoInstruction(KuugaPseudoInstruction):
    """
    A concrete implementation of a function that left shifts the contents of the
    first memory location by the number of bits specified in the second
    location, storing the result in the first memory location. Descriptions of
    inherited fields are given in the abstract class.
    A left shift by one bit is a doubling, so the operand is added to
    itself once per bit of shift.
    """
    @property
    def name(self):
        return "SHL"
    def expand_instruction(self, instruction, start_location):
        return [
            # Copy the amount to shift into TSHIFT, a temporary variable
            ["COPY", "TSHIFT", instruction[2], start_location+1],
            # Add 1 to the amount to shift because of the branching occurring at
            # 0.
            ["ADD", "TSHIFT", "ON", start_location+2],
            # Subtract 1 from TSHIFT, if this produces a negative result then
            # keep multiplying by 2. Otherwise break.
            ["ON", "TSHIFT", start_location+5],
            # Doubling and multiplying by 2 are synonymous so double
            # the contents of the first memory location.
            ["ADD", instruction[1], instruction[1], start_location+4],
            # Unconditionally branch to the break test.
            ["Z", "Z", start_location+2]
        ]
class ShiftRightPseudoInstruction(KuugaPseudoInstruction):
    """
    A concrete implementation of a function that right shifts the contents of
    the first memory location by the number of bits specified in the second
    location, storing the result in the first memory location. Descriptions of
    inherited fields are given in the abstract class.
    A right shift by one bit is an integer halving, so the operand is
    divided by the constant SHC1 once per bit of shift.
    """
    @property
    def name(self):
        return "SHR"
    def expand_instruction(self, instruction, start_location):
        return [
            # Copy the amount to shift into TSHIFT, a temporary variable
            ["COPY", "TSHIFT", instruction[2], start_location+1],
            # Add 1 to the amount to shift because of the branching occurring at
            # 0.
            ["ADD", "TSHIFT", "ON", start_location+2],
            # Subtract 1 from TSHIFT, if this produces a negative result then
            # keep multiplying by 2. Otherwise break.
            ["ON", "TSHIFT", start_location+5],
            # Divide the contents of the first memory location by SHC1, a
            # constant that is set to the value 2.
            ["DIV", instruction[1], "SHC1", start_location+4],
            # Unconditionally branch to the break test.
            ["Z", "Z", start_location+2]
        ]
class COPYPseudoInstruction(KuugaPseudoInstruction):
    """
    Copies the contents of one memory location to another.  Descriptions
    of inherited fields are given in the abstract class.
    """
    @property
    def name(self):
        return "COPY"
    def expand_instruction(self, instruction, start_location):
        dest, source = instruction[1], instruction[2]
        return [
            # TCOPY1 := -source; the source location itself is untouched.
            [source, "TCOPY1", start_location + 1],
            # TCOPY2 := 0 - TCOPY1, recovering the original source value.
            ["TCOPY1", "TCOPY2", start_location + 2],
            # Move the recovered value into the destination.
            ["MOVE", dest, "TCOPY2", start_location + 3],
            # Clear both temporaries for reuse.
            ["TCOPY1", "TCOPY1", start_location + 4],
            ["TCOPY2", "TCOPY2", start_location + 5],
        ]
class ANDPseudoInstruction(KuugaPseudoInstruction):
    """
    A concrete implementation of a function that calculates the bitwise AND
    of the contents of two memory locations. Descriptions of inherited fields
    are given in the abstract class.
    Computed bit-serially: on each of ANDC1 iterations (presumably the word
    width -- confirm against the value of ANDC1) both operands are shifted
    left one bit and the result (TAND4) gains a 1 bit only when both exposed
    top bits are set.
    """
    @property
    def name(self):
        return "AND"
    def expand_instruction(self, instruction, start_location):
        return [
            # Copy the AND Contants and the second operand to temporary
            # locations.
            ["COPY", "TAND1", "ANDC1", start_location+1],
            ["COPY", "TAND7", instruction[2], start_location+2],
            # Main
            # Subtract 1 from TAND1, if zero is reached then branch
            ["ON", "TAND1", start_location+18],
            # Copy the two operands into temporary variables
            ["COPY", "TAND2", instruction[1], start_location+4],
            ["COPY", "TAND3", instruction[2], start_location+5],
            # Shift the result left by 1 bit so the new bit is exposed
            ["ADD", "TAND4", "TAND4", start_location+6],
            # Subtract the 2nd AND constant from the operands, if
            # a negative or zero result is produced then jump to the zero-test
            # in either case.
            ["ANDC2", "TAND2", start_location+12],
            ["ANDC2", "TAND3", start_location+15],
            # Operation
            # Set the new bit to the value 1 by adding 1
            ["ADD", "TAND4", "ON", start_location+9],
            # Shift the two operands left by 1
            ["ADD", instruction[1], instruction[1], start_location+10],
            ["ADD", instruction[2], instruction[2], start_location+11],
            # Unconditonally jump to the break test.
            ["Z", "Z", start_location+2],
            # Zero Check
            # Zero the value in TAND5
            ["TAND5", "TAND5", start_location+13],
            # Subtract TAND2 from TAND5, if a 0 results then the bit was present
            # originally so go and check the second operand
            ["TAND2", "TAND5", start_location+7],
            # If the result is positive then the original subtraction produced
            # a negative result so move onto shifting up both operands.
            ["TAND5", "TAND5", start_location+9],
            # Zero Check2
            # Zero the value in TAND6
            ["TAND6", "TAND6", start_location+16],
            # Subtract TAND3 from TAND6, if a 0 results then the bit was present
            # originally so go and check the second operand
            ["TAND3", "TAND6", start_location+8],
            # If the result is positive then the original subtraction produced
            # a negative result so move onto shifting up both operands.
            ["TAND6", "TAND6", start_location+9],
            # Cleanup
            # Move TAND4 and TAND7 to the original memory locations so the
            # second operand is retained.
            ["MOVE", instruction[1], "TAND4", start_location+19],
            ["MOVE", instruction[2], "TAND7", start_location+20],
            # Clean up the temporary locations.
            ["TAND1", "TAND1", start_location+21],
            ["TAND2", "TAND2", start_location+22],
            ["TAND3", "TAND3", start_location+23],
            ["TAND4", "TAND4", start_location+24]
        ]
class Gouram(object):
    """
    The main class, co-ordinates the unpacking of the test-case files into
    executable assembly code for Kuuga.
    """
    # Create a dictionary that maps the name of classes to objects of those
    # classes.  NOTE: built at class-creation time, so every
    # KuugaPseudoInstruction subclass must already be defined above.
    pseudo_instructions = {c().name: c()
                           for c in KuugaPseudoInstruction.__subclasses__()}
    # Create a raw file reader to make reading in the test case files easier.
    # Class attribute, shared by all Gouram instances.
    raw_file_reader = RawFileReader()
    def expand(self, file_name):
        """
        The main method in the class, takes a file in and produces a set of
        memory contents that can be executed on Kuuga correctly.
        :param file_name: A string that represents the name of the test case to
        be converted.
        :return: A string that represents the memory contents.
        """
        # Create a program file to store the data and code from the test case
        # files.
        program = self.raw_file_reader.split_data_and_code(file_name)
        # Process the program object to calculate the data locations and
        # expand the programs given.
        program = self.process_program_object(program)
        # Create the memory contents from the expanded program object.
        return self.create_memory_contents(program)
    def process_program_object(self, program):
        """
        Take in the program object and expand the code, then account for the
        placement of data and make sure all the addresses are correct.
        :param program: A program object, extracted from the test-case file, to
        be expanded
        :return: A program object with all expansions completed.
        """
        # Expand the program code
        expanded_code = self.expand_code(program.code)
        # Calculate the value of the program_counter to start with
        program_counter = len(expanded_code)
        # Add in space for a HALT command
        program_counter += 1
        # Taking Program Size, Create Memory with enough blanks for Code.
        # Data locations are offset past the end of the (expanded) code.
        program.data = {name: [pair[0]+program_counter, pair[1]]
                        for name, pair in program.data.items()}
        # Store the expanded code in the program object.
        program.code = expanded_code
        return program
    def create_memory_contents(self, program):
        """
        Taking the expanded program object create the memory contents that
        expanded program entails.
        :param program: A program object that contains all sections expanded
        and names replaced with memory locations.
        :return: The string that represents the memory contents.
        """
        # Convert each codeline to hexadecimal and then extend with a HALT
        # command.
        result = [self.codeline_to_hexadecimal(x, program.data)
                  for x in program.code]
        result.extend(["0x00000001"])
        # Create a list of data values because you can't reliably extract
        # them from a dictionary in the correct order.  Sorting by memory
        # location recovers the layout order.
        data_vals = sorted(program.data.values(), key=lambda datum: datum[0])
        # Extend the program code with all the data values, formatted correctly.
        result.extend([format(x[1], "#010x") for x in data_vals])
        # Add brackets and commas so the result is a valid C Array.
        return "{ " + ", ".join(result) + " }"
    @staticmethod
    def codeline_to_hexadecimal(codeline, data):
        """
        Take in a codeline, and all the data and extract a correct length string
        that represents the codeline but in hexadecimal code.
        :param codeline: A triple that represents the three arguments to SUBLEQ
        :param data: The data from the program object so that locations can
        be extracted
        :return: A formatted string that represents the codeline as a
        hexadecimal string.
        """
        # Extract each of the three locations in turn, the first two as data
        # items and the third as an address
        first_op = data[codeline[0]][0]
        second_op = data[codeline[1]][0]
        third_op = codeline[2]
        # Create a binary string from the operands: three 10-bit fields
        # plus two trailing zero bits makes one 32-bit word.
        binary_string = "{0}{1}{2}00".format(
            format(first_op, "010b"), format(second_op, "010b"),
            format(third_op, "010b"))
        # Take the binary string and return it as a 32-bit hexadecimal string.
        return format(int(binary_string, base=2), "#010x")
    def expand_code(self, code):
        """
        Take the code object, a list of the instructions to the processor,
        and expand them so eventually the whole function is encoded in SUBLEQ.
        :param code: A list of codelines, a codeline being a list of 4 elements
        where the first element is either the name of a pseudo instruction or is
        an argument to SUBLEQ, the next two elements are arguments and then the
        last element is a line number.
        :return: The list of expanded triples where each triple is the three
        arguments needed by SUBLEQ.
        """
        # Set the result to be the original code list
        expanded_code = code
        # Until there are no more instructions to expand.  This is a
        # fix-point iteration: expansions may themselves contain pseudo
        # instructions that need further expansion.
        while True:
            # Assume that there are no more instructions to expand
            end_of_loop = True
            # Iterate over the expanded code
            for counter, code_line in enumerate(expanded_code):
                # If the first element of the code line is one of the defined
                # pseduo instructions then expand, otherwise just move on
                if code_line[0] in self.pseudo_instructions.keys():
                    # If an instruction is expanded and takes up more space
                    # then all the destinations need to be altered by the
                    # amount the list has grown by. So work out which
                    # instructions will need to be altered.
                    destinations_to_alter = [
                        destination
                        for destination, x in enumerate(expanded_code)
                        if x[-1] > counter and destination != counter]
                    # Perform the expansion
                    expansion = self.pseudo_instructions[code_line[0]].\
                        expand_instruction(code_line, counter)
                    # Iterate over the destinations that need to be altered
                    # and add the offset.
                    for destination in destinations_to_alter:
                        expanded_code[destination][-1] += (len(expansion) - 1)
                    # Insert the expanded code as a replacement for the
                    # instruction that was expanded.
                    expanded_code = expanded_code[:counter] + expansion + \
                        expanded_code[counter+1:]
                    # Now an expansion has happened it cannot be the case that
                    # all the instructions were expanded.
                    end_of_loop = False
                    # Break out of the inner loop (indices are now stale).
                    break
            # If no expansions have happened over the course of the whole loop
            # then break.
            if end_of_loop:
                break
        # Return the expanded code.
        return expanded_code
if __name__ == "__main__":
    # Create a Gouram object
    g = Gouram()
    # Expand the test-case file named on the command line and print the
    # resulting memory-contents string (a C array literal).
    print(g.expand(sys.argv[1]))
|
import inquisition as inq
from pathlib import Path
from tqdm import tqdm
import whoosh.index
import argparse
import sys
def parse_args(args):
    """
    Parse command line arguments into a dict.

    :param args: list of command line argument strings (e.g. sys.argv[1:])
    :return: dict mapping destination names to parsed values
    """
    parser = argparse.ArgumentParser(description='Create a whoosh index of text')
    parser.add_argument('-t', '--text_path', dest='text_path', required=True,
                        help="Path where text to be indexed is stored.")
    parser.add_argument('-a', '--analyzer_type', dest='analyzer_type',
                        required=False, default='Stemming',
                        help='Whoosh analyzer type to use for index')
    parser.add_argument('-ip', '--index_path', dest='index_path', required=True,
                        help='Path where the index should be stored')
    parser.add_argument('-in', '--index_name', dest='index_name', required=True,
                        help='Name assigned to the index')
    return vars(parser.parse_args(args))
def build_index():
    """
    Build a whoosh index from the files selected by the command-line args.

    Each text file becomes one document whose title is the file's stem and
    whose text is the file's full contents.

    :return: 1 on completion
    """
    args = parse_args(sys.argv[1:])
    text_files = inq.get_file_names(args['text_path'])
    schema = inq.create_title_and_text_schema(analyzer=args['analyzer_type'])
    idx = whoosh.index.create_in(args['index_path'],
                                 schema=schema,
                                 indexname=args['index_name'])
    writer = idx.writer()
    for file_name in tqdm(text_files):
        file_path = Path(file_name)
        with file_path.open('r') as handle:
            contents = handle.read()
        writer.update_document(
            title=file_path.stem,
            text=contents,
        )
    writer.commit()
    return 1
if __name__ == '__main__':
    build_index()
# Example argument values kept around for manual testing; this
# module-level string literal is inert (never assigned or executed).
"""
# Get the file list
text_path = '../tests/artifacts/data/king_arthur/*.txt'
analyzer_type = 'Stemming'
index_path = '/Users/saracollins/PycharmProjects/inquisition/inquisition/tests/artifacts/data/king_arthur_idx'
index_name = 'arthur'
"""
|
'''
기본 자료 구조 array
배열의 필요성 : 동일한 자료형을 한번에 관리하기 위함, index 번호로 관리
배열의 장점은 인덱스 번호로 빠르게 찾아 갈 수 있다는 것.
배열의 단점은 배열 생성시에 메모리 할당 범위를 정해 놓고 하기 때문에
새로운 데이터를 추가해서 넣기가 어렵고 (메모리가 고정적) 삭제 시에 중간의 메모리가 비기때문에
앞으로 당겨와야 하는 단점이 있음
Python 은 array 의 향상된 형태이기 때문에, c 와는 다르게 작성해야 되서
배열의 장단점이 와닿지 않는 경우가 많음 (메모리 할당 등을 알 수가 없음)
ex) C
#include <stdio.h>
int main() {
    char c[3] = "US"; -> 프로그래머가 메모리를 직접 할당
    printf("%s\n", c);
}
ex) python
c = 'US' -> 프로그래머가 직접 메모리 할당 안함
print(c)
'''
# Python allocates and manages the string's memory automatically,
# unlike the explicit char-array declaration in the C example above.
c = 'US'
print(c)
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from typing import List
from typing import NewType
import datetime
from programy.utils.logging.ylogger import YLogger
from programy.parser.pattern.match import Match
from programy.parser.pattern.nodes.template import PatternTemplateNode
dt = NewType('dt', datetime.datetime)
class MatchContext:
    """Holds the evolving state of a single pattern-match attempt.

    Records the nodes matched so far, the pattern template reached (if
    any), the sentence being matched and the response produced, plus the
    depth and time budgets that bound the search.
    """
    def __init__(self,
                 max_search_depth: int,
                 max_search_timeout: int,
                 matched_nodes: List = None,
                 template_node: PatternTemplateNode = None,
                 sentence: str = None,
                 response: str = None):
        """Create a match context.

        :param max_search_depth: maximum depth to search; -1 disables the check
        :param max_search_timeout: maximum search time in seconds; -1 disables
        :param matched_nodes: optional initial list of matches (shallow-copied)
        :param template_node: optional template node already matched
        :param sentence: the sentence being matched, if known
        :param response: the response generated, if any
        """
        self._max_search_depth = max_search_depth
        self._max_search_timeout = max_search_timeout
        self._total_search_start = datetime.datetime.now()
        self._matched_nodes = []
        if matched_nodes is not None:
            self._matched_nodes = matched_nodes.copy()
        # BUG FIX: _template_node was previously only assigned when a
        # template_node argument was supplied, so matched() and the
        # template_node property raised AttributeError on contexts built
        # without one (e.g. MatchContext(0, 0) as used by from_json()).
        self._template_node = None
        if template_node is not None:
            assert isinstance(template_node, PatternTemplateNode)
            self._template_node = template_node
        self._sentence = sentence
        self._response = response
    @property
    def matched_nodes(self) -> List:
        return self._matched_nodes
    def set_matched_nodes(self, nodes: List):
        # Store a shallow copy so later mutation of the caller's list does
        # not affect this context.
        self._matched_nodes = nodes[:]
    @property
    def template_node(self) -> PatternTemplateNode:
        return self._template_node
    @template_node.setter
    def template_node(self, template_node: PatternTemplateNode):
        assert isinstance(template_node, PatternTemplateNode)
        self._template_node = template_node
    @property
    def response(self) -> str:
        return self._response
    @response.setter
    def response(self, response: str):
        self._response = response
    @property
    def sentence(self) -> str:
        return self._sentence
    @sentence.setter
    def sentence(self, sentence: str):
        self._sentence = sentence
    @property
    def max_search_depth(self) -> int:
        return self._max_search_depth
    @max_search_depth.setter
    def max_search_depth(self, depth: int):
        self._max_search_depth = depth
    @property
    def total_search_start(self) -> dt:
        return self._total_search_start
    @total_search_start.setter
    def total_search_start(self, start: dt):
        self._total_search_start = start
    @property
    def max_search_timeout(self) -> dt:
        return self._max_search_timeout
    @max_search_timeout.setter
    def max_search_timeout(self, timeout: dt):
        self._max_search_timeout = timeout
    def search_depth_exceeded(self, depth: int) -> bool:
        """Return True if depth exceeds the limit (-1 disables the check)."""
        if self._max_search_depth == -1:
            return False
        return bool(depth > self._max_search_depth)
    def total_search_time(self) -> int:
        """Return whole seconds elapsed since the search started."""
        delta = datetime.datetime.now() - self._total_search_start
        return int(abs(delta.total_seconds()))
    def search_time_exceeded(self) -> bool:
        """Return True if elapsed time reached the limit (-1 disables)."""
        if self._max_search_timeout == -1:
            return False
        return bool(self.total_search_time() >= self._max_search_timeout)
    def add_match(self, match):
        self._matched_nodes.append(match)
    def pop_match(self):
        # Remove the most recent match, if any (no-op on an empty list).
        if self._matched_nodes:
            self._matched_nodes.pop()
    def pop_matches(self, matches_add):
        # Remove the given number of most recent matches.
        for _ in range(0, matches_add):
            self.pop_match()
    def matched(self):
        """Return True if a template was reached or a response produced."""
        return bool(self._template_node is not None or self._response is not None)
    def _get_indexed_match_by_type(self, client_context, index, match_type):
        # Return the joined words of the index-th (1-based) multi-word
        # match of the given type, or None if there are fewer such matches.
        count = 1
        for matched_node in self._matched_nodes:
            if matched_node.matched_node_type == match_type and matched_node.matched_node_multi_word is True:
                if count == index:
                    return matched_node.joined_words(client_context)
                count += 1
        return None
    def star(self, client_context, index):
        return self._get_indexed_match_by_type(client_context, index, Match.WORD)
    def topicstar(self, client_context, index):
        return self._get_indexed_match_by_type(client_context, index, Match.TOPIC)
    def thatstar(self, client_context, index):
        return self._get_indexed_match_by_type(client_context, index, Match.THAT)
    def list_matches(self, client_context, output_func=YLogger.debug, tabs="\t", include_template=True):
        """Write a human-readable summary of this match to output_func."""
        output_func(client_context, "%sMatches..." % tabs)
        count = 1
        if self._sentence is not None:
            output_func(client_context, "%sAsked: %s" % (tabs, self._sentence))
        for match in self._matched_nodes:
            output_func(client_context, "%s\t%d: %s" % (tabs, count, match.to_string(client_context)))
            count += 1
        output_func(client_context, "%sMatch score %.2f" % (tabs, self.calculate_match_score()))
        if include_template is True:
            if self.matched() is True:
                if self._response is not None:
                    output_func(client_context, "%s\tResponse: %s" % (tabs, self._response))
                else:
                    output_func(client_context, "%s\tResponse: None" % tabs)
    def calculate_match_score(self):
        """Return the percentage of WORD matches that were exact words.

        BUG FIX: this previously used integer floor division
        (words // total), which collapsed every partial score to 0.00 —
        e.g. 3 exact words and 1 wildcard scored 0.00 instead of 75.00.
        """
        wildcards = 0
        words = 0
        for match in self._matched_nodes:
            if match.matched_node_type == Match.WORD:
                if match.matched_node_wildcard:
                    wildcards += 1
                else:
                    words += 1
        total = wildcards + words
        if total > 0:
            return (words / total) * 100.00
        return 0.00
    def to_json(self):
        """Serialise this context to a JSON-compatible dict."""
        context = {"max_search_depth": self._max_search_depth,
                   "max_search_timeout": self._max_search_timeout,
                   "total_search_start": self._total_search_start.strftime("%d/%m/%Y, %H:%M:%S"),
                   "sentence": self._sentence,
                   "response": self._response,
                   "matched_nodes": []
                   }
        for match in self._matched_nodes:
            context["matched_nodes"].append(match.to_json())
        return context
    @staticmethod
    def from_json(json_data):
        """Recreate a MatchContext from a dict produced by to_json()."""
        match_context = MatchContext(0, 0)
        match_context.max_search_depth = json_data["max_search_depth"]
        match_context.max_search_timeout = json_data["max_search_timeout"]
        match_context.total_search_start = datetime.datetime.strptime(json_data["total_search_start"],
                                                                      "%d/%m/%Y, %H:%M:%S")
        match_context.sentence = json_data["sentence"]
        match_context.response = json_data["response"]
        for match_data in json_data["matched_nodes"]:
            match_context.matched_nodes.append(Match.from_json(match_data))
        return match_context
|
from mytools import get_time
@get_time
def main():
    """Project Euler 2: print the sum of even Fibonacci terms <= 4,000,000."""
    (v1, v2) = (1, 2)
    limit = 4 * (10 ** 6)  # renamed from 'max', which shadowed the builtin
    total = 0
    while v2 <= limit:
        # Accumulate only the even-valued terms.
        if not v2 % 2:
            total += v2
        (v1, v2) = (v2, v1 + v2)
    # print(x) with a single argument is valid in both Python 2 and 3,
    # unlike the old py2-only "print total" statement form.
    print(total)
@get_time
def main2():
    """Faster variant: the recurrence jumps three Fibonacci steps at a
    time, so every v2 visited is an even term and no parity test is needed."""
    (v1, v2) = (1, 2)
    limit = 4 * (10 ** 6)  # renamed from 'max', which shadowed the builtin
    total = 0
    while v2 <= limit:
        total += v2
        (v1, v2) = (v1 + 2 * v2, 2 * v1 + 3 * v2)
    # Parenthesised print works in both Python 2 and 3.
    print(total)
if __name__ == '__main__':
    # Run both implementations; @get_time (from mytools) presumably
    # reports each call's runtime — confirm against mytools.
    main()
    main2()
{
"targets": [{
"target_name": "dm-codec",
"sources": [
"src/dm-codec.cc",
"src/datamatrix.cc"
],
"include_dirs" : [
"<!@(node -p \"require('node-addon-api').include\")"
],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ],
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'CLANG_CXX_LIBRARY': 'libc++',
'MACOSX_DEPLOYMENT_TARGET': '10.7',
},
'msvs_settings': {
'VCCLCompilerTool': { 'ExceptionHandling': 1 },
},
'defines': [ 'NAPI_CPP_EXCEPTIONS' ],
'conditions': [
['OS=="mac"', {
"include_dirs": [
"/usr/local/include/"
],
"libraries": [
"-L/usr/local/lib",
"libdmtx.dylib"
],
'cflags+': ['-fvisibility=hidden'],
'xcode_settings': {
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES',
}
}],
['OS=="linux"', {
"libraries": [
"<!(echo /usr/lib/`(which dpkg-architecture > /dev/null && echo \`dpkg-architecture -qDEB_HOST_GNU_TYPE\`/) || echo`libdmtx.so)"
]
}],
]
}],
}
|
from django.contrib import admin
from .models import *
# Register your models here.
# Admin configuration shared by the Article and Diary models.
class ArticleAdmin(admin.ModelAdmin):
    # Columns shown on the admin change-list page; 'get_read_num' is
    # presumably a method/attribute defined on the models in .models —
    # confirm it exists on both Article and Diary.
    list_display = ('id', 'title', 'author', 'text', 'get_read_num')
admin.site.register(Article, ArticleAdmin)
admin.site.register(Diary, ArticleAdmin)
|
# -*- coding: utf-8 -*-
"""
Node discovery and network formation are implemented via a kademlia-like protocol.
The major differences are that packets are signed, node ids are the public keys, and
DHT-related features are excluded. The FIND_VALUE and STORE packets are not implemented.
The parameters necessary to implement the protocol are a
bucket size of 16 (denoted k in Kademlia),
concurrency of 3 (denoted alpha in Kademlia),
and 8 bits per hop (denoted b in Kademlia) for routing.
The eviction check interval is 75 milliseconds,
request timeouts are 300ms, and
the idle bucket-refresh interval is 3600 seconds.
Aside from the previously described exclusions, node discovery closely follows system
and protocol described by Maymounkov and Mazieres.
"""
import operator
import random
import time
from functools import total_ordering
from devp2p import slogging
from .crypto import sha3
from .utils import big_endian_to_int
from rlp.utils import encode_hex, is_integer, str_to_bytes
log = slogging.get_logger('p2p.discovery.kademlia')
# Protocol parameters; the module docstring gives their Kademlia names.
k_b = 8  # 8 bits per hop
k_bucket_size = 16
k_request_timeout = 3 * 300 / 1000.  # timeout of message round trips
k_idle_bucket_refresh_interval = 3600  # ping all nodes in bucket if bucket was idle
k_find_concurrency = 3  # parallel find node lookups
k_pubkey_size = 512
k_id_size = 256
k_max_node_id = 2 ** k_id_size - 1
def random_nodeid():
    """Return a uniformly random node id in [0, k_max_node_id]."""
    return random.randint(0, k_max_node_id)
@total_ordering
class Node(object):
    """A discovered peer: 64-byte public key plus the derived integer id."""

    def __init__(self, pubkey):
        assert len(pubkey) == 64 and isinstance(pubkey, bytes)
        self.pubkey = pubkey
        # The id is the pubkey (or its sha3 hash, for 256-bit ids) read as a
        # big-endian integer.
        if k_id_size == 512:
            material = pubkey
        else:
            assert k_id_size == 256
            material = sha3(pubkey)
        self.id = big_endian_to_int(material)

    def distance(self, other):
        """XOR distance to another node."""
        return self.id ^ other.id

    def id_distance(self, id):
        """XOR distance to a raw integer id."""
        return self.id ^ id

    def __lt__(self, other):
        if isinstance(other, self.__class__):
            return self.id < other.id
        return super(Node, self).__lt__(other)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.pubkey == other.pubkey
        return super(Node, self).__eq__(other)

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.pubkey)

    def __repr__(self):
        return '<Node(%s)>' % encode_hex(self.pubkey[:4])
class KBucket(object):
    """
    Each k-bucket is kept sorted by time last seen — least-recently seen node at
    the head, most-recently seen at the tail. For small values of i, the k-buckets
    will generally be empty (as no appropriate nodes will exist). For large values
    of i, the lists can grow up to size k, where k is a system-wide replication
    parameter. k is chosen such that any given k nodes are very unlikely to fail
    within an hour of each other (for example k = 20).
    """
    k = k_bucket_size  # max nodes held per bucket

    def __init__(self, start, end):
        self.start = start  # inclusive lower bound of the covered id range
        self.end = end  # inclusive upper bound of the covered id range
        self.nodes = []  # ordered least- to most-recently seen
        self.replacement_cache = []
        self.last_updated = time.time()

    @property
    def range(self):
        """(start, end) id range covered by this bucket."""
        return self.start, self.end

    @property
    def midpoint(self):
        return self.start + (self.end - self.start) // 2

    def distance(self, node):
        """XOR distance from the bucket midpoint to a node."""
        return self.midpoint ^ node.id

    def id_distance(self, id):
        """XOR distance from the bucket midpoint to a raw id."""
        return self.midpoint ^ id

    def nodes_by_id_distance(self, id):
        """Bucket nodes sorted by XOR distance to `id` (closest first)."""
        assert is_integer(id)
        return sorted(self.nodes, key=operator.methodcaller('id_distance', id))

    @property
    def should_split(self):
        depth = self.depth
        return self.is_full and (depth % k_b != 0 and depth != k_id_size)

    def split(self):
        """Split at the median id; distribute nodes and replacement caches."""
        splitid = self.midpoint
        lower = KBucket(self.start, splitid)
        upper = KBucket(splitid + 1, self.end)
        # distribute nodes
        for node in self.nodes:
            bucket = lower if node.id <= splitid else upper
            bucket.add_node(node)
        # distribute replacement nodes
        for node in self.replacement_cache:
            bucket = lower if node.id <= splitid else upper
            bucket.replacement_cache.append(node)
        return lower, upper

    def remove_node(self, node):
        if node not in self.nodes:
            return
        self.nodes.remove(node)

    def in_range(self, node):
        return self.start <= node.id <= self.end

    @property
    def is_full(self):
        # consistency fix: use the class parameter (as add_node does) rather
        # than the module constant, so a subclass overriding `k` stays coherent
        return len(self) == self.k

    def add_node(self, node):
        """
        If the sending node already exists in the recipient's k-bucket,
        the recipient moves it to the tail of the list.
        If the node is not already in the appropriate k-bucket
        and the bucket has fewer than k entries,
        then the recipient just inserts the new sender at the tail of the list.
        If the appropriate k-bucket is full, however,
        then the recipient pings the k-bucket's least-recently seen node to decide what to do.
        on success: return None
        on bucket full: return least recently seen Node for eviction check
        """
        self.last_updated = time.time()
        if node in self.nodes:  # already exists
            self.nodes.remove(node)
            self.nodes.append(node)
        elif len(self) < self.k:  # add if fewer than k entries
            self.nodes.append(node)
        else:  # bucket is full
            return self.head

    @property
    def head(self):
        "least recently seen"
        return self.nodes[0]

    @property
    def tail(self):
        "last recently seen"
        return self.nodes[-1]

    @property
    def depth(self):
        """
        depth is the prefix shared by all nodes in bucket
        i.e. the number of shared leading bits
        """
        def to_binary(x):  # left padded bit representation
            b = bin(x)[2:]
            return '0' * (k_id_size - len(b)) + b
        if len(self.nodes) < 2:
            return k_id_size
        bits = [to_binary(n.id) for n in self.nodes]
        # BUG FIX: the original iterated range(k_id_size), i.e. prefix lengths
        # 0..k_id_size-1. Two distinct ids that differ only in their LAST bit
        # share every shorter prefix, so the loop fell through and raised.
        # Checking prefix lengths 1..k_id_size covers the full id.
        for i in range(1, k_id_size + 1):
            if len(set(b[:i] for b in bits)) != 1:
                return i - 1
        raise Exception('nodes in bucket have identical ids')

    def __contains__(self, node):
        return node in self.nodes

    def __len__(self):
        return len(self.nodes)
class RoutingTable(object):
    """Ordered list of KBuckets covering the whole id space [0, k_max_node_id]."""

    def __init__(self, node):
        self.this_node = node  # the local node, never stored in a bucket
        self.buckets = [KBucket(0, k_max_node_id)]

    def split_bucket(self, bucket):
        """Replace `bucket` in place by its two halves, keeping the list ordered."""
        a, b = bucket.split()
        index = self.buckets.index(bucket)
        self.buckets[index] = a
        self.buckets.insert(index + 1, b)

    @property
    def idle_buckets(self):
        one_hour_ago = time.time() - k_idle_bucket_refresh_interval
        return [b for b in self.buckets if b.last_updated < one_hour_ago]

    @property
    def not_full_buckets(self):
        return [b for b in self.buckets if len(b) < k_bucket_size]

    def remove_node(self, node):
        self.bucket_by_node(node).remove_node(node)

    def add_node(self, node):
        assert node != self.this_node
        # log.debug('add_node', node=node)
        bucket = self.bucket_by_node(node)
        eviction_candidate = bucket.add_node(node)
        if eviction_candidate:  # bucket is full
            # split if the bucket has the local node in its range
            # or if the depth is not congruent to 0 mod k_b
            depth = bucket.depth
            if bucket.in_range(self.this_node) or (depth % k_b != 0 and depth != k_id_size):
                self.split_bucket(bucket)
                return self.add_node(node)  # retry
            # nothing added, ping eviction_candidate
            return eviction_candidate
        return None  # successfully added to not full bucket

    def bucket_by_node(self, node):
        """Return the bucket whose inclusive [start, end] range holds node.id."""
        for bucket in self.buckets:
            # BUG FIX: was `node.id < bucket.end`. Bucket bounds are inclusive
            # (see KBucket.in_range), so an id equal to a bucket's `end`
            # (e.g. k_max_node_id) skipped its bucket and then tripped the
            # start assertion on the next one.
            if node.id <= bucket.end:
                assert node.id >= bucket.start
                return bucket
        raise Exception

    def buckets_by_id_distance(self, id):
        assert is_integer(id)
        return sorted(self.buckets, key=operator.methodcaller('id_distance', id))

    def buckets_by_distance(self, node):
        assert isinstance(node, Node)
        return self.buckets_by_id_distance(node.id)

    def __contains__(self, node):
        return node in self.bucket_by_node(node)

    def __len__(self):
        return sum(len(b) for b in self.buckets)

    def __iter__(self):
        for b in self.buckets:
            for n in b.nodes:
                yield n

    def neighbours(self, node, k=k_bucket_size):
        """
        sorting by bucket.midpoint does not work in edge cases
        build a short list of k * 2 nodes and sort and shorten it
        """
        assert isinstance(node, Node) or is_integer(node)
        if isinstance(node, Node):
            node = node.id
        nodes = []
        for bucket in self.buckets_by_id_distance(node):
            for n in bucket.nodes_by_id_distance(node):
                # BUG FIX: was `n is not node`, which compared a Node object
                # with the int id (always True), so the target itself was
                # never excluded. Compare ids instead.
                if n.id != node:
                    nodes.append(n)
            # BUG FIX: the original `break` only left the inner loop, so every
            # remaining bucket was still scanned. Stop once 2k candidates exist.
            if len(nodes) >= k * 2:
                break
        return sorted(nodes, key=operator.methodcaller('id_distance', node))[:k]

    def neighbours_within_distance(self, id, distance):
        """
        naive correct version simply compares all nodes
        """
        assert is_integer(id)
        nodes = list(n for n in self if n.id_distance(id) <= distance)
        return sorted(nodes, key=operator.methodcaller('id_distance', id))
class WireInterface(object):
    """
    Defines the transport methods used by KademliaProtocol.
    All methods are no-op stubs; a concrete transport overrides them.
    """
    def send_ping(self, node):
        "returns pingid"

    def send_pong(self, node, id):
        pass

    def send_find_node(self, node, targetid):
        # BUG FIX: was `send_find_node(self, nodeid)`, but every call site in
        # this module invokes wire.send_find_node(node, targetid) with two
        # arguments, which would raise TypeError against this stub.
        pass

    def send_neighbours(self, node, neighbours):
        pass
class FindNodeTask(object):
    """
    initiating a find_node and the consulting the buckets via neighbours() does not
    return the find_node result, as these first need to be pinged and might not end up
    in the bucket
    """

    def __init__(self, proto, targetid, via_node=None, timeout=k_request_timeout, callback=None):
        assert isinstance(proto, KademliaProtocol)
        assert is_integer(targetid)
        assert not via_node or isinstance(via_node, Node)
        self.proto = proto
        self.targetid = targetid
        self.via_node = via_node
        self.timeout = time.time() + timeout
        self.callback = callback
        # BUG FIX: this class has neither a `wire` attribute nor a
        # `_query_neighbours` method of its own — both live on the protocol
        # object; the original `self.wire...` / `self._query_neighbours(...)`
        # raised AttributeError on every instantiation.
        if via_node:
            self.proto.wire.send_find_node(via_node, targetid)
        else:
            self.proto._query_neighbours(targetid)
        # FIXME, should we return the closest node (allow callbacks on find_request)
class KademliaProtocol(object):
    """Transport-agnostic Kademlia discovery logic: routing-table upkeep,
    ping/pong liveness checks with eviction, and find_node lookups."""
    def __init__(self, node, wire):
        assert isinstance(node, Node)  # the local node
        assert isinstance(wire, WireInterface)
        self.this_node = node
        self.wire = wire
        self.routing = RoutingTable(node)
        self._expected_pongs = dict()  # pingid -> (timeout, node, replacement_node)
        self._find_requests = dict()  # nodeid -> timeout
        self._deleted_pingids = set()
    def bootstrap(self, nodes):
        """Add the given seed nodes and look up our own id via each of them."""
        assert isinstance(nodes, list)
        for node in nodes:
            if node == self.this_node:
                continue
            self.routing.add_node(node)
            self.find_node(self.this_node.id, via_node=node)
    def update(self, node, pingid=None):
        """
        When a Kademlia node receives any message (request or reply) from another node,
        it updates the appropriate k-bucket for the sender’s node ID.
        If the sending node already exists in the recipient’s k- bucket,
        the recipient moves it to the tail of the list.
        If the node is not already in the appropriate k-bucket
        and the bucket has fewer than k entries,
        then the recipient just inserts the new sender at the tail of the list.
        If the appropriate k-bucket is full, however,
        then the recipient pings the k-bucket’s least-recently seen node to decide what to do.
        If the least-recently seen node fails to respond,
        it is evicted from the k-bucket and the new sender inserted at the tail.
        Otherwise, if the least-recently seen node responds,
        it is moved to the tail of the list, and the new sender’s contact is discarded.
        k-buckets effectively implement a least-recently seen eviction policy,
        except that live nodes are never removed from the list.
        """
        assert isinstance(node, Node)
        log.debug('in update', remoteid=node, localid=self.this_node)
        # Overview of the steps below:
        # check node is not self
        # if ping was expected
        # if it is not timed out
        # add to bucket
        # optionally set replacement
        # check for not full buckets
        # ping nodes from replacement cache
        # check for inactive buckets
        # ping nodes
        # prune timed out find_list
        # prune timed out expected_ping list
        # ping replacements
        if node == self.this_node:
            log.debug('node is self', remoteid=node)
            return
        def _expected_pongs():
            # debug helper: the set of nodes we are currently awaiting pongs from
            return set(v[1] for v in self._expected_pongs.values())
        if pingid and (pingid not in self._expected_pongs):
            assert pingid not in self._expected_pongs
            log.debug('surprising pong', remoteid=node,
                      expected=_expected_pongs(), pingid=encode_hex(pingid)[:8])
            if pingid in self._deleted_pingids:
                log.debug('surprising pong was deleted')
            else:
                # a pingid ends with the sender's pubkey (see _mkpingid), so a
                # key match here means the echo payload didn't match
                for key in self._expected_pongs:
                    if key.endswith(node.pubkey):
                        log.debug('waiting for ping from node, but echo mismatch', node=node,
                                  expected_echo=encode_hex(key[:len(node.pubkey)][:8]),
                                  received_echo=encode_hex(pingid[:len(node.pubkey)][:8]))
            return
        # check for timed out pings and eventually evict them
        for _pingid, (timeout, _node, replacement) in list(self._expected_pongs.items()):
            if time.time() > timeout:
                log.debug('deleting timedout node', remoteid=_node,
                          pingid=encode_hex(_pingid)[:8])
                self._deleted_pingids.add(_pingid)  # FIXME this is for testing
                del self._expected_pongs[_pingid]
                self.routing.remove_node(_node)
                if replacement:
                    log.debug('adding replacement', remoteid=replacement)
                    self.update(replacement)
                    return
                if _node == node:  # prevent node from being added later
                    return
        # if we had registered this node for eviction test
        if pingid in self._expected_pongs:
            timeout, _node, replacement = self._expected_pongs[pingid]
            log.debug('received expected pong', remoteid=node)
            if replacement:
                log.debug('adding replacement to cache', remoteid=replacement)
                self.routing.bucket_by_node(replacement).replacement_cache.append(replacement)
            del self._expected_pongs[pingid]
        # add node
        eviction_candidate = self.routing.add_node(node)
        if eviction_candidate:
            log.debug('could not add', remoteid=node, pinging=eviction_candidate)
            # protocol should ping bucket head and evict if there is no response
            self.ping(eviction_candidate, replacement=node)
        else:
            log.debug('added', remoteid=node)
        # check for not full buckets and ping replacements
        # NOTE(review): this loop rebinds `node`, shadowing the method argument;
        # the argument is not used below this point, so behavior is unaffected,
        # but a rename would be safer.
        for bucket in self.routing.not_full_buckets:
            for node in bucket.replacement_cache:
                self.ping(node)
        # check idle buckets
        """
        idle bucket refresh:
        for each bucket which hasn't been touched in 3600 seconds
        pick a random value in the range of the bucket and perform discovery for that value
        """
        for bucket in self.routing.idle_buckets:
            rid = random.randint(bucket.start, bucket.end)
            self.find_node(rid)
        # check and removed timed out find requests
        self._find_requests = {
            nodeid: timeout
            for nodeid, timeout in self._find_requests.items()
            if time.time() <= timeout
        }
        log.debug('updated', num_nodes=len(self.routing), num_buckets=len(self.routing.buckets))
    def _mkpingid(self, echoed, node):
        """Build a pingid: the echoed ping payload followed by the remote pubkey."""
        assert node.pubkey
        pid = str_to_bytes(echoed) + node.pubkey
        log.debug('mkpingid', echoed=encode_hex(echoed), node=encode_hex(node.pubkey))
        return pid
    def ping(self, node, replacement=None):
        """
        successful pings should lead to an update
        if bucket is not full
        elif least recently seen, does not respond in time
        """
        assert isinstance(node, Node)
        assert node != self.this_node
        log.debug('pinging', remote=node, local=self.this_node)
        echoed = self.wire.send_ping(node)
        pingid = self._mkpingid(echoed, node)
        assert pingid
        timeout = time.time() + k_request_timeout
        log.debug('set wait for pong from', remote=node, local=self.this_node,
                  pingid=encode_hex(pingid)[:4])
        self._expected_pongs[pingid] = (timeout, node, replacement)
    def recv_ping(self, remote, echo):
        "udp addresses determined by socket address of revd Ping packets"  # ok
        "tcp addresses determined by contents of Ping packet"  # not yet
        assert isinstance(remote, Node)
        log.debug('recv ping', remote=remote, local=self.this_node)
        if remote == self.this_node:
            log.warn('recv ping from self?!')
            return
        self.update(remote)
        self.wire.send_pong(remote, echo)
    def recv_pong(self, remote, echoed):
        "tcp addresses are only updated upon receipt of Pong packet"
        assert remote != self.this_node
        pingid = self._mkpingid(echoed, remote)
        log.debug('recv pong', remote=remote, pingid=encode_hex(pingid)[:8], local=self.this_node)
        # update address (clumsy fixme)
        if hasattr(remote, 'address'):  # not available in tests
            nnodes = self.routing.neighbours(remote)
            if nnodes and nnodes[0] == remote:
                nnodes[0].address = remote.address  # updated tcp address
        # update rest
        self.update(remote, pingid)
    def _query_neighbours(self, targetid):
        # ask the k_find_concurrency closest known nodes for targetid
        for n in self.routing.neighbours(targetid)[:k_find_concurrency]:
            self.wire.send_find_node(n, targetid)
    def find_node(self, targetid, via_node=None):
        """Start a lookup for targetid, via a specific node if one is given."""
        # FIXME, amplification attack (need to ping pong ping pong first)
        assert is_integer(targetid)
        assert not via_node or isinstance(via_node, Node)
        self._find_requests[targetid] = time.time() + k_request_timeout
        if via_node:
            self.wire.send_find_node(via_node, targetid)
        else:
            self._query_neighbours(targetid)
        # FIXME, should we return the closest node (allow callbacks on find_request)
    def recv_neighbours(self, remote, neighbours):
        """
        if one of the neighbours is closer than the closest known neighbour
        if not timed out
        query closest node for neighbours
        add all nodes to the list
        """
        assert isinstance(neighbours, list)
        log.debug('recv neighbours', remoteid=remote, num=len(neighbours), local=self.this_node,
                  neighbours=neighbours)
        neighbours = [n for n in neighbours if n != self.this_node]
        neighbours = [n for n in neighbours if n not in self.routing]
        # we don't map requests to responses, thus forwarding to all FIXME
        for nodeid, timeout in self._find_requests.items():
            assert is_integer(nodeid)
            closest = sorted(neighbours, key=operator.methodcaller('id_distance', nodeid))
            if time.time() < timeout:
                closest_known = self.routing.neighbours(nodeid)
                closest_known = closest_known[0] if closest_known else None
                assert closest_known != self.this_node
                # send find_node requests to k_find_concurrency closests
                for close_node in closest[:k_find_concurrency]:
                    if not closest_known or \
                            close_node.id_distance(nodeid) < closest_known.id_distance(nodeid):
                        log.debug('forwarding find request', closest=close_node,
                                  closest_known=closest_known)
                        self.wire.send_find_node(close_node, nodeid)
        # add all nodes to the list
        for node in neighbours:
            if node != self.this_node:  # redundant (already filtered above); kept as-is
                self.ping(node)
    def recv_find_node(self, remote, targetid):
        """Answer a FIND_NODE: update the sender, reply with our closest nodes."""
        # FIXME, amplification attack (need to ping pong ping pong first)
        assert isinstance(remote, Node)
        assert is_integer(targetid)
        self.update(remote)
        found = self.routing.neighbours(targetid)
        log.debug('recv find_node', remoteid=remote, found=len(found))
        self.wire.send_neighbours(remote, found)
|
# mini.py
def foo():
    """Print a marker identifying function foo of module mini."""
    message = "这是模块mini的函数foo"
    print(message)

if __name__ == "__main__":
    print("这是一个模块文件mini")
|
import pygame
import sys
import colors
days_of_week = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
month_length = 28
current_date = 1  # 1-based day of the month

def get_day_of_week(date, month_length):
    """Map a 1-based date to its weekday name; day 1 is a Sunday."""
    index = (date - 1) % len(days_of_week)
    return days_of_week[index]

def inc_date():
    """Advance the current date by one day, wrapping after month_length."""
    global current_date
    if current_date == month_length:
        current_date = 1
    else:
        current_date += 1

def dec_date():
    """Move the current date back one day, wrapping from 1 to month_length."""
    global current_date
    if current_date == 1:
        current_date = month_length
    else:
        current_date -= 1
pygame.init()  # initialise all pygame modules
pygame.font.init()
# (0, 0) with FULLSCREEN lets pygame pick the current desktop resolution.
screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
pygame.display.set_caption("Rescue")
width, height = pygame.display.get_surface().get_size()
done = False  # main-loop exit flag
font = pygame.font.SysFont("Arial", 25)
def draw_text(screen, text, color, pos):
    """Render `text` in the module font, centered at `pos`, onto `screen`."""
    surface = font.render(text, False, color)
    rect = surface.get_rect(center=pos)
    screen.blit(surface, rect)
class Button:
    """A rectangular clickable button with passive/hover/clicked colors."""

    def __init__(self, pos, dims, colors, text):
        self.pos = pos
        self.dims = dims
        self.colors = colors  # list of 3 colors : [passive, hover, clicked]
        self.color = self.colors[0]
        self.text = text

    def get_center(self):
        """Return the center point of the button rectangle."""
        x, y = self.pos
        w, h = self.dims
        return x + w / 2, y + h / 2

    def mouse_is_over(self, mouse_pos):
        """True if mouse_pos lies inside the button (edges inclusive)."""
        mx, my = mouse_pos
        x, y = self.pos
        w, h = self.dims
        return x <= mx <= x + w and y <= my <= y + h

    def change_color(self, idx):
        """Switch to the color at `idx` (0 passive, 1 hover, 2 clicked)."""
        self.color = self.colors[idx]

    def display(self, screen):
        """Draw the button rectangle and its centered label."""
        pygame.draw.rect(screen, self.color, pygame.Rect(*self.pos, *self.dims))
        draw_text(screen, self.text, colors.BLACK, self.get_center())
button_width = width * 0.075  # square buttons sized relative to the screen width
next_day_button = Button(pos = (width - button_width, height - button_width), dims = (button_width, button_width), colors = [colors.GREEN, colors.DARKER_GREEN, colors.EVEN_DARKER_GREEN], text = "Next Day")
prev_day_button = Button(pos = (width - 2 * button_width, height - button_width), dims = (button_width, button_width), colors = [colors.SKY, colors.DARK_SKY, colors.DARKER_SKY], text = "Previous Day")
# Keys 'nd'/'pd' are the dispatch codes consumed by button_clicked().
buttons = {"nd" : next_day_button, "pd" : prev_day_button}
def draw_buttons(screen):
    """Render every registered button onto screen."""
    for button in buttons:
        buttons[button].display(screen)
def get_button_under_mouse(buttons, mouse_pos):
    """Return the first button whose rectangle contains mouse_pos, else None.

    Accepts either a dict of buttons (like the module-level ``buttons``) or a
    plain iterable of Button objects.
    """
    # BUG FIX: the original iterated the dict directly, which yields its KEY
    # strings, so `button.mouse_is_over(...)` raised AttributeError whenever
    # this helper was called with the module's ``buttons`` dict.
    candidates = buttons.values() if isinstance(buttons, dict) else buttons
    for button in candidates:
        if button.mouse_is_over(mouse_pos):
            return button
    return None
def button_clicked(button_key):
    """Dispatch a button press by key: 'nd' -> next day, 'pd' -> previous day."""
    handlers = {"nd": inc_date, "pd": dec_date}
    handler = handlers.get(button_key)
    if handler is not None:
        handler()
while not done:
    events = pygame.event.get()
    mouse_x, mouse_y = pygame.mouse.get_pos()
    pressed_tuple = pygame.mouse.get_pressed()
    mouse_is_pressed = pressed_tuple[0]  # left btn
    mouse_over_button_key = ""
    # Hover/click feedback: recolor every button from the current mouse state
    # and remember which one the pointer is over.
    for button_key in buttons:
        button = buttons[button_key]
        if button.mouse_is_over((mouse_x, mouse_y)):
            mouse_over_button_key = button_key
            if mouse_is_pressed:
                button.change_color(2)
            else:
                button.change_color(1)
        else:
            button.change_color(0)
    for event in events:
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_q:  # 'q' quits the app
                done = True
                break
        elif event.type == pygame.MOUSEBUTTONUP:
            # Fire on release (standard GUI-button behavior).
            if mouse_over_button_key:
                button_clicked(mouse_over_button_key)
    screen.fill(colors.WHITE)
    draw_buttons(screen)
    #draw_text(screen, "test", colors.BLACK, (width - button_width / 2, height - button_width / 2))
    draw_text(screen, get_day_of_week(current_date, month_length), colors.BLACK, (width / 2, height / 2))
    pygame.display.flip()
|
#----------------------------------#
#-------- MailCleaner v0.1 --------#
#----------------------------------#
# #
# by: Alessandro Carrara(alkz) #
# email: alkz.0x80@gmail.com #
# build date: 2011-03-22 #
# for: PoliGrafica SRL #
# #
#----------------------------------#
#----------------------------------#
import os
import sys
import glob
import pprint
import re
import smtplib
from email.mime.text import MIMEText
DEBUG = False  # enabled interactively at startup for verbose tracing
#-----------------------------#
#---------- Consts -----------#
#-----------------------------#
# Operation codes returned by riddleMail()
REMOVE = 1  # address must be removed from the db
SUB = 2  # address must be replaced with a new one
RESEND = 3  # a confirmation email must be resent
DELETE = 4  # email carries no actionable request
# Others
MAILSERVER = "10.0.0.3"  # SMTP relay used by sendConfirm()
VERSION = "0.1"
#-----------------------------#
#---------- Globals ----------#
#-----------------------------#
# Counters updated by riddleMail (Italian: removed/substituted/resent/deleted)
rimosse = 0
sostituite = 0
rispedite = 0
cancellate = 0
#-----------------------------#
#--------- Functions ---------#
#-----------------------------#
def mergeEmail(tupla):
    """Join a (local-part, domain) pair into a single 'local@domain' string."""
    return "@".join([tupla[0], tupla[1]])
#-----------------------------#
#-----------------------------#
def cleanAddress(s):
    """Extract the first email-like token from `s`, stripped of <>,:" punctuation."""
    pairs = re.findall("(\\S+)@(\\S+)", s)
    name, domain = pairs[0]
    candidate = name + "@" + domain
    return candidate.strip("<>,:\"")
#-----------------------------#
#-----------------------------#
# Returns a dict with all the info about the email.
def getInfoEmail(filename):
    """Parse an .eml file: selected header fields plus the message body.

    Header names to extract are listed in fields.txt (one per line; lines
    starting with '#' and blank lines are ignored). Returns a dict mapping
    each matched lower-cased header name to its (lower-cased) value, plus
    key "content": the stripped, non-empty body lines.
    """
    with open(filename, "r") as f:
        lines = f.read().split("\n")
    # Header field names we care about, from fields.txt.
    with open("fields.txt", "r") as fields:
        wanted = [w.strip().lower() for w in fields.readlines()
                  if w.strip() != '' and w[0] != '#']
    info = {}
    body_start = len(lines)
    for i, line in enumerate(lines):
        if line == "":  # first blank line terminates the header (RFC 5322)
            body_start = i + 1
            break
        parts = line.lower().split(": ", 1)
        if len(parts) == 2 and parts[0] in wanted:
            info[parts[0]] = parts[1].strip()
    # BUG FIX: the original located the body by COUNTING matched header
    # fields, so any header line not listed in fields.txt shifted the slice
    # and header remnants leaked into (or body lines vanished from) the
    # message. The body actually starts right after the first blank line.
    content = []
    for line in lines[body_start:]:
        line = line.strip()
        if line != '':
            content.append(line)
    info["content"] = content
    return info
#-----------------------------#
#-----------------------------#
def isThereKeyWord(info, fkey):
    """Return True if any keyword from the open file `fkey` appears in one of
    the header fields of `info` (the 'eml' and 'content' entries are skipped)."""
    for field, value in info.items():
        if field == "eml" or field == "content":
            continue
        fkey.seek(0)
        for raw in fkey.readlines():
            word = raw.strip()
            # skip blank lines and comments
            if word == '':
                continue
            if len(word) > 1 and word[0] == '#':
                continue
            if value.find(word) > -1:  # keyword found
                if DEBUG:
                    print("TROVATA! Parola: " + word + " Nel campo: " + field + "(" + value + ")")
                return True
    return False
#-----------------------------#
#-----------------------------#
def getEmailAddress(content):
    """Scan the message lines for email addresses and return the valid ones.

    Addresses are stripped of surrounding <>,:" punctuation, validated with a
    loose regex, and any @quippe.it address is discarded.
    """
    emails = []
    isValidEmailRegEx = "^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$"
    extractEmailRegEx = "(\\S+)@(\\S+)"
    pattern = re.compile(extractEmailRegEx)
    patternValid = re.compile(isValidEmailRegEx)
    for line in content:
        result = pattern.findall(line)
        if len(result) > 0:
            name, domain = result[0]
            mergedEmail = (name + "@" + domain).strip("<>,:\"")
            # BUG FIX: the original assigned the stripped address to a typo
            # variable ('mergedEmal'), so validation ran on the UNSTRIPPED
            # string and punctuation-wrapped addresses (e.g. '<x@y.it>,')
            # were silently rejected.
            if patternValid.match(mergedEmail) is not None:
                emails.append(mergedEmail)
    toReturn = []
    for email in emails:
        if (email.split("@"))[1].find("quippe.it") > -1:  # discard quippe addresses
            continue
        toReturn.append(email)  # keep the valid ones
    return toReturn
#-----------------------------#
#-----------------------------#
# Decide whether the email asks to be removed from the db, substituted,
# resent, or is simply to be deleted.
def riddleMail(info):
    """Classify the parsed email `info` and record it in the output files.

    Returns one of REMOVE / SUB / RESEND / DELETE and bumps the matching
    global counter. REMOVE/SUB entries are appended to output/*.txt.
    """
    global rimosse
    global sostituite
    global rispedite
    global cancellate
    key_words = open("keyWords/rimuovere.txt", "r")
    if(isThereKeyWord(info, key_words)):
        op = REMOVE
        key_words.close()
    else:  # not a removal: check whether it is a substitution
        key_words = open("keyWords/sostituire.txt", "r")
        if(isThereKeyWord(info, key_words)):
            op = SUB
            key_words.close()
        else:  # or a resend request
            key_words = open("keyWords/rispedire.txt", "r")
            if(isThereKeyWord(info, key_words)):
                op = RESEND
                key_words.close()
            else:  # otherwise it is just to be deleted
                # NOTE(review): keyWords/rispedire.txt is left open on this path.
                op = DELETE
    if(op == REMOVE):
        # extract the email addresses to remove
        emails = getEmailAddress(info["content"])
        if(DEBUG):
            pprint.pprint(emails)
        if(len(emails) == 0):  # flagged as REMOVE but no address found: delete instead
            op = DELETE
            cancellate += 1
            if(DEBUG):
                print ("Email da cancellare\n")
            return op
        out = open("output/daRimuovere.txt", "a")
        out.write(emails[0] + "\n")
        out.close()
        rimosse += 1
        if(DEBUG):
            print ("Email da rimuovere\n")
    elif(op == SUB):
        # extract the email addresses to substitute
        emails = getEmailAddress(info["content"])
        if(DEBUG):
            pprint.pprint(emails)
        if(len(emails) == 0):  # no replacement address given: delete the email
            op = DELETE
            cancellate += 1
            if(DEBUG):
                print ("Email da cancellare\n")
            return op
        # if more than one address was given, the first one is used anyway
        out = open("output/daSostituire.txt", "a")
        toWrite = cleanAddress(info["from"])
        out.write(toWrite + ";" + emails[0] + "\n")
        out.close()
        sostituite += 1
        if(DEBUG):
            print ("Email da sostituire\n")
    elif(op == RESEND):
        rispedite += 1
        toWrite = cleanAddress(info["from"])
        out = open("output/daRispedire.txt", "a")
        out.write(toWrite + "\n")
        out.close()
        if(DEBUG):
            print("Email da rispedire a: " + toWrite + "\n")
    else:
        cancellate += 1
        if(DEBUG):
            print ("Email da cancellare\n")
    return op
#-----------------------------#
#-----------------------------#
def deleteDuplicates(filename):
    """Rewrite `filename` keeping only the first occurrence of each line.

    Line order is preserved. The result is written to a sibling tmp.txt and
    then renamed over the original (as the original code did), so a crash
    mid-write does not truncate the source file.
    """
    with open(filename, "r") as f:
        lines = f.readlines()
    # PERF FIX: the original re-read the whole temp file for every input line
    # (O(n^2) file I/O); a set gives O(1) membership checks.
    seen = set()
    unique = []
    for line in lines:
        if line not in seen:
            seen.add(line)
            unique.append(line)
    pathNewFile = os.path.dirname(filename) + "/tmp.txt"
    with open(pathNewFile, "w") as fNew:
        fNew.writelines(unique)
    os.remove(filename)
    os.renames(pathNewFile, filename)
#-----------------------------#
#-----------------------------#
def sendConfirm(dest):
    """Send the Quippe.it confirmation email to `dest` via MAILSERVER.

    Errors are reported on stdout (best-effort delivery); the SMTP
    connection is always closed.
    """
    msg = MIMEText("Conferma Email - Quippe.it")
    sender = "info@quippe.it"
    msg['Subject'] = 'Conferma Email - Quippe.it'
    msg['From'] = sender
    msg['To'] = dest
    s = smtplib.SMTP(MAILSERVER)  # ServerMail 10.0.0.3
    try:
        s.sendmail(sender, dest, msg.as_string())
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behavior for real errors only.
        print("Errore imprevisto nell'invio dell'email")
    finally:
        s.quit()  # always close the connection, even if sendmail raised
#-----------------------------#
#-----------------------------#
os.system("cls")  # clear the console (Windows-only command)
print ("MailCleaner v" + VERSION + " - by alkz 2011\n\n")
# Exactly one argument is required: the path of the .dbx mailbox.
if(len(sys.argv) < 2):
    print("Argomenti mancanti!\n")
    input("")
    exit(1)
if(len(sys.argv) > 2):
    print("Troppi Argomenti!\n")
    input("")
    exit(2)
print("File: " + sys.argv[1])
input("Per continuare premere un tasto, altrimenti CTRL+C\n")
os.chdir(os.path.dirname(sys.argv[0]))
# Truncate (reset) the three output files.
f = open("output/daRimuovere.txt", "w")
f.close()
f = open("output/daSostituire.txt", "w")
f.close()
f = open("output/daRispedire.txt", "w")
f.close()
# Convert the Outlook Express .dbx archive to .eml files.
os.system("DbxConv.exe -overwrite -eml " + sys.argv[1])
# Strip the .dbx extension to get the directory where DbxConv wrote the .eml files.
for i in range(len(sys.argv[1])-1, 0, -1):
    if(sys.argv[1][i] == '.'):
        break
path = sys.argv[1][:i] + "\\"
print("Directory files .eml: " + path)
inp = input("\nVuoi visualizzare dei messaggi per eventuale debugging? <y/n>(default n): ")
if(inp == 'y'):
    DEBUG = True
print("\nRiddling emails...\n")
if(DEBUG):
    print("#------------------------------------------------------------------#\n")
emails = 0
InfoEmails = []
# Each email gets its own descriptive dict.
for infile in glob.glob(os.path.join(path, '*.eml')):
    e = {}
    emails += 1
    if(DEBUG):
        print ("File: " + str(infile.encode("utf-8")) + "\n")
    try:
        e = getInfoEmail(infile)
        # Classify the email.
        op = riddleMail(e)
        e["eml"] = infile
        e["operation"] = op
        InfoEmails.append(e)
    except:
        print ("File: " + str(infile.encode("utf-8")) + "\n")
        input("Errore di lettura imprevisto")
if(DEBUG):
    print("#------------------------------------------------------------------#\n")
deleteDuplicates("output/daRispedire.txt")
deleteDuplicates("output/daRimuovere.txt")
print ("\nFinished! gli outputs sono stati salvati nella directory output.\n")
risp = input("Ci sono " + str(rispedite) + " email da rispedire, Rispedirle ora? <y/n>(default y): ")
if(risp != 'n'):
    # NOTE(review): the FIRST RESEND entry triggers sending to every address in
    # daRispedire.txt; the "w" reopen below then truncates the file, so later
    # iterations of this loop read nothing and send nothing.
    for e in InfoEmails:
        if(e["operation"] == RESEND):
            f = open("output/daRispedire.txt", "r")
            for email in f.readlines():
                sendConfirm(email)
            f = open("output/daRispedire.txt", "w")  # truncates the file contents
else:
    print("Email non rispedite")
print ("\nEmail totali scansionate: " + str(emails))
print ("Email da rimuovere dal db: " + str(rimosse))
print ("Email da sostituire nel db: " + str(sostituite))
print ("Email rispedite o da rispedire: " + str(rispedite))
print ("Email da cancellare: " + str(cancellate))
print ("Email impossibili da leggere: " + str(emails-rimosse-sostituite-rispedite-cancellate))
input("")
exit(0)
|
#!/usr/bin/python
import os, sys, shlex
from glob import glob
from subprocess import call
from optparse import OptionParser
dirsNotFound = []
options = []
args = []
def dcm2nii(file):
niis = glob("nii/*.nii.gz")
if len(niis) > 0:
for f in niis:
os.remove(f)
niiCmd = 'dcm2nii -a y -f y -d n -e n -i n -p n -r Y -o "nii" %s' % (file)
print niiCmd
call(shlex.split(niiCmd))
def goProc():
    """For each subject dir in `args`: find DTI dirs matching options.search and
    run the dcm2nii -> eddy_correct -> bet -> dtifit pipeline, skipping steps
    whose output files already exist."""
    global dirsNotFound, options, args
    rootdir = os.getcwd()
    for basedir in args:
        os.chdir(rootdir)
        if not os.path.isdir(basedir):
            # not a valid dir, skip
            dirsNotFound.append(basedir)
            continue
        os.chdir(basedir)
        search = options.search
        dirs = glob(search)
        print dirs
        absbase = os.getcwd()
        for dti in dirs:
            os.chdir(absbase)
            if not os.path.isdir(dti+"/dicom"):
                # no dicom subdir yet: delegate sorting to eddycor.py
                dicom2NrrdCmd = 'eddycor.py -s "%s" .' % (search)
                print dicom2NrrdCmd
                call(shlex.split(dicom2NrrdCmd))
            os.chdir(dti)
            print os.getcwd()
            if os.path.isdir("dicom"):
                file = glob('dicom/*.dcm')[0]
                if not os.path.isdir("nii"):
                    os.mkdir("nii")
                    dcm2nii(file)
                elif not len(glob("nii/*_FA.nii.gz")) > 0:
                    # Convert dicoms to nii.gz
                    dcm2nii(file)
            os.chdir("nii")
            niiFile = glob("*.nii.gz")[0]
            print niiFile
            eddycorGlob = glob('*eddycor.nii.gz*')
            print eddycorGlob
            data = os.path.isfile('data.nii.gz')
            print 'has data.nii.gz=',
            print data
            stemname = niiFile.split('_')[0]
            if len( eddycorGlob ) == 0 and not data:
                eddyfile = stemname + '_eddycor.nii.gz'
                print eddyfile
                eddyCmd = 'eddy_correct %s %s 0' % (niiFile, eddyfile)
                print eddyCmd
                if not os.path.isfile(eddyfile):
                    # fsl eddy current correction
                    call(shlex.split(eddyCmd))
            else:
                # reuse the existing eddy-corrected volume
                if data:
                    eddyfile = 'data.nii.gz'
                else:
                    eddyfile = eddycorGlob[0]
            # skull-strip (FSL bet) unless a bet_brain output already exists
            betCmd = 'bet %s bet_brain -R -n -m -f %s' % (eddyfile, options.betfrac)
            print betCmd
            if not os.path.isfile('bet_brain'):
                call(shlex.split(betCmd))
            # prefer plain 'bvecs'/'bvals' files, fall back to *.bvec/*.bval
            if ( os.path.isfile('bvecs')):
                bvecs = 'bvecs'
            else:
                bvecs = glob("*.bvec")[0]
            if ( os.path.isfile('bvals')):
                bvals = 'bvals'
            else:
                bvals = glob("*.bval")[0]
            dtifitCmd = 'dtifit --data=%s --out=%s --mask=bet_brain_mask --bvecs=%s --bvals=%s' % (eddyfile, stemname, bvecs, bvals)
            print dtifitCmd
            if len(glob("*_FA.nii.gz")) == 0:
                call(shlex.split(dtifitCmd))
    if len(dirsNotFound) > 0:
        print "These dirs where not found: "
        for i in dirsNotFound:
            print i
if __name__ == '__main__':
    parser = OptionParser(usage="Usage: %prog [options] <subject_dir>")
    parser.add_option("-s", "--search", dest="search", default='*DTI*', help="DTI dir name to search for. i.e *DTI*")
    parser.add_option("-f", "--betfrac", dest="betfrac", default='0.2', help='BET command fraction, default = 0.2')
    #parser.add_option("-d", "--dti_dir", dest="dir", help="Only process this directory, if set will ignore -s options and any arg supplied")
    #parser.add_option("-n", "--name", dest="name", help="Base subject name for beautifying output")
    # goProc reads `options` and `args` as module globals (see its `global` line).
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        sys.exit(2)
    else:
        goProc()
# Generated by Django 2.2.9 on 2020-07-18 12:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the StockAnalysisData table
    (daily per-stock technical-analysis indicators and candlestick patterns,
    with Japanese verbose names). Do not hand-edit field definitions."""

    dependencies = [
        ('web', '0023_auto_20200704_1346'),
    ]

    operations = [
        migrations.CreateModel(
            name='StockAnalysisData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, null=True, verbose_name='作成日時')),
                ('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='更新日時')),
                ('date', models.DateField(verbose_name='日付')),
                ('val_close_dy', models.FloatField(verbose_name='終値前日比')),
                ('val_close_dy_pct', models.FloatField(verbose_name='終値前日比率')),
                ('turnover_dy', models.FloatField(verbose_name='出来高前日比')),
                ('turnover_dy_pct', models.FloatField(verbose_name='出来高前日比率')),
                ('val_line', models.FloatField(verbose_name='ローソク長')),
                ('val_line_pct', models.FloatField(verbose_name='ローソク長率')),
                ('is_positive', models.BooleanField(verbose_name='陽線')),
                ('lower_mustache', models.FloatField(verbose_name='下ヒゲ')),
                ('upper_mustache', models.FloatField(verbose_name='上ヒゲ')),
                ('ma05', models.FloatField(verbose_name='移動平均(5日)')),
                ('ma25', models.FloatField(verbose_name='移動平均(25日)')),
                ('ma75', models.FloatField(verbose_name='移動平均(75日)')),
                ('ma05_diff', models.FloatField(help_text='終値ー5日移動平均', verbose_name='移動平均乖離(5日)')),
                ('ma25_diff', models.FloatField(help_text='終値ー25日移動平均', verbose_name='移動平均乖離(25日)')),
                ('ma75_diff', models.FloatField(help_text='終値ー75日移動平均', verbose_name='移動平均乖離(75日)')),
                ('ma05_diff_pct', models.FloatField(verbose_name='移動平均乖離率(5日)')),
                ('ma25_diff_pct', models.FloatField(verbose_name='移動平均乖離率(25日)')),
                ('ma75_diff_pct', models.FloatField(verbose_name='移動平均乖離率(75日)')),
                ('sigma25', models.FloatField(verbose_name='標準偏差(25日)')),
                ('ma25_p2sigma', models.FloatField(verbose_name='ボリンジャーバンド+2σ(25日)')),
                ('ma25_m2sigma', models.FloatField(verbose_name='ボリンジャーバンド-2σ(25日)')),
                ('is_upper05', models.BooleanField(help_text='前日移動平均値より上(5日)', verbose_name='上昇傾向(5日)')),
                ('is_upper25', models.BooleanField(help_text='前日移動平均値より上(25日)', verbose_name='上昇傾向(25日)')),
                ('is_upper75', models.BooleanField(help_text='前日移動平均値より上(75日)', verbose_name='上昇傾向(75日)')),
                ('is_takuri', models.BooleanField(help_text='長い下ヒゲ陰線', verbose_name='たくり線')),
                ('is_tsutsumi', models.BooleanField(help_text='前日ローソクを包み込む、大きいローソク', verbose_name='包線')),
                ('is_harami', models.BooleanField(help_text='前日ローソクに包まれる、小さいローソク', verbose_name='はらみ線')),
                ('is_age_sanpo', models.BooleanField(help_text='大陽線後→3本のローソクが収まる→最初の陽線終値をブレイク', verbose_name='上げ三法')),
                ('is_sage_sanpo', models.BooleanField(help_text='大陰線後→3本のローソクが収まる→最初の陰線終値を割り込み', verbose_name='下げ三法')),
                ('is_sanku_tatakikomi', models.BooleanField(help_text='3日連続の窓開き下落', verbose_name='三空叩き込み')),
                ('is_sante_daiinsen', models.BooleanField(help_text='3日連続の大陰線', verbose_name='三手大陰線')),
                ('stock', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.Stock')),
            ],
        ),
    ]
|
import threading
import logging
from common.Common import Locking
from Logger import Logger
class Cache:
    '''Fixed-capacity key/value cache with LRU eviction.

    A doubly linked list orders entries from least recently used
    (_chunkHead) to most recently used (_chunkTail); a dict gives O(1)
    key lookup. Not thread-safe -- see ThreadSafeCache.
    '''

    class Node:
        """Doubly linked list node holding one cache entry."""
        __slots__ = ['key', 'val', 'succ', 'prev']

        def __init__(self, key, val):
            self.key = key
            self.val = val
            self.succ = None
            self.prev = None

    def __init__(self, maxElemNum):
        assert maxElemNum > 0
        self._chunkHead = None          # least recently used node
        self._chunkTail = None          # most recently used node
        self._hashTable = {}            # key -> Node
        self._maxElemNum = maxElemNum   # capacity
        self._curElemNum = 0            # current number of entries
        self._logger = Logger.Get('LRU')

    def Clear(self):
        """Drop every entry; capacity is unchanged."""
        self._chunkHead = None
        self._chunkTail = None
        self._hashTable.clear()
        self._curElemNum = 0

    def Fetch(self, key):
        """Return the value for key (marking it most recently used), or None."""
        if self._logger.isEnabledFor(logging.DEBUG):
            self._logger.debug('fetch ' + str(self._hashTable))
        node = self._hashTable.get(key)
        if node is None:
            return None
        self._Update(key)
        return node.val

    def Add(self, key, value):
        """Insert key/value, evicting the LRU entry when full.

        Duplicate keys are rejected (logged as an error, no exception).
        """
        if key in self._hashTable:
            if self._logger.isEnabledFor(logging.ERROR):
                self._logger.error('can not insert same key ' + str(key))
            return
        newNode = Cache.Node(key, value)
        self._hashTable[key] = newNode
        if self._logger.isEnabledFor(logging.DEBUG):
            self._logger.debug('add ' + str(self._hashTable))
        if self._curElemNum == self._maxElemNum:
            # At capacity: evict the least recently used entry.
            if self._logger.isEnabledFor(logging.DEBUG):
                self._logger.debug('remove ' + str(self._head().key))
            self._hashTable.pop(self._head().key)
            self._Pop()
        else:
            self._curElemNum += 1
        self._Push(newNode)

    def Remove(self, key):
        """Remove key from the cache. Raises KeyError if absent."""
        nodeToDel = self._hashTable[key]
        if nodeToDel == self._chunkHead:
            self._chunkHead = self._chunkHead.succ
        if nodeToDel == self._chunkTail:
            self._chunkTail = self._chunkTail.prev
        # BUGFIX: relink both neighbours. The old code assigned
        # `nodeToDel.prev = nodeToDel.succ`, which mutated the node being
        # removed and left its neighbours still pointing at it, corrupting
        # the LRU list for later Fetch/eviction traversals.
        if nodeToDel.prev is not None:
            nodeToDel.prev.succ = nodeToDel.succ
        if nodeToDel.succ is not None:
            nodeToDel.succ.prev = nodeToDel.prev
        self._hashTable.pop(key)
        self._curElemNum -= 1

    def size(self):
        """Number of entries currently stored."""
        return self._curElemNum

    def capacity(self):
        """Maximum number of entries."""
        return self._maxElemNum

    def _Update(self, key):
        """Move key's node to the tail (most recently used position)."""
        cur = self._hashTable[key]
        if cur == self._chunkTail:
            return
        if cur == self._chunkHead:
            self._chunkHead = self._chunkHead.succ
        if cur.prev is not None:
            cur.prev.succ = cur.succ
        if cur.succ is not None:
            cur.succ.prev = cur.prev
        cur.succ = None
        cur.prev = self._chunkTail
        self._chunkTail.succ = cur
        self._chunkTail = cur

    def _head(self):
        return self._chunkHead

    def _tail(self):
        return self._chunkTail

    def _Pop(self):
        """Unlink and discard the head (LRU) node."""
        top = self._chunkHead
        self._chunkHead = self._chunkHead.succ
        if self._chunkHead is None:
            self._chunkTail = None
        else:
            # Clear dangling links so the popped node is unreachable.
            self._chunkHead.prev = None
            top.succ = None
        if self._logger.isEnabledFor(logging.DEBUG):
            # BUGFIX: the old message mixed string concatenation with a
            # conditional expression; precedence made the whole prefix part
            # of the conditional and logged the wrong text.
            headDesc = 'empty' if self._chunkHead is None else str(self._chunkHead.key)
            tailDesc = 'empty' if self._chunkTail is None else str(self._chunkTail.key)
            self._logger.debug('pop chunkHead %s chunkTail %s' % (headDesc, tailDesc))

    def _Push(self, node):
        """Append node at the tail (most recently used position)."""
        if self._chunkHead is None:
            self._chunkHead = node
        if self._chunkTail is not None:
            self._chunkTail.succ = node
        node.prev = self._chunkTail
        self._chunkTail = node
        if self._logger.isEnabledFor(logging.DEBUG):
            self._logger.debug('push chunkHead %s chunkTail %s' %
                               (self._chunkHead.key, self._chunkTail.key))
class ThreadSafeCache:
    """Mutex-guarded wrapper around Cache.

    Every public operation runs under a single lock -- including Fetch,
    because Cache.Fetch reorders the internal LRU list (_Update) and so
    mutates shared state.
    """
    # TODO we need to test the performance
    def __init__(self, maxElemNum):
        self._cache = Cache(maxElemNum)
        self._lock = threading.Lock()

    def Fetch(self, key):
        # BUGFIX: the original Fetch was lock-free, but it relinks LRU nodes
        # and therefore raced with concurrent Add/Remove/Clear.
        with Locking(self._lock):
            return self._cache.Fetch(key)

    def Clear(self):
        with Locking(self._lock):
            self._cache.Clear()

    def Add(self, key, value):
        with Locking(self._lock):
            self._cache.Add(key, value)

    def Remove(self, key):
        with Locking(self._lock):
            self._cache.Remove(key)
|
import numpy as np
import os
def read_list(list_file_path):
    """Read frame identifiers from a list file.

    Lines beginning with '#' are comments and skipped; for every other line
    the first space-separated token (stripped of whitespace) is returned.
    """
    with open(list_file_path) as list_file:
        lines = list_file.readlines()
    return [line.split(' ')[0].strip()
            for line in lines
            if not line.startswith('#')]
#!/usr/bin/python
__author__ = "Donghoon Lee"
__copyright__ = "Copyright 2016"
__credits__ = ["Donghoon Lee"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Donghoon Lee"
__email__ = "donghoon.lee@yale.edu"
###
### Predict Y and save results as NPY
###
### Usage: python postproc_modelPred.py -m splicing_model_lstm1 -e E028 --core
###
import argparse
import preproc_loadData
import model_eval
# NOTE: Python 2 script (print statements below).
parser = argparse.ArgumentParser(description='Evaluate Model')
parser.add_argument('-m','--model', help='model name',required=True)
parser.add_argument('-e','--eid', help='sample eid',required=True)
# --full / --core are mutually exclusive switches that set args.feature.
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--full', dest='feature', action='store_true')
group.add_argument('--core', dest='feature', action='store_false')
group.set_defaults(feature=True)
args = parser.parse_args()
###
MODEL_NAME = args.model
EID = args.eid
### LOAD DATA ###
if args.feature:
    print "Loading Full Dataset"
    MODEL_NAME = MODEL_NAME+"_"+EID+"_full"
    # NOTE(review): loadData appears to return (core X, full X, Y) -- confirm.
    _, inputX, inputY = preproc_loadData.loadData(EID)
else:
    print "Loading Core Dataset"
    MODEL_NAME = MODEL_NAME+"_"+EID+"_core"
    inputX, _, inputY = preproc_loadData.loadData(EID)
### LOAD MODEL ###
model = model_eval.loadModel(MODEL_NAME)
### PREDICT ###
# Split features in half along axis 1: first half feeds the 3' acceptor
# branch, second half the 5' donor branch of the model.
inputX_3acc = inputX[:,0:inputX.shape[1]/2,:]
inputX_5don = inputX[:,inputX.shape[1]/2:inputX.shape[1],:]
predY = model_eval.predModel(model, [inputX_3acc, inputX_5don])
model_eval.save2npy(MODEL_NAME+"_predY.npy",predY)
### ROC AUC ###
roc_auc = model_eval.calcROC_AUC(inputY, predY)
print 'Test ROC AUC:', roc_auc
### F1 ###
f1 = model_eval.calcF1(inputY, predY)
print 'Test F1 Score:', f1
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import os
import time
import urllib.request
try: #python3
from urllib.request import urlopen
except: #python2
from urllib2 import urlopen
from PyQt5.QtWidgets import QApplication, QDesktopWidget
from PyQt5.QtCore import QObject, pyqtSlot, QUrl, Qt, QPoint
from PyQt5.QtWebChannel import QWebChannel
from PyQt5.QtWebEngineWidgets import QWebEngineView
from httpServe.server_tool import http_server_main
from httpServe import *
__Author__ = '''wenye'''
class CallHandler(QObject):
    """Bridge object exposed to the embedded web page via QWebChannel.

    JavaScript accesses it under the name it was registered with
    (registerObject) and may invoke the slots defined here.
    """
    def __init__(self):
        super(CallHandler, self).__init__()

    @pyqtSlot(str, result=str)  # slot takes one str argument; returns str to JS
    def init_home(self, str_args):
        """Handle the page's init call and push two JS calls back to it."""
        print('call received')
        print('resolving......init home..')
        print(str_args)  # inspect the argument sent from JavaScript
        # #####
        # Application-specific handling goes here, for example:
        msg = '收到来自python的消息'
        # NOTE(review): relies on the module-level global `view`; only valid
        # after the __main__ block of this file has created it.
        view.page().runJavaScript("alert('%s')" % msg)
        view.page().runJavaScript("window.say_hello('%s')" % msg)
        return 'hello, Python'
class WebEngine(QWebEngineView):
    """Main browser window: fixed-size web view centred on the desktop."""
    def __init__(self):
        super(WebEngine, self).__init__()
        self.setContextMenuPolicy(Qt.NoContextMenu)  # disable the default right-click menu
        # self.customContextMenuRequested.connect(self.showRightMenu)  # hook for a custom context menu (omitted here)
        self.setWindowTitle('QWebChannel与前端交互')
        self.resize(1100, 650)
        cp = QDesktopWidget().availableGeometry().center()
        # Centre the window on the available desktop area.
        self.move(QPoint(cp.x() - self.width() / 2, cp.y() - self.height() / 2))

    def closeEvent(self, evt):
        """Clear the web view's HTTP cache before the window closes."""
        self.page().profile().clearHttpCache()
        super(WebEngine, self).closeEvent(evt)
if __name__ == '__main__':
    # Build the application main window.
    app = QApplication(sys.argv)
    view = WebEngine()
    channel = QWebChannel()
    handler = CallHandler()  # the Python-side object exposed to the page
    # Register the handler under the name 'PyHandler'; JS reaches it as PyHandler.
    channel.registerObject('PyHandler', handler)
    view.page().setWebChannel(channel)  # attach the channel to the page
    # Load the local index.html next to the working directory.
    url_string = urllib.request.pathname2url(os.path.join(os.getcwd(), "index.html"))
    # Alternatively load a served URL (e.g. a local dev server):
    # url_string = 'localhost:64291'
    print(url_string, '\n', os.path.join(os.getcwd(), "index.html"))
    view.load(QUrl(url_string))
    # NOTE(review): this sleep blocks the (not yet started) event loop for 2s
    # before showing the window -- presumably a load-settling workaround; confirm.
    time.sleep(2)
    view.show()
    sys.exit(app.exec_())
|
'''
The primes 3, 7, 109, and 673, are quite remarkable. By taking
any two primes and concatenating them in any order the result
will always be prime. For example, taking 7 and 109, both 7109
and 1097 are prime. The sum of these four primes, 792, represents
the lowest sum for a set of four primes with this property.
Find the lowest sum for a set of five primes for which any two
primes concatenate to produce another prime.
'''
from itertools import permutations, combinations
from math import sqrt, ceil
from time import time
def main():
    """Project Euler 60: find five primes (< 10000) such that concatenating
    any two of them, in either order, yields a prime.

    Returns the first quintuple found (candidates are scanned in increasing
    order) together with its sum. Pairwise checks are pruned as early as
    possible via prueba_simple.
    """
    primos = [i for i in range(3, 10000) if is_prime(i)]
    LEN = len(primos)
    for i in range(LEN):
        p1 = primos[i]
        for j in range(i + 1, LEN):
            p2 = primos[j]
            if not prueba_simple((p1, p2)):
                continue
            for k in range(j + 1, LEN):
                p3 = primos[k]
                if not prueba_simple((p1, p3)) or \
                   not prueba_simple((p2, p3)):
                    continue
                for l in range(k + 1, LEN):
                    p4 = primos[l]
                    if not prueba_simple((p1, p4)) or \
                       not prueba_simple((p2, p4)) or \
                       not prueba_simple((p3, p4)):
                        continue
                    for m in range(l + 1, LEN):
                        p5 = primos[m]
                        if not prueba_simple((p1, p5)) or \
                           not prueba_simple((p2, p5)) or \
                           not prueba_simple((p3, p5)) or \
                           not prueba_simple((p4, p5)):
                            continue
                        else:
                            # All pairwise checks passed: first valid quintuple.
                            return (p1, p2, p3, p4, p5), sum((p1, p2, p3, p4, p5))
# es_primo = lambda n: all(n % i != 0 for i in range(2, int(sqrt(n) + 1)))
def is_prime(x):
    """Return True if x is prime, False otherwise.

    BUGFIX: the previous version returned False for 2 (the only even prime)
    and True for 0, 1 and negatives; all are now handled correctly. Callers
    in this file only pass values >= 3, so their behaviour is unchanged.
    """
    if x < 2:
        return False
    if x == 2:
        return True
    if x % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(x).
    for i in range(3, int(ceil(sqrt(x)) + 1), 2):
        if x % i == 0:
            return False
    return True
def get_prime():
    """Yield an endless stream of primes, starting from 3."""
    candidate = 2
    while True:
        candidate += 1
        if is_prime(candidate):
            yield candidate
def prueba_simple(xs):
    """Return True when every ordered concatenation of the numbers in xs
    is prime (e.g. for (7, 109): both 7109 and 1097 must be prime).

    BUGFIX: replaced the bare `reduce` (a builtin only in Python 2) with an
    equivalent str-join concatenation that works on both Python 2 and 3.
    """
    for perm in permutations(xs):
        concatenated = int(''.join(str(n) for n in perm))
        if not is_prime(concatenated):
            return False
    return True
def contains(xs):
    """Return True if the two collections in xs share at least one element."""
    first, second = xs[0], xs[1]
    return any(item in second for item in first)
if __name__ == "__main__":
    # Python 2 entry point: run the search and report elapsed wall time.
    start = time()
    print main()
    print 'total time =', (time() - start), 's'
|
__author__ = 'Mies'
'''
Inleveropdracht Week 5, Pyramide/diamant:
Schrijf een programma dat aan de gebruiker een getal vraagt. Hij toont dan een pyramide patroon op basis van dit getal, waarbij het ingevoerde getal boven staat en elke opvolgende regel dit getal met 1 eenheid minder. Als de gebruiker geen getal invoert, dan verschijnt de melding: “Helaas geen getal!. Probeer opnieuw!”. De gebruiker moet dan opnieuw een getal invoeren.
Als voorbeeld:
Voer een getal in: 4
4
333
22222
1111111
Nog een voorbeeld:
Voer een getal in: A “Helaas geen getal!. Voer een getal in: 2
2
111
Nog een voorbeeld:
Voer een getal in: 5
5
444
33333
2222222
111111111
Probeer opnieuw!”
Nog een voorbeeld:
Uitdaging aan de studenten (voor 60 binpunten):
Er moet in plaats van een pyramide een diamant getoond worden: Voer een getal in: 5
5
444
33333
2222222
111111111
2222222
33333
444
5
'''
# asks for a numeral input, performs several checks if asked to.
# asks for a numeral input, performs several checks if asked to.
def numeral_input( msg="Please enter a valid number.\n",
                   positive=False,
                   roundnumber = False,
                   errormsg="Can't convert to number, please try again. \n",
                   notposmes="Number is not more than zero, please try again."):
    """Prompt repeatedly until the user enters a valid number; return it.

    msg         -- prompt text shown to the user
    positive    -- when True, require the value to be strictly greater than 0
    roundnumber -- when True parse as int, otherwise as float
    errormsg    -- message shown when the input cannot be parsed
    notposmes   -- message shown when positive=True and the value is not > 0
    """
    while True:
        inputval = input(msg)
        # Keep the try body minimal: only the parse can raise ValueError.
        try:
            if roundnumber:
                parsed = int(inputval)
            else:
                parsed = float(inputval)
        except ValueError:
            print(errormsg)
            continue
        # BUGFIX: the old check was `parsed < 0`, which accepted 0 even though
        # notposmes ("not more than zero") and the Dutch caller demand > 0.
        if positive and parsed <= 0:
            print(notposmes)
            continue
        return parsed
# draws pyramid
def drawpyramid(max):
for i in range(1, max+1):
spacesinfront = (max-i)
characters = ((i*2)-1)
print(' ' * spacesinfront + str(i)*characters)
def drawdiamond(max):
    """Print a diamond: a pyramid followed by its mirror image with the
    widest (bottom) row not repeated."""
    drawpyramid(max)
    for level in reversed(range(1, max)):
        print(' ' * (max - level) + str(level) * (2 * level - 1))
# asks for user input, draws a diamond
def main() -> None:
    """Prompt (in Dutch) for a positive whole diamond size, then draw it."""
    pyramidemax = numeral_input('Geef het formaat van de pyramide\n', True, True, 'Kan niet converteren naar geldig, heel getal. Probeer het opnieuw.\n', 'Getal is niet meer dan 0, probeer het opnieuw.')
    drawdiamond(pyramidemax)

if __name__ == '__main__':
    main()
|
# FTP control-connection port
PORT = 21
# Maximum duration (seconds) from an initial probe to a successful login
SCAN_TIMEOUT = 20
# Maximum simultaneous scan tasks
MAX_SCAN_TASKS = 1000
# Interval between scans (seconds)
SCAN_INTERVAL = 10 * 60
# Offline time (seconds) after which a server is forgotten
OFFLINE_DELAY = 24 * 3600
# Timeout (seconds) for the connection to an FTP server during indexation
INDEX_TIMEOUT = 30
# Maximum simultaneous index tasks
MAX_INDEX_TASKS = 1
# Minimum interval (seconds) between index tasks on a given host
INDEX_INTERVAL = 4 * 3600
# Maximum number of FTP errors allowed during the indexation of a server
MAX_INDEX_ERRORS = 10
# Signal names to catch for graceful shutdown
SOFT_SIGNALS = ['SIGINT', 'SIGTERM']
|
import re
import socket
from twisted.mail.smtp import ESMTPSenderFactory, sendmail
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from twisted.internet import threads
from cStringIO import StringIO
from email.generator import Generator
import mailer
# Very loose email shape: anything@anything, captured as named groups.
EMAIL_RE = re.compile(r"(?P<local>[^@]+)@(?P<domain>[^@]+)")

# Proxy the message class within our own email module for easy access.
Message = mailer.Message
class TwistedMailer(mailer.Mailer):
    """
    Twisted version of the simple Mailer.
    """
    def send(self, msg):
        # NOTE(review): this method looks unfinished -- the factory is built
        # with only (user, password), is never connected, and nothing is
        # returned or done with `msg`. Compare the _sendmail helper below,
        # which shows the intended full call. Verify before use.
        if self._usr or self._pwd:
            factory = ESMTPSenderFactory(self._usr, self._pwd)
def _sendmail(fromAddress, toAddress, message, host='localhost', port=0,
              user=None, password=None, callback=None, errback=None):
    """
    Connect to an SMTP server and send an email message. If username and
    password are provided, ESMTP is used to connect, otherwise a standard SMTP
    connection is used.

    @param fromAddress: The SMTP reverse path (ie, MAIL FROM)
    @param toAddress: The SMTP forward path (ie, RCPT TO)
    @param message: An L{email.message.Message} instance (such as C{MIMEText}).
    @param host: The MX host to which to connect.
    @param port: The port number to which to connect.
    @param user: The username with which to authenticate.
    @param password: The password with which to authenticate.

    @return: A Deferred which will be called back when the message has been
    sent or which will errback if it cannot be sent.
    """
    # Flatten the Message object once; both transports need the raw
    # RFC 2822 text rather than the Message instance itself.
    fp = StringIO()
    g = Generator(fp, mangle_from_=False, maxheaderlen=60)
    g.flatten(message)
    fp.seek(0)
    if user or password:
        d = Deferred()
        # BUGFIX: ESMTPSenderFactory expects a *file-like* message source;
        # the old code flattened into fp but then passed `message` instead.
        factory = ESMTPSenderFactory(user, password, fromAddress, toAddress,
                                     fp, d)
        reactor.connectTCP(host, port, factory)
    else:
        # BUGFIX: the old call omitted the message argument entirely
        # (`sendmail(host, fromAddress, toAddress, )`) and ignored `port`.
        d = sendmail(host, fromAddress, toAddress, fp.getvalue(), port=port)
    return d
def valid_email(email):
    """
    Very basic check that the provided email address looks plausible
    (one '@' with non-empty local and domain parts).

    @rtype: C{bool}
    """
    return bool(EMAIL_RE.match(email))
def reverse_dns(ip):
    """
    Perform a reverse-DNS lookup on a given IP address string. The lookup is
    asynchronous (run in a reactor thread pool), so add a callback to the
    returned Deferred to receive the primary host name.

    @param ip: A string IP address.
    @return: A deferred. You should probably .addCallback().
    @rtype: L{twisted.internet.defer.Deferred}
    """
    # socket.gethostbyaddr blocks, so defer it to a thread.
    return threads.deferToThread(lambda: socket.gethostbyaddr(ip)[0])
|
Python 3.5.2 (v3.5.2:4def2a2901a5, Jun 25 2016, 22:18:55) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> data = b'Hello World'
>>> data[0:5]
b'Hello'
>>> data.startswith(b'Hello')
True
>>> data.split()
[b'Hello', b'World']
>>> data.replace(b'Hello', b'Hello Better')
b'Hello Better World'
>>>
|
import copy
from datetime import datetime
import json
version = 1.0
class MetaBase():
    """A named value with a free-text description."""

    def __init__(self, name='', value='', description=''):
        self.name, self.value, self.description = name, value, description

    def get_dict(self):
        """Return a deep copy of the attribute dict (safe for callers to mutate)."""
        return copy.deepcopy(vars(self))
class MetaInput():
    """Describes one model input: name, bounds, unit, type and required flag."""

    def __init__(self, name='', description = '', min=0.0, max=0.0,unit='', type='', required=False):
        fields = ('name', 'description', 'min', 'max', 'unit', 'type', 'required')
        values = (name, description, min, max, unit, type, required)
        for field, value in zip(fields, values):
            setattr(self, field, value)

    def get_dict(self):
        """Return a deep copy of the attribute dict (safe for callers to mutate)."""
        return copy.deepcopy(vars(self))
class MetaOutput:
    """Describes one model output: name, bounds, unit and type.

    Mirrors MetaInput (minus `required`). The constructor now accepts the
    same keyword arguments as MetaInput for consistency; all default to the
    previous fixed values, so MetaOutput() behaves exactly as before.
    """

    def __init__(self, name='', description='', min=0.0, max=0.0, unit='', type=''):
        self.name = name
        self.description = description
        self.min = min
        self.max = max
        self.unit = unit
        self.type = type

    def get_dict(self):
        """Return a deep copy of the attribute dict (safe for callers to mutate)."""
        return copy.deepcopy(self.__dict__)
class MetaInfo:
    """Metadata envelope: model/collection identity, status, timestamp, URL."""
    def __init__(self, model='hwbi', collection='qed'):
        # NOTE(review): the `model` argument is stored as `modelVersion` --
        # confirm whether callers pass a name or a version string.
        self.modelVersion = model
        self.collection = collection
        self.version = version  # module-level API version constant
        self.description = ""
        self.status = ""
        self.timestamp = str(datetime.now())  # creation time, local clock
        self.url = Url()
    def get_dict(self):
        """Deep copy of attributes, with the nested Url flattened to a dict."""
        dct = copy.deepcopy(self.__dict__)
        dct['url'] = self.url.get_dict()
        return dct
class Url:
    """A typed hyperlink reference (media type + href)."""

    def __init__(self, href='', type='application/json'):
        # Assignment order (type, then href) fixes the dict/JSON key order.
        self.type = type
        self.href = href

    def get_dict(self):
        """Return a deep copy of the attribute dict (safe for callers to mutate)."""
        return copy.deepcopy(vars(self))
class Link:
    """A relation-tagged hyperlink (rel + media type + href)."""

    def __init__(self, rel='', href='', type='application/json'):
        # Assignment order (rel, type, href) fixes the dict/JSON key order.
        self.rel = rel
        self.type = type
        self.href = href

    def get_dict(self):
        """Return a deep copy of the attribute dict (safe for callers to mutate)."""
        return copy.deepcopy(vars(self))
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder that serialises any object exposing a get_dict() method;
    everything else falls through to the default (and raises TypeError)."""

    def default(self, obj):
        getter = getattr(obj, 'get_dict', None)
        if getter is not None:
            return getter()
        return json.JSONEncoder.default(self, obj)
#!/usr/bin/python
#-*- coding:utf-8 -*-
from firstimage_extractor import *
HUDONG_DUMP='/home/xlore/disk2/data/hudong/hudong-dump-20120823.dat'
OUTPUT = 'hudong.firstimage.dat'
TTL = '/home/xlore/Xlore/etc/ttl/xlore.instance.icon.hudong.ttl'
INSTANCE_LIST='/home/xlore/Xlore/etc/ttl/xlore.instance.list.ttl'
class HudongFirstImage(FirstImage):
    """Extracts each article's first image from a Hudong Baike dump and emits
    RDF Turtle linking instances to their icon URLs. Python 2 code."""
    #def __init__(self, i, o, ttl):
    #    super(i, o, ttl)
    def extract(self):
        """Scan the dump line by line; write 'title<TAB>image-url' pairs to
        self.output, skipping Hudong's 404 placeholder image."""
        title = ""
        image = ""
        with open(self.output,'w') as f:
            for line in open(self.input):
                if line.startswith('Title:'):
                    title = line.strip('\n').split(':')[-1]
                if line.startswith('Image:'):
                    # "Image:<url>::;..." -- split once on ':' to keep the
                    # URL's own colons, then take the first '::;' segment.
                    image = line.strip('\n').split(':',1)[-1].split('::;')[0]
                    if not 'http://a0.att.hudong.com/00/00/404.jpg' == image:
                        f.write('%s\t%s\n'%(title,image))
                        f.flush()
    def generate_ttl(self):
        """Write the TTL prefix header and one hasIcon triple per zh-labelled
        instance whose title has an extracted image."""
        with open(self.ttl,'w') as f:
            f.write('@base <http://xlore.org/instance/> .\n')
            f.write('@prefix property: <http://xlore.org/property#> .\n')
            f.write('@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n')
            f.write('@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n')
            f.write('@prefix owl: <http://www.w3.org/2002/07/owl#> .\n')
            f.write('\n')
            f.write('property:hasIcon rdf:type rdf:Property .\n')
            f.write('property:hasIcon rdf:type owl:DatatypeProperty .\n')
            f.write('property:hasIcon rdfs:label "hasIcon" .\n')
            f.write('property:hasIcon rdfs:domain owl:Individual .\n')
            f.write('\n')
            f.flush()
            for line in open(INSTANCE_LIST):
                if '@zh' in line:
                    i = line[0:line.index(' ')]
                    title = line[line.index('"')+1:line.rindex('"')]
                    # NOTE(review): `images` is not defined in this class or
                    # anywhere visible in this chunk -- presumably a
                    # title->url dict built by the base class or extract();
                    # verify before running.
                    if title in images:
                        f.write('%s property:hasIcon "%s"@hudong .\n'%(i,images[title]))
                        f.flush()
if __name__=="__main__":
    # NOTE(review): `run` is presumably defined on the FirstImage base class.
    fi = HudongFirstImage(HUDONG_DUMP, OUTPUT, TTL)
    fi.run()
|
"""
Originally ported from code at:
http://code.google.com/apis/chart/docs/data_formats.html#encoding_data
retrieved 2010/03/13, but then was cleaned up, enhanced, fixed, etc.
"""
import string
import math
def is_number(s):
    """Return True when s can be parsed as a float, False otherwise."""
    try:
        float(s)
        return True
    except ValueError:
        return False

# Alphabet for Google Charts "simple" data encoding: one character per value.
simpleEncoding = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
simpleEncodingLength = len(simpleEncoding)
simpleRange = 1.0 * simpleEncodingLength - 1

def simpleEncode(valueArray, maxValue):
    """
    Encode valueArray using Google Charts simple encoding, scaling values so
    that maxValue maps to the highest encodable character. Non-numeric or
    negative entries, and values that scale above maxValue, become '_'
    (the "missing value" marker).
    """
    chartData = []
    for currentValue in valueArray:
        if is_number(currentValue) and currentValue >= 0:
            # Scale the value so maxValue lands at the top of the alphabet.
            scaledVal = round(simpleRange * currentValue / maxValue)
            if scaledVal <= simpleRange:
                chartData.append(simpleEncoding[int(scaledVal)])
                continue
        chartData.append('_')
    # BUGFIX: was string.join(chartData, ''), which only exists in Python 2;
    # ''.join works identically on both Python 2 and 3.
    return ''.join(chartData)
extendedEncoding = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-.'
extendedEncodingLength = len(extendedEncoding)
extendedRange = 1.0 * extendedEncodingLength * extendedEncodingLength - 1
"""
Same as simple encoding, but for extended encoding.
"""
def extendedEncode(valueArray, maxValue):
chartData = ''
for currentValue in valueArray:
if is_number(currentValue) and currentValue >= 0:
# Scale the value to maxValue
scaledVal = round(extendedRange * currentValue / maxValue)
if scaledVal <= extendedRange:
# Calculate first and second digits and add them to the output.
quotient = math.floor(scaledVal / extendedEncodingLength)
remainder = scaledVal - extendedEncodingLength * quotient
chartData += extendedEncoding[int(quotient)] + extendedEncoding[int(remainder)]
continue
chartData += '__'
return chartData
|
# Print the last letter of the word
word = 'Архангельск'
print(word[-1])

# Print the number of letters 'а' in the word (case-insensitive)
word = 'Архангельск'
count_a = 0
for l in word:
    if l.lower() == 'а':
        count_a += 1
print(f"The amount of a is: {count_a}")

# Print the number of vowels in the word (Latin vowels only)
word = 'Archangelstk'
vowels = 'aeuio'
vowels_count = 0
for l in word:
    if l.lower() in vowels:
        vowels_count += 1
print(vowels_count)

# Print the number of words in the sentence
sentence = 'Мы приехали в гости'
print(len(sentence.split()))

# Print the first letter of each word, one per line
sentence = 'Мы приехали в гости'
for w in sentence.split():
    print(w[0])

# Print the average word length
sentence = 'Мы приехали в гости'
sum_length = 0
for w in sentence.split():
    sum_length += len(w)
print(sum_length/len(sentence.split()))
|
import cv2

# Source image files for the bundled core flags.
__belgium_file = "CoreFlags/Flag_of_Belgium.png"
__france_file = "CoreFlags/Flag_of_France.png"
__germany_file = "CoreFlags/Flag_of_Germany.png"
__trans_pride_file = "CoreFlags/Transgender_Pride_flag.png"
__indian_file = "CoreFlags/Flag_of_India.png"
__serbian_file = "CoreFlags/Flag_of_Serbia.png"
__panama_file = "CoreFlags/Flag_of_Panama.png"


def __get_img(filename: str):
    """Load the image at `filename` as a BGR colour image (flag 1 = cv2.IMREAD_COLOR)."""
    return cv2.imread(filename, 1)


# Flag images loaded once at import time.
german_flag = __get_img(__germany_file)
french_flag = __get_img(__france_file)
belgian_flag = __get_img(__belgium_file)
trans_pride_flag = __get_img(__trans_pride_file)
indian_flag = __get_img(__indian_file)
serbian_flag = __get_img(__serbian_file)
panama_flag = __get_img(__panama_file)

# BUGFIX: __all__ must be a list of *names* (strings). The previous version
# listed the image objects themselves, which makes `from <module> import *`
# raise TypeError in Python 3.
__all__ = ["german_flag", "french_flag", "belgian_flag", "trans_pride_flag",
           "indian_flag", "serbian_flag", "panama_flag"]
|
#from tensorflow import keras
#from tensorflow.keras.layers import Dense
#from tensorflow.keras import layers
import os
from os import listdir
from os.path import isfile, join, isdir
import nltk
from keras_preprocessing.text import Tokenizer
from nltk.corpus import stopwords
import pymorphy2
from sklearn.feature_extraction.text import CountVectorizer
import docx
import string
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
def tokenizer_words(name_folder: str, file_name: str, path: str):
    """Collect "edition" .doc/.docx files under region folders, read their
    highlighted runs, normalise the Russian tokens (pymorphy2), drop stop
    words and punctuation, and print the resulting vocabulary.

    name_folder -- sub-folder name searched inside each region folder
    file_name   -- required file-name prefix (matched case-insensitively)
    path        -- root directory containing one folder per region
    """
    main_folder = os.listdir(path)
    print("Загрузка файлов...")
    list_of_files = []
    for folders in main_folder:  # iterate over the region-named folders
        sub_path = join(path, folders)
        if isdir(join(sub_path, name_folder)):
            # Contents of e.g. "<region>/3_2" and similar
            list_folders = listdir(join(path, folders, name_folder))
            if not len(list_folders):
                continue
            # Iterate over the token-named sub-folders
            for sub_folders in list_folders:
                sub_sub_path = join(path, folders, name_folder, sub_folders)
                if isfile(sub_sub_path):
                    # Entry is a file: collect matching docs from this level.
                    # Skips Office lock files (names starting with "~$").
                    # NOTE(review): this branch filters on the hard-coded
                    # prefix "edition" instead of `file_name` -- confirm.
                    list_of_files.append([join(path, folders, name_folder, sub_folders1)
                                          for sub_folders1 in list_folders if (sub_folders1[:2] != '~$')
                                          and (sub_folders1[:7].lower() == "edition") and
                                          ((sub_folders1[-4:] == "docx") or (sub_folders1[-3:] == "doc"))])
                    continue
                # Entry is a directory: descend one more level and collect
                # files whose name starts with `file_name`.
                list_sub_folders = [f for f in listdir(sub_sub_path) if isdir(join(sub_sub_path, f))]
                for sub_sub_sub_folders in list_sub_folders:
                    list_files = listdir(join(path, folders, name_folder, sub_folders, sub_sub_sub_folders))
                    list_of_files.append([join(path, folders, name_folder, sub_folders, sub_sub_sub_folders, curr_file)
                                          for curr_file in list_files if (curr_file[:2] != '~$')
                                          and (curr_file[:len(file_name)].lower() == file_name) and
                                          ((curr_file[-4:] == "docx") or (curr_file[-3:] == "doc"))])
        else:
            continue
    # Drop empty match lists.
    list_of_files = [f for f in list_of_files if f]
    # for i in range(len(list_of_files)):
    #     if not list_of_files[i]:
    #         list_of_files.pop(i)
    print("Файлы загружены")
    text_documents = []
    print("Чтение файлов и токенизация слов...")
    for document in list_of_files:
        # Only the first matching file of each group is read.
        doc = docx.Document(document[0])
        text = ""
        for paragraph in doc.paragraphs:
            if paragraph.text != "":
                for run in paragraph.runs:
                    # Keep only highlighted runs (the annotated text).
                    if run.font.highlight_color:
                        text += run.text
        text_documents.append(text)
    formatted_text = []
    stop_words = stopwords.words("russian")
    # Lemmatise every token to its normal form; drop punctuation/stop words.
    for text_document in text_documents:
        text_token = nltk.word_tokenize(text_document)
        morph = pymorphy2.MorphAnalyzer()
        for i in range(len(text_token)):
            p = morph.parse(text_token[i])[0]
            text_token[i] = p.normal_form
            if (text_token[i] not in string.punctuation) and (text_token[i] not in stop_words):
                formatted_text.append(text_token[i])
    formatted_text_without_punc = []
    # Vectorise to obtain the de-duplicated, sorted vocabulary.
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(formatted_text)
    formatted_text = vectorizer.get_feature_names()
    print(formatted_text)
    # encoded_text = tokenizer.texts_to_matrix(formatted_text, mode="binary")

tokenizer_words("4_2", "edition", "C:\\dataset\\DataSet_Razmetra")
|
import sys
import numpy as np
import matplotlib.pyplot as plot
import simulate
import learn
import random
# Percentile used when reporting per-task-count error caps (80th).
threshold = 0.8
outcomes = [True, False]
rcaps = []  # per-task-count cap on learning-speed estimation error
qcaps = []  # per-task-count cap on quality estimation error
# NOTE(review): presumably mean/stddev of the learning-rate and performance
# distributions -- confirm against simulate.createHyperbolicWorker.
r = {'mu': 50, 'sigma': 10}
p = {'mu': 60, 'sigma': 10}
length = 100  # maximum number of tasks per experiment
size = 500    # number of simulated workers
workers = simulate.createHyperbolicWorker(size, r, p, None, 1)
def resetWorkers(workers):
    """Reset the learning state of every worker in the collection."""
    for current_worker in workers:
        current_worker.reset()
# For each task count i, run all workers through i tasks, let each worker
# fit its learning model, and record the 80th-percentile relative errors.
for i in range(10, length + 1):
    tasks = simulate.createBinaryTasks(i)
    resetWorkers(workers)
    r_differences = []
    q_differences = []
    for worker in workers:
        for task in tasks:
            answer = worker.doTask(task, outcomes)
            if answer == task:
                worker.updateLearning(True)
            else:
                worker.updateLearning(False)
        worker.learn()
        # Relative estimation error of learning speed and of quality.
        rdiff = abs(worker.er - worker.r) / worker.r
        qdiff = abs(worker.getEstimatedQualityAtX(worker.x) - worker.getQuality()) / worker.getQuality()
        #print rdiff, qdiff
        r_differences.append(rdiff)
        q_differences.append(qdiff)
    r_differences = sorted(r_differences)
    q_differences = sorted(q_differences)
    # 80th-percentile caps; the learning-speed cap is scaled down by 10
    # (NOTE(review): presumably for plotting on a shared scale -- confirm).
    rcaps.append(r_differences[int(threshold * size) - 1] / 10.0)
    qcaps.append(q_differences[int(threshold * size) - 1])
# Plot both error caps against the number of tasks.
xs = np.arange(10, length + 1, 1)
f, ax = plot.subplots(1, 2)
ax[0].plot(xs, rcaps, label='learning speed difference')
ax[1].plot(xs, qcaps, label='quality difference')
ax[0].legend(bbox_to_anchor=(1, 0.7))
ax[1].legend(bbox_to_anchor=(1, 0.7))
ax[0].set_xlabel('tasks')
ax[1].set_xlabel('tasks')
ax[0].set_ylabel('difference')
ax[1].set_ylabel('difference')
plot.show()
|
import pandas as pd
import json
# Read the CSV, treating 'no info' and '.' as missing values (NaN).
xls = pd.read_csv('datos.csv',na_values=['no info','.']#,index_col='Month'
                  )
# xls.head(#)
# meses= xls['Month']
print(xls)
# with open('datos.json') as json_file:
#     data = json.load(json_file)
#     for i in data:
#         print (i)
from apistar import App as BaseApp, Route, TestClient, http
from apistar_sentry import SentryMixin
class App(SentryMixin, BaseApp):
    """API Star application with Sentry error reporting mixed in."""
    pass

class SomeHook:
    """Event hook that stamps every response with an x-example header."""
    def on_response(self, response: http.Response) -> None:
        response.headers["x-example"] = "example"

def index():
    """Root handler; returns an empty JSON object."""
    return {}

# Application under test: one route plus the header-stamping hook.
app = App(
    routes=[Route("/", "GET", index)],
    event_hooks=[SomeHook()],
)
def test_mixin_doesnt_interfere_with_response_injection():
    """The Sentry mixin must not break event-hook response mutation."""
    # Given that I have a test client
    client = TestClient(app)
    # When I request the index handler
    response = client.get("/")
    # Then I expect SomeHook to populate the headers
    assert response.headers["x-example"] == "example"
|
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import random
import time
plt.style.use('ggplot')
def binarySearch(arr, l, r, x, c):
    """Binary search for x in sorted arr between indices l and r (inclusive),
    incrementing the step counter c as the original instrumentation did.

    Returns the final step count whether or not x was found (the count is
    what the surrounding plotting code graphs, not the index).
    """
    c += 1                       # entry step
    while l <= r:
        c += 1                   # one step per loop iteration
        mid = l + (r - l) // 2
        if arr[mid] == x:
            return c + 1         # found: count the final comparison and stop
        if arr[mid] < x:
            c += 1
            l = mid + 1
        else:
            c += 1
            r = mid - 1
    return c
# Step counts and wall-clock timings per input size, for each scenario.
avgcase=[]
bstcase=[]
wrtcase=[]
avgtime=[]
bsttime=[]
wrttime=[]
times=[]
for i in range(10,101,10):
    times.append(i)
    arr=[j for j in range(i)]
    # Average case: search for a uniformly random existing element.
    search=arr[random.randint(0,i-1)]
    avg_start_time=time.time()
    avgcount=binarySearch(arr,0,i-1,search,0)
    avg_fin_time=avg_start_time-time.time()
    avg_fin_time*=-1
    avgtime.append(avg_fin_time)
    # Best case: the target sits exactly at the first midpoint.
    bst_start_time=time.time()
    bstcount=binarySearch(arr,0,i-1,arr[(i-1)//2],0)
    bst_fin_time=bst_start_time-time.time()
    bst_fin_time*=-1
    bsttime.append(bst_fin_time)
    # Worst case: the target is absent (greater than every element).
    wrt_start_time=time.time()
    wrtcount=binarySearch(arr,0,i-1,i,0)
    wrt_fin_time=wrt_start_time-time.time()
    wrt_fin_time*=-1
    wrttime.append(wrt_fin_time)
    bstcase.append(bstcount)
    avgcase.append(avgcount)
    wrtcase.append(wrtcount)
# Plot step counts for the three scenarios and save to disk (Agg backend).
plt.plot(times,avgcase,label="average case")
plt.scatter(times,avgcase)
plt.plot(times,bstcase,label="best case")
plt.scatter(times,bstcase)
plt.plot(times,wrtcase,label="worst case")
plt.scatter(times,wrtcase)
plt.legend()
plt.xlabel('test cases')
plt.ylabel('number of steps')
plt.savefig('binary_search.png')

# ============= SPACE COMPLEXITY ==============
# Space complexity is the total space taken by the algorithm with respect to
# input size, including both auxiliary space and the space used by the input.
# Summing all memory used (variables, arrays, function calls): the array of
# size n dominates, so the space complexity is O(n).
# Counting only auxiliary (temporary) space, the algorithm uses just a few
# scalars (mid, the bounds, the search element), i.e. O(1).
import numpy as np
array = np.random.rand(100)
array[5] = np.nan
# np.max propagates NaN, so the result is nan -- not the true maximum.
print(np.max(array))
# nan
# np.nanmax ignores NaN entries and returns the correct maximum.
print(np.nanmax(array))
# 0.992949280963
|
# Python 2 script: raw_input returns the typed line as a str.
startStr = raw_input("Where to start? > ")
endStr = raw_input("Where to end? > ")
byStr = raw_input("Count by > ")

start = int(startStr)
end = int (endStr)
by = int(byStr)

# awesome solution: range already produces the whole sequence (py2: a list)
print range(start, end, by)

# actual solution: manual counting loop, one value per line
curr = start
while curr < end:
    print curr
    curr += by
import pandas as pd
import numpy as np
# Daily returns for btc/eth/xrp; columns: Date, btc, eth, xrp.
return_dataset = pd.read_csv('processed_data/returns.csv')
# Fix the seed so the Monte Carlo draws are reproducible.
np.random.seed(42)
# get the index from the df for 2018-01-01 (start of the evaluation window)
i_2018 = return_dataset[return_dataset['Date']=='2018-01-01'].index[0]
return_all = return_dataset[['btc', 'eth', 'xrp']].to_numpy()
# total number of time points
T = len(return_dataset)
# Collector for realised returns and 95% VaR estimates per date.
df_mc_95 = pd.DataFrame({"Date":[],
"btc":[],"var_95_btc":[],
"eth":[],"var_95_eth":[],
"xrp":[],"var_95_xrp":[]})
# Run Monte Carlo Simulations based on the predicted Volatility & assumed mean 0
# number of simulations; rows are keyed by i, so each n_sims pass overwrites
# the previous one and the file saved at the end reflects that n_sims only.
for n_sims in [1000,10000,100000]:
    for i in range (i_2018, T):
        # past 7 days data (rolling window ending just before day i)
        train_data = return_all[i - 7:i]
        # mean for respective crypto-currency
        mean_all = np.mean(train_data, axis=0)
        mean_btc = mean_all[0]
        mean_eth = mean_all[1]
        mean_xrp = mean_all[2]
        # std for respective crypto-currency
        std_all = np.std(train_data, axis=0)
        std_btc = std_all[0]
        std_eth = std_all[1]
        std_xrp = std_all[2]
        # Simulate n_sims one-day returns per asset from N(mean, std).
        sim_returns_btc = np.random.normal(mean_btc, std_btc, n_sims)
        sim_returns_eth = np.random.normal(mean_eth, std_eth, n_sims)
        sim_returns_xrp = np.random.normal(mean_xrp, std_xrp, n_sims)
        # 95% VaR = negated 5th percentile of the simulated return distribution.
        v_var_95_btc = -np.percentile(sim_returns_btc, 5)
        v_var_95_eth = -np.percentile(sim_returns_eth, 5)
        v_var_95_xrp = -np.percentile(sim_returns_xrp, 5)
        # Realised returns for day i (positional: Date, btc, eth, xrp).
        returns_btc_eth_xrp = return_dataset.iloc[i,:]
        date = returns_btc_eth_xrp[0]
        v_return_btc = returns_btc_eth_xrp[1]
        v_return_eth = returns_btc_eth_xrp[2]
        v_return_xrp = returns_btc_eth_xrp[3]
        update_data = [date,v_return_btc,v_var_95_btc, v_return_eth,v_var_95_eth, v_return_xrp,v_var_95_xrp]
        df_mc_95.loc[i] = update_data
    # Persist this n_sims pass to its own spreadsheet.
    df_mc_95.to_excel("results/mc_%d_95.xlsx"%n_sims,index=False)
# -*- coding: utf-8 -*-
"""
// Copyright 2020 PDF Association, Inc. https://www.pdfa.org
//
// This material is based upon work supported by the Defense Advanced
// Research Projects Agency (DARPA) under Contract No. HR001119C0079.
// Any opinions, findings and conclusions or recommendations expressed
// in this material are those of the author(s) and do not necessarily
// reflect the views of the Defense Advanced Research Projects Agency
// (DARPA). Approved for public release.
//
// SPDX-License-Identifier: Apache-2.0
//
// Generates a 3D/VR visualization JSON file for use with "3D Force-graph"
// from the "referencesGraph.json" file.
//
// See https://github.com/vasturiano/3d-force-graph/
//
// Author: Peter Wyatt
"""
import json

# Build a 3D Force-Graph visualization JSON (nodes + links) from
# referencesGraph.json. See https://github.com/vasturiano/3d-force-graph/
# Use a context manager so the input file handle is closed deterministically
# (it was previously left open for the life of the process).
with open("referencesGraph.json") as jfile:
    indata = json.load(jfile)
normrefs = indata["ISO32000_2_DB"]

# One graph node per document.
nodes = []
for doc in normrefs:
    n = {}
    n["id"] = doc["id"]
    n["name"] = doc["title"]
    # Size of planet node is proportional to the square of the number of out-going references
    n["val"] = len(doc["refs"]) * len(doc["refs"])
    # Short display name: explicit label if present, else "org[, stid][, date]",
    # else fall back to the full title.
    if "label" in doc:
        n["short"] = doc["label"]
    elif "orgs" in doc and doc["orgs"]:
        org = doc["orgs"][0]
        s = org["org"]
        if "stid" in org:
            s += ", " + org["stid"]
        if "date" in doc:
            s += ", " + doc["date"]
        n["short"] = s
    else:
        n["short"] = doc["title"]
    # Rough grouping (node colour) keyed on the first org in the orgs array.
    if "orgs" in doc and doc["orgs"]:
        n["group"] = doc["orgs"][0]["org"]
    else:
        n["group"] = "Other"
    nodes.append(n)

# One link per outgoing reference.
links = []
for doc in normrefs:
    for ref in doc["refs"]:
        lnk = {}
        lnk["source"] = doc["id"]
        lnk["target"] = ref
        # Make all 1st-order links from PDF 2.0 (id 0) red.
        if doc["id"] == 0:
            lnk["color"] = "red"
        # Group links like nodes. Also guard against an EMPTY "orgs" list:
        # the node loop above checked doc["orgs"] is non-empty but this loop
        # previously did not, so doc["orgs"][0] could raise IndexError.
        if "orgs" in doc and doc["orgs"]:
            lnk["group"] = doc["orgs"][0]["org"]
        else:
            lnk["group"] = "Other"
        links.append(lnk)

outdata = {}
outdata["nodes"] = nodes
outdata["links"] = links
with open("pdf20-norm-refs.json", 'w') as outfile:
    json.dump(outdata, outfile, indent=4)
|
from datetime import datetime
class LogUtil:
    """Tiny stdout logger that stamps each message with a timestamp and a fixed tag."""
    def __init__(self, log_prefix):
        # Tag prepended to every message emitted by log().
        self.prefix = log_prefix
    def log(self, message):
        # Emits: "<timestamp> <prefix>: <message>".
        stamp = datetime.now()
        line = "{0} {1}: {2}".format(stamp, self.prefix, message)
        print(line)
def prime_factorize(n):
    """Return the prime factors of n in non-decreasing order (empty list for n == 1)."""
    factors = []
    # Strip out all factors of two first so trial division can use odd candidates only.
    while n % 2 == 0:
        factors.append(2)
        n //= 2
    # Trial division by odd candidates up to sqrt(n).
    candidate = 3
    while candidate * candidate <= n:
        if n % candidate:
            candidate += 2
        else:
            factors.append(candidate)
            n //= candidate
    # Whatever remains (other than 1) is itself prime.
    if n != 1:
        factors.append(n)
    return factors
# Read N and greedily count how many distinct values N's prime factorization
# can be split into: for each prime p take p, then p^2, p^3, ... while the
# running product is new.
N = int(input())
primes = prime_factorize(N)
num = 0
hist = [1]
c = 1
tmp = 1
for p in primes:
    # Prime changed from the previous one: restart the running product.
    if tmp != p:
        c = 1
        tmp = p
    c *= p
    # Count c only the first time this value appears; then restart the product.
    if c not in hist:
        hist.append(c)
        num += 1
        c = 1
print(num)
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Result table populated (via .loc) by draw_read(); one row per measured run.
df = pd.DataFrame(columns=('Type', 'Ratio', 'Times', 'gid', 'k', 'Number of CPU Cores', 'Memory Constraints', 'Isolation Level', 'Lower Bound', 'Upper Bound'))
# Experiment parameter grids; tests[k] = (cores index, memory index, isolation index).
cores = [2, 3, 4, 5, 6]
mem = [1, 0.95, 0.9, 0.85, 0.8, 0.75]
isol = [1.5, 3, 5, 7.5, 10.5]
tests = [[0, 2, 1], [1, 2, 1], [2, 2, 1], [3, 2, 1], [4, 2, 1], [2, 0, 1], [2, 1, 1], [2, 3, 1], [2, 4, 1], [2, 5, 1], [2, 2, 0], [2, 2, 2], [2, 2, 3], [2, 2, 4]]
def draw_read():
    """Populate the global ``df`` with results read from ./graph<gid>/<k>/ files.

    For each graph id and test configuration k, reads one ratio file and one
    "open" timing file per scheduler type (plus shared lower/upper bounds) and
    appends one df row per measurement.  The previous version repeated the
    file-reading boilerplate twelve times; it is factored into a helper here.
    """
    def _read_floats(path):
        # One float per line.
        with open(path, 'r') as f:
            return [float(line.strip()) for line in f.readlines()]

    # (df label, ratio file, open-timing file) per scheduler type,
    # in the original row order: CPF, ICRB, STO, Rand.
    series = [
        ('CPF', 'fb.txt', 'fb_open.txt'),
        ('ICRB', 'i2c.txt', 'i2c_open.txt'),
        ('STO', 'inorder.txt', 'inorder_open.txt'),
        ('Rand', 'random.txt', 'random_open.txt'),
    ]
    cnt = 0
    for gid in [1, 2, 3, 4]:
        for k in range(14):
            path = './graph' + str(gid) + '/' + str(k) + '/'
            lower = _read_floats(path + 'lower.txt')
            upper = _read_floats(path + 'upper.txt')
            for label, ratio_file, open_file in series:
                ratios = _read_floats(path + ratio_file)
                open_times = _read_floats(path + open_file)
                for i, x in enumerate(ratios):
                    df.loc[cnt] = [label, x, open_times[i], gid, k,
                                   cores[tests[k][0]], mem[tests[k][1]],
                                   isol[tests[k][2]], lower[i], upper[i]]
                    cnt += 1
            print(gid, k)
    return
if __name__ == '__main__':
    # Populate df from the on-disk results, then persist it for later plotting.
    draw_read()
    df.to_csv('./df_heft.csv')
# --- Variables and strings ---
exampleString = "Hello World"
print(exampleString)
print(type(exampleString))

# String interpolation with an f-string.
myName = "Chris Ritter"
myAge = 23
myBirthDay = "05/11/1997"
myIntro = (
    f"Hello my name is {myName} and I am {myAge}."
    f" I was born on {myBirthDay}."
)
print(myIntro)

# --- Lists (may hold mixed types, including a nested list) ---
listOfDifferentTypes = [0, 1.0, "Some String", [0, 1, 2, 3]]
|
import yaml
import random
import string
def GetZonesList(context):
    """Return the GCE zones whose boolean flag is set in context.properties.

    Preserves the original if-chain behaviour exactly: zones come back in the
    fixed priority order below, a missing flag key raises KeyError, and an
    AssertionError is raised when no zone is selected.
    """
    # (property key, zone name) pairs, in the original priority order.
    zone_flags = [
        ('usEast1b', 'us-east1-b'),
        ('usEast1c', 'us-east1-c'),
        ('usEast1d', 'us-east1-d'),
        ('usCentral1a', 'us-central1-a'),
        ('usCentral1b', 'us-central1-b'),
        ('usCentral1c', 'us-central1-c'),
        ('usCentral1f', 'us-central1-f'),
        ('europeWest1b', 'europe-west1-b'),
        ('europeWest1c', 'europe-west1-c'),
        ('europeWest1d', 'europe-west1-d'),
        ('asiaEast1a', 'asia-east1-a'),
        ('asiaEast1b', 'asia-east1-b'),
        ('asiaEast1c', 'asia-east1-c'),
    ]
    zones = [zone for prop, zone in zone_flags if context.properties[prop]]
    assert len(zones) > 0, 'No zones selected for DataStax Enterprise nodes'
    return zones
def GenerateConfig(context):
    """Build the Deployment Manager config for a multi-zone DSE cluster.

    Python 2 module (note the xrange below).  Assembles three resources -- a
    GCS bucket holding a throwaway SSH key pair, the zonal DSE node clusters,
    and an OpsCenter VM whose startup script drives the LCM install -- and
    returns the whole config serialized as a YAML string.
    """
    config = {'resources': []}
    # Set zones list based on zone booleans.
    if ('zones' not in context.properties or len(context.properties['zones']) == 0):
        context.properties['zones'] = GetZonesList(context)
    # Set zone property to match ops center zone. Needed for calls to common.MakeGlobalComputeLink.
    context.properties['zone'] = context.properties['opsCenterZone']
    cluster_name = 'clusters-' + context.env['name']
    # Generate a random bucket name
    bucket_suffix = ''.join([random.choice(string.ascii_lowercase + string.digits) for n in xrange(10)])
    sshkey_bucket = context.env['deployment'] + '-ssh-pub-key-bucket-' + bucket_suffix
    # DSE version
    dse_version = context.properties['dseVersion']
    # Set cassandra's user password
    db_pwd = context.properties['cassandraPwd']
    # Set DataStax Academy credentials
    dsa_username = context.properties['dsa_username']
    dsa_password = context.properties['dsa_password']
    # Set default OpsCenter Admin password
    opsc_admin_pwd = context.properties['opsCenterAdminPwd']
    # Set DC size, number of DCs and cluster's size
    dc_size = context.properties['nodesPerZone']
    num_dcs = len(context.properties['zones'])
    cluster_size = dc_size * num_dcs
    # NOTE(review): seed_nodes_dns_names is computed but never used below -- confirm.
    seed_nodes_dns_names = context.env['deployment'] + '-' + context.properties['zones'][0] + '-1-vm.c.' + context.env[
        'project'] + '.internal.'
    opscenter_node_name = context.env['deployment'] + '-opscenter-vm'
    opscenter_dns_name = opscenter_node_name + '.c.' + context.env['project'] + '.internal.'
    # Prepare a storage bucket to store our randomly generated SSH key pair for LCM's DSE install
    ssh_pub_key_bucket = {
        'name': sshkey_bucket,
        'type': 'storage.v1.bucket',
        'properties': {
            'name': sshkey_bucket,
        }
    }
    config['resources'].append(ssh_pub_key_bucket)
    # Script to run inside a DSE node during instance instantiation
    dse_node_script = '''
#!/usr/bin/env bash
# If dse already installed, do nothing
dpkg -s dse &> /dev/null
retVal=$?
if [ $retVal -eq 0 ]; then
exit 0
fi
# Prepare for fresh DSE installation
mkdir /mnt
mkfs.ext4 -m 0 -F -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/disk/by-id/google-${HOSTNAME}-data-disk
mount -o discard,defaults /dev/disk/by-id/google-${HOSTNAME}-data-disk /mnt
echo "/dev/disk/by-id/google-${HOSTNAME}-data-disk /mnt ext4 discard,defaults 0 2" | tee -a /etc/fstab
mkdir -p /mnt/data1
mkdir -p /mnt/data1/data
mkdir -p /mnt/data1/saved_caches
mkdir -p /mnt/data1/commitlog
mkdir -p /mnt/data1/dsefs
chmod -R 777 /mnt/data1
##### Install DSE the LCM way
cd ~ubuntu
release="master"
gsutil cp gs://dse-gcp-marketplace/dse-gcp-install-$release.tar.gz .
tar -xvf dse-gcp-install-$release.tar.gz
# install extra OS packages
pushd dse-gcp-install-$release
./extra_packages.sh
./install_java.sh -o
popd
public_ip=`curl --retry 10 icanhazip.com`
private_ip=`echo $(hostname -I)`
node_id=$private_ip
cluster_name=''' + cluster_name + '''
rack="rack1"
db_pwd=''' + db_pwd + '''
zone=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/zone" | grep -o [[:alnum:]-]*$)
data_center_name=$zone
# Retrieve internal OPSC IP address
opscenter_dns_name=''' + opscenter_dns_name + '''
opsc_ip=`dig +short $opscenter_dns_name`
# Grab lcm_pem.pub pubilc key from Google Cloud Storage
cd ~ubuntu/.ssh/
sshkey_bucket=''' + sshkey_bucket + '''
gsutil cp gs://$sshkey_bucket/lcm_pem.pub .
while [ $? -ne 0 ]
do
sleep 1s
gsutil cp gs://$sshkey_bucket/lcm_pem.pub .
done
chown ubuntu:ubuntu lcm_pem.pub
cat lcm_pem.pub >> authorized_keys
pushd ~ubuntu/dse-gcp-install-$release
opsc_admin_pwd=''' + opsc_admin_pwd + '''
./addNode.py \
--opscpw $opsc_admin_pwd \
--opsc-ip $opsc_ip \
--clustername $cluster_name \
--dcname $data_center_name \
--rack $rack \
--pubip $private_ip \
--privip $private_ip \
--nodeid $node_id \
popd
'''
    # NOTE(review): in the script above, the trailing backslash after
    # "--nodeid $node_id \" continues the command onto the "popd" line,
    # making "popd" an argument to addNode.py -- confirm this is intended.
    # The per-zone DSE node groups (regional_multi_vm template).
    zonal_clusters = {
        'name': 'clusters-' + context.env['name'],
        'type': 'regional_multi_vm.py',
        'properties': {
            'sourceImage': 'https://www.googleapis.com/compute/v1/projects/datastax-public/global/images/datastax-enterprise-ubuntu-1604-xenial-v20180824',
            'zones': context.properties['zones'],
            'machineType': context.properties['machineType'],
            'network': context.properties['network'],
            'numberOfVMReplicas': context.properties['nodesPerZone'],
            'disks': [
                {
                    'deviceName': 'vm-data-disk',
                    'type': 'PERSISTENT',
                    'boot': 'false',
                    'autoDelete': 'true',
                    'initializeParams': {
                        'diskType': context.properties['dataDiskType'],
                        'diskSizeGb': context.properties['diskSize']
                    }
                }
            ],
            'bootDiskType': 'pd-standard',
            'bootDiskSizeGb': 20,
            'metadata': {
                'items': [
                    {
                        'key': 'startup-script',
                        'value': dse_node_script
                    }
                ]
            }
        }
    }
    # Startup script for the OpsCenter VM: installs OpsCenter, publishes the
    # LCM SSH public key, then provisions and installs the whole cluster.
    opscenter_script = '''
#!/usr/bin/env bash
# If opscenter already installed, do nothing
dpkg -s opscenter &> /dev/null
retVal=$?
if [ $retVal -eq 0 ]; then
exit 0
fi
# Prepare for fresh OpsCenter installation
cd ~ubuntu
release="master"
gsutil cp gs://dse-gcp-marketplace/dse-gcp-install-$release.tar.gz .
tar -xvf dse-gcp-install-$release.tar.gz
# install extra OS packages, Java, and OpsCenter
pushd dse-gcp-install-$release
./extra_packages.sh
./install_java.sh -o
./installOpsc.sh
# Update password for default DSE OpsCenter administrator (admin)
opsc_admin_pwd=''' + opsc_admin_pwd + '''
./set_opsc_pw_https.sh $opsc_admin_pwd
popd
# Generate lcm_pem private and pubilc keys
pushd ~ubuntu/.ssh/
ssh-keygen -t rsa -N '' -f lcm_pem
chown ubuntu:ubuntu lcm_pem*
privkey=$(readlink -f ~ubuntu/.ssh/lcm_pem)
sshkey_bucket=''' + sshkey_bucket + '''
gsutil cp ./lcm_pem.pub gs://$sshkey_bucket/
popd
# Set up cluster in OpsCenter the LCM way
cd ~ubuntu/dse-gcp-install-$release
# Generate cluster name
cluster_name=''' + cluster_name + '''
# Generate number of DCs
num_dcs=''' + str(num_dcs) + '''
# Generate cluster size
cluster_size=''' + str(cluster_size) + '''
# DSE version
dse_version=''' + dse_version + '''
# Generate cassandra user's password
db_pwd=''' + db_pwd + '''
# Generate DataStax Academy credentials
dsa_username=''' + dsa_username + '''
dsa_password=''' + dsa_password + '''
# Retrieve OpsCenter's public IP address
private_ip=`echo $(hostname -I)`
sleep 1m
./setupCluster.py --user ubuntu --pause 60 --opscpw $opsc_admin_pwd --trys 40 --opsc-ip $private_ip --clustername $cluster_name --privkey $privkey --datapath /mnt/data1 --repouser $dsa_username --repopw $dsa_password --dbpasswd $db_pwd --dsever $dse_version
./triggerInstall.py --opsc-ip $private_ip --opscpw $opsc_admin_pwd --clustername $cluster_name --clustersize $cluster_size
./waitForJobs.py --num 1 --opsc-ip $private_ip --opscpw $opsc_admin_pwd
# Alter required keyspaces for multi-DC
./alterKeyspaces.py --opscpw $opsc_admin_pwd --delay 60 >> ../../repair.log &
# Remove public key from Google cloud storage bucket
gsutil rm gs://$sshkey_bucket/lcm_pem.pub
'''
    # (Re-assignment; same value already computed above.)
    opscenter_node_name = context.env['deployment'] + '-opscenter-vm'
    opscenter_node = {
        'name': opscenter_node_name,
        'type': 'vm_instance.py',
        'properties': {
            'instanceName': opscenter_node_name,
            'sourceImage': 'https://www.googleapis.com/compute/v1/projects/datastax-public/global/images/datastax-enterprise-ubuntu-1604-xenial-v20180824',
            'zone': context.properties['opsCenterZone'],
            'machineType': context.properties['machineType'],
            'network': context.properties['network'],
            'bootDiskType': 'pd-standard',
            'serviceAccounts': [{
                'email': 'default',
                'scopes': ['https://www.googleapis.com/auth/compute', 'https://www.googleapis.com/auth/devstorage.full_control']
            }],
            'metadata': {
                'items': [
                    {
                        'key': 'startup-script',
                        'value': opscenter_script
                    }
                ]
            }
        }
    }
    config['resources'].append(zonal_clusters)
    config['resources'].append(opscenter_node)
    first_enterprise_node_name = context.env['deployment'] + '-' + context.properties['zones'][0] + '-1-vm'
    # Deployment outputs consumed by the marketplace console.
    outputs = [
        {
            'name': 'project',
            'value': context.env['project']
        },
        {
            'name': 'opsCenterNodeName',
            'value': opscenter_node_name
        },
        {
            'name': 'firstEnterpriseNodeName',
            'value': first_enterprise_node_name
        },
        {
            'name': 'firstEnterpriseNodeSelfLink',
            'value': '$(ref.' + first_enterprise_node_name + '.selfLink)'
        },
        {
            'name': 'zoneList',
            'value': ', '.join(context.properties['zones'])
        },
        {
            'name': 'x-status-type',
            'value': 'console'
        },
        {
            'name': 'x-status-instance',
            'value': opscenter_node_name
        }
    ]
    config['outputs'] = outputs
    return yaml.dump(config)
|
#!C:\Users\Lee\AppData\Local\Programs\Python\Python38-32\python.exe
### Python AI Script
### Author: Lee Hughs
### Date: 2020/02/01
import sys
# Evaluation weights: own pieces/kings score positive, enemy pieces/kings negative.
p_weight = 1    # own pawn
ep_weight = -1  # enemy pawn
k_weight = 2    # own king
ek_weight = -2  # enemy king
class MoveTree:
    """Container for a sequence of moves, seeded with the source move at slot 0."""
    def __init__(self, src):
        # Slot 0 always holds the originating move/board.
        self.moves = {0: src}
class Node:
    """One game-tree node: a board state plus minimax bookkeeping."""
    def __init__(self, board, player, move = None, parent = None):
        self.board = board
        self.score = None      # backed-up fitness value, filled in by mini_max
        self.player = player   # side to move from this state ("Bb" or "Rr")
        self.move = move       # move that produced this state (None at the root)
        self.parent = parent
        self.children = []     # populated by fill_tree
def usage():
    """Print command-line usage for the checkers AI script."""
    print('''Script usage: checkers.py [board state as csv] [player as Rr:Bb] ''')
def mini_max(node, isMax, alpha, beta):
    """Alpha-beta minimax over a pre-built game tree.

    Returns the backed-up fitness value for `node` and records it in
    node.score so the caller can match the best child by score.
    """
    # Leaf: evaluate the board directly.  The score is now recorded on the
    # node as well; previously leaves kept score=None, so a leaf child of the
    # root could never be matched against the returned best value.
    if(len(node.children) == 0):
        node.score = get_fitness(node.board)
        return node.score
    if(isMax):
        bestVal = float('-inf')
        for child in node.children:
            value = mini_max(child, False, alpha, beta)
            bestVal = max(bestVal, value)
            alpha = max(alpha, bestVal)
            if beta <= alpha:
                break  # beta cutoff: minimizer would never allow this line
        node.score = bestVal
        return bestVal
    else:
        bestVal = float('inf')
        for child in node.children:
            value = mini_max(child, True, alpha, beta)
            bestVal = min(bestVal, value)
            beta = min(beta, bestVal)
            if(beta <= alpha):
                break  # alpha cutoff
        node.score = bestVal
        return bestVal
#fill a tree with the possible board states/moves at "depth" moves from now
def fill_tree(root, depth):
    """Recursively expand `root` with all reachable states up to `depth` plies."""
    if(depth == 0):
        return
    moves = []
    enemy = ("Rr" if (root.player == "Bb") else "Bb")
    #enumerate through board looking for valid pieces to move
    for (i, row) in enumerate(root.board):
        for (j, value) in enumerate(row):
            if(str(value) in root.player):
                # BUG FIX: generate moves from root.board, not the module-level
                # `board` -- the old code expanded every node from the original
                # position instead of the node's own position.
                moves.extend(get_valid_moves(root.board, root.player, i, j))
    #populate root's children list
    for move in moves:
        root.children.append(Node(execute_move(root.board, move, root.player), enemy, move, root))
    #recursively call this method on all the children
    for child in root.children:
        fill_tree(child, depth-1)
#return a list of valid moves for a given piece
def get_valid_moves(board, player, i, j):
    """Return every legal move for the piece at (i, j) as lists of [row, col] waypoints.

    Simple moves are [[from], [to]]; jump paths list every landing square in
    order.  `player` is "Bb" or "Rr"; kings ("B"/"R") may also move backwards.
    """
    moves = []
    #board = [row[:] for row in board_ref] deep cloning not needed here
    # Pawn direction: "Bb" advances down the board (+1 row), "Rr" up (-1 row).
    dirx = ( 1 if (player == "Bb") else -1)
    #forward right check
    if(i + dirx >= 0 and i + dirx < 8 and j + 1 >= 0 and j + 1 < 8):
        if(board[i+dirx][j+1] == None):
            moves.append([[i,j],[i+dirx,j+1]])
    #forward left check
    if(i + dirx >= 0 and i + dirx < 8 and j - 1 >= 0 and j - 1 < 8):
        if(board[i+dirx][j-1] == None):
            moves.append([[i,j],[i+dirx,j-1]])
    #backwards checks (kings only: uppercase "B"/"R")
    if(str(board[i][j]) in "BR"):
        #backward right check
        if(i - dirx >= 0 and i - dirx < 8 and j + 1 >= 0 and j + 1 < 8):
            if(board[i-dirx][j+1] == None):
                moves.append([[i,j],[i-dirx,j+1]])
        #backward left check
        if(i - dirx >= 0 and i - dirx < 8 and j - 1 >= 0 and j - 1 < 8):
            if(board[i-dirx][j-1] == None):
                moves.append([[i,j],[i-dirx,j-1]])
    # get_valid_jumps returns individual jump segments; stitch any segment that
    # does not start at (i, j) onto the earlier segment it continues from.
    jumps = (get_valid_jumps(board, player, i, j))
    #clean jumps into solid paths
    for (k,jump) in enumerate(jumps):
        if (jump[0][0] == i and jump[0][1] == j):
            moves.append(jump)
        else:
            while(not(jump[0][0] == i and jump[0][1] == j)):
                # Walk backwards through earlier segments to find the one
                # whose landing square matches this segment's start.
                for con_jump in reversed(jumps[:k]):
                    if(con_jump[1][0] == jump[0][0] and con_jump[1][1] == jump[0][1]):
                        jump.insert(0, con_jump[0])
                        break
            moves.append(jump)
    return moves
def get_valid_jumps(board_ref, player, i, j):
    """Return all single jump segments reachable from (i, j), recursively.

    Each segment is [[from_row, from_col], [to_row, to_col]]; multi-jumps show
    up as several consecutive segments (stitched together by get_valid_moves).
    Works on a copy of the board and removes captured pieces from the copy so
    recursion does not re-capture them.
    """
    jumps = []
    # Deep-copy the rows so captures below don't mutate the caller's board.
    board = [row[:] for row in board_ref]
    dirx = ( 1 if (player == "Bb") else -1)
    enemy = ("Rr" if (player == "Bb") else "Bb")
    #forward right check: enemy adjacent and the landing square empty
    if( i + (2*dirx) >= 0 and i + (2*dirx) < 8 and j + 2 >= 0 and j + 2 < 8):
        if( str(board[i+dirx][j+1]) in enemy and board[i+(2*dirx)][j+2] == None):
            jumps.append([[i,j],[i+(2*dirx),j+2]])
            board[i+dirx][j+1] = None
            jumps.extend(get_valid_jumps(board, player, i+(2*dirx), j+2))
    #forward left check
    if( i + (2*dirx) >= 0 and i + (2*dirx) < 8 and j - 2 >= 0 and j - 2 < 8):
        if( str(board[i+dirx][j-1]) in enemy and board[i+(2*dirx)][j-2] == None):
            jumps.append([[i,j],[i+(2*dirx),j-2]])
            board[i+dirx][j-1] = None
            jumps.extend(get_valid_jumps(board, player, i+(2*dirx), j-2))
    #backwards checks (kings only: uppercase "B"/"R")
    if(str(board[i][j]) in "BR"):
        dirx = dirx * -1
        #backwards right check
        if( i + (2*dirx) >= 0 and i + (2*dirx) < 8 and j + 2 >= 0 and j + 2 < 8):
            if( str(board[i+dirx][j+1]) in enemy and board[i+(2*dirx)][j+2] == None):
                jumps.append([[i,j],[i+(2*dirx),j+2]])
                board[i+dirx][j+1] = None
                jumps.extend(get_valid_jumps(board, player, i+(2*dirx), j+2))
        #backwards left check
        if( i + (2*dirx) >= 0 and i + (2*dirx) < 8 and j - 2 >= 0 and j - 2 < 8):
            if( str(board[i+dirx][j-1]) in enemy and board[i+(2*dirx)][j-2] == None):
                jumps.append([[i,j],[i+(2*dirx),j-2]])
                board[i+dirx][j-1] = None
                jumps.extend(get_valid_jumps(board, player, i+(2*dirx), j-2))
    return jumps;
#return a copy of board after executing a given move
def execute_move(board_ref, move, player):
    """Return a new board with `move` applied; the input board is not mutated.

    `move` is a list of [row, col] waypoints: length-2 diagonal steps are
    simple moves, longer (or 2-apart) paths are jumps whose captured pieces
    sit midway between consecutive waypoints.  Promotes to king on the far row.
    """
    # Shallow-copy each row so writes below don't touch board_ref.
    board = [row[:] for row in board_ref]
    if(abs(move[0][0] - move[1][0]) == 1 and abs(move[0][1] - move[1][1]) == 1):
        # Simple one-square diagonal move.
        board[move[1][0]][move[1][1]] = board[move[0][0]][move[0][1]]
        board[move[0][0]][move[0][1]] = None
    else:
        # Jump path: clear each captured piece (the square midway between
        # consecutive waypoints), then relocate the jumping piece.
        for x in range(len(move)-1):
            board[(move[x][0] + move[x+1][0])//2][(move[x][1] + move[x+1][1])//2] = None
        board[move[len(move)-1][0]][move[len(move)-1][1]] = board[move[0][0]][move[0][1]]
        board[move[0][0]][move[0][1]] = None
    # King promotion: reaching the opponent's back row uppercases the piece.
    king_row = ( 0 if (player == "Rr") else 7)
    if(move[len(move)-1][0] == king_row):
        board[move[len(move)-1][0]][move[len(move)-1][1]] = board[move[len(move)-1][0]][move[len(move)-1][1]].upper()
    return board
# Fitness function: score a board state from `player`'s point of view.
def get_fitness(board, player = "Bb"):
    """Sum the global piece weights plus a small advancement bonus for pawns."""
    enemy = ("Rr" if (player == "Bb") else "Bb")
    score = float()
    for (i, row) in enumerate(board):
        for (j, value) in enumerate(row):
            if(value == None):
                continue  # empty square
            if(value == player[1]):
                # Own pawn: base weight plus progress toward the promotion row.
                score += p_weight
                score += (i+1)/8.0 if (player == "Bb") else (8-i)/8.0
            elif(value == player[0]):
                score += k_weight  # own king
            elif(value == enemy[1]):
                # Enemy pawn: mirror of the own-pawn bonus, subtracted.
                score += ep_weight
                score -= (i+1)/8.0 if (enemy == "Bb") else (8-i)/8.0
            elif(value == enemy[0]):
                score += ek_weight  # enemy king
    return score
#parse arguments, and start script
if __name__ == '__main__':
    #check the number of arguements is correct
    if(len(sys.argv) != 3):
        usage()
        exit(1)
    # Board arrives as a 64-entry CSV of piece codes ("b","B","r","R","null");
    # strip any double quotes first.
    board = sys.argv[1].replace("\"","").split(",")
    #change 1d array to 2d array
    board = [board[:8],board[8:16], board[16:24], board[24:32], board[32:40], board[40:48], board[48:56], board[56:64]]
    player = sys.argv[2]
    #change all instances of "null" string to None
    for (i, row) in enumerate(board):
        for (j, value) in enumerate(row):
            if(value == "null"):
                board[i][j] = None;
    # Build a 3-ply game tree, score it with alpha-beta minimax, and print the
    # first root child whose backed-up score matches the best value.
    root = Node(board, player)
    fill_tree(root, 3)
    score = mini_max(root, True, float('-inf'), float('inf'))
    move = None
    for child in root.children:
        if( child.score == score ):
            print(child.move)
            move = child
            break
|
"""Main product initializer
"""
from zope.i18nmessageid import MessageFactory
from ecreall.trashcan.events import ObjectTrashedEvent, ObjectRestoredEvent
from zope.event import notify
# i18n message factory for the ecreall.trashcan domain.
trashcanMessageFactory = MessageFactory('ecreall.trashcan')
# Expose the trashcan helpers below to restricted (through-the-web) PythonScripts.
from Products.PythonScripts.Utility import allow_module
allow_module('ecreall.trashcan.ITrashedProvidedBy')
allow_module('ecreall.trashcan.providesITrashed')
allow_module('ecreall.trashcan.noLongerProvidesITrashed')
allow_module('ecreall.trashcan.moveObjectsToTrashcanByPaths')
allow_module('ecreall.trashcan.restoreObjectsFromTrashcanByPaths')
allow_module('ecreall.trashcan.api.trash')
allow_module('ecreall.trashcan.api.restore')
allow_module('ecreall.trashcan.api.is_trashed')
import transaction
from zope.interface import alsoProvides, noLongerProvides
from zope.annotation.interfaces import IAnnotations
from OFS.interfaces import IFolder
from AccessControl import Unauthorized
from AccessControl.requestmethod import postonly
from ZODB.POSException import ConflictError
from Products.CMFPlone.utils import transaction_note
from Products.CMFCore.utils import getToolByName
# BooleanIndex is optional (depends on the Zope version); remember availability.
try:
    from Products.PluginIndexes.BooleanIndex.BooleanIndex import BooleanIndex
    HAS_BOOLEANINDEX = True
except ImportError:
    HAS_BOOLEANINDEX = False
from ecreall.trashcan.interfaces import ITrashed
# Annotation key under which per-object trash bookkeeping is stored.
KEY = 'ecreall.trashcan'
def ITrashedProvidedBy(context):
    """Return whether `context` currently provides the ITrashed marker interface."""
    return ITrashed.providedBy(context)
def providesITrashed(context):
    """Mark `context` (and, for folders, all children) as trashed.

    Keeps a per-object trash counter in annotations so nested trash/restore
    operations balance out, and remembers the original ExcludeFromNav flag so
    restoring can put it back.
    """
    annotations = IAnnotations(context)
    infos = annotations.get(KEY, {'count': 0})
    infos['count'] += 1
    # Remember navigation visibility before hiding the object.
    infos['ExcludeFromNav'] = context.getExcludeFromNav()
    annotations[KEY] = infos
    alsoProvides(context, ITrashed)
    context.setExcludeFromNav(True)
    notify(ObjectTrashedEvent(context))
    context.reindexObject(idxs=['trashed', 'object_provides'])
    # Recurse so folder contents are trashed along with the folder.
    if IFolder.providedBy(context):
        for obj in context.objectValues():
            providesITrashed(obj)
def noLongerProvidesITrashed(context):
    """Undo one level of trashing on `context` (and children, for folders).

    The ITrashed marker is only removed once the trash counter drops to zero,
    at which point the saved ExcludeFromNav state is restored.
    """
    annotations = IAnnotations(context)
    infos = annotations.get(KEY, {'count': 0})
    infos['count'] -= 1
    annotations[KEY] = infos
    if infos['count'] <= 0:
        noLongerProvides(context, ITrashed)
        context.setExcludeFromNav(infos.get('ExcludeFromNav', False))
    context.reindexObject(idxs=['trashed', 'object_provides'])
    notify(ObjectRestoredEvent(context))
    # Recurse so folder contents are restored along with the folder.
    if IFolder.providedBy(context):
        for obj in context.objectValues():
            noLongerProvidesITrashed(obj)
def pasteObject(obj, event):
    """Object-moved handler: forbid pastes into a trashcan, un-trash pasted objects."""
    if event.newParent is None:
        # RemovedObjectEvent: nothing to do for deletions.
        return
    if ITrashed.providedBy(event.newParent):
        raise Unauthorized("You can't paste into a trashcan")
    if ITrashed.providedBy(obj):
        # A trashed object pasted elsewhere becomes live again: reset the
        # trash counter and drop the marker interface.
        annotations = IAnnotations(obj)
        annotations[KEY] = {'count': 0}
        noLongerProvides(obj, ITrashed)
        obj.reindexObject(idxs=['trashed', 'object_provides'])
# Copied from PloneTool.py:deleteObjectsByPaths and adapted to move to trashcan
def moveObjectsToTrashcanByPaths(self, paths, handle_errors=True,
                                 REQUEST=None):
    """Trash every object addressed by `paths`; return (success, failure).

    `success` lists "title (path)" strings; `failure` maps path -> exception.
    With handle_errors=True each object is trashed inside a savepoint that is
    rolled back on error; otherwise the first exception propagates.
    """
    failure = {}
    success = []
    # use the portal for traversal in case we have relative paths
    portal = getToolByName(self, 'portal_url').getPortalObject()
    traverse = portal.restrictedTraverse
    for path in paths:
        # Skip and note any errors
        if handle_errors:
            sp = transaction.savepoint(optimistic=True)
        try:
            obj = traverse(path)
            providesITrashed(obj)
            success.append('%s (%s)' % (obj.title_or_id(), path))
        except ConflictError:
            # Never swallow ZODB conflicts; the transaction machinery retries.
            raise
        except Exception, e:
            if handle_errors:
                sp.rollback()
                failure[path] = e
            else:
                raise
    transaction_note('Moved to trashcan %s' % (', '.join(success)))
    return success, failure
# Restrict the helper to POST requests (CSRF hardening).
moveObjectsToTrashcanByPaths = postonly(moveObjectsToTrashcanByPaths)
def restoreObjectsFromTrashcanByPaths(self, paths, handle_errors=True,
                                      REQUEST=None):
    """Restore every restorable object addressed by `paths`; return (success, failure).

    Mirror image of moveObjectsToTrashcanByPaths: objects are restored only
    when obj.canRestore() allows it, with the same savepoint/rollback error
    handling.
    """
    failure = {}
    success = []
    # use the portal for traversal in case we have relative paths
    portal = getToolByName(self, 'portal_url').getPortalObject()
    traverse = portal.restrictedTraverse
    for path in paths:
        # Skip and note any errors
        if handle_errors:
            sp = transaction.savepoint(optimistic=True)
        try:
            obj = traverse(path)
            if obj.canRestore():
                noLongerProvidesITrashed(obj)
            success.append('%s (%s)' % (obj.title_or_id(), path))
        except ConflictError:
            # Never swallow ZODB conflicts; the transaction machinery retries.
            raise
        except Exception, e:
            if handle_errors:
                sp.rollback()
                failure[path] = e
            else:
                raise
    transaction_note('Restored %s' % (', '.join(success)))
    return success, failure
# Restrict the helper to POST requests (CSRF hardening).
restoreObjectsFromTrashcanByPaths = postonly(restoreObjectsFromTrashcanByPaths)
|
from shared.codejam_plumbing import GCJParsedInput, GCJOutputs
import re
# --- Run configuration for this Code Jam problem ---
_CodeJamRound = "2016.1B"
_Question = "A"
_AttemptNo = 1
_SmallLargeSample = 'large' # pick between 'sample', 'small' (requires attemptNo), 'large', 'practice'
assert _SmallLargeSample in ('sample', 'small', 'large', 'practice'), "Invalid configuration"
# Map each run mode to its (input file, output file) pair (Windows-style paths).
files = {'large': ('inputs\%s-large.in' % _Question, 'outputs\%s-large-output' % _Question),
'small': ('inputs\%s-small-attempt%s.in' % (_Question, _AttemptNo), 'outputs\%s-small-output' % _Question),
'sample': (r'inputs\%s-sample.in' % _Question, r'outputs\test_output.txt'),
'practice': (r'inputs\%s-small-practice.in' % _Question, r'outputs\%s-small-practice-output.txt' % _Question)}
f_in, f_out = files[_SmallLargeSample]
# Parse the input file (one line per case) and prepare the output writer.
scenarios = GCJParsedInput(file_path=f_in, len_type="fixed", len_function=1)
outfile = GCJOutputs(file_path=f_out, failure_response='xx')
def solve_case(inputs):
    """Recover the sorted digit string from a garbled spelling of digit words.

    inputs[0] is a line of shuffled letters from digit names ("zero".."nine").
    Each rule below keys a digit by a letter unique to it among the digits
    still unresolved at that point, so processing order matters.
    """
    garbled = str(inputs[0].strip('\n'))
    mangled = ''.join(sorted(garbled))
    print(mangled)
    # Letter-frequency table of the garbled input.
    unparsed = dict()
    parsed = ''
    for letter in mangled.lower():
        if letter in unparsed:
            unparsed[letter] += 1
        else:
            unparsed[letter] = 1
    print(unparsed)
    rules = (
        ('z', 'zero', '0'), ('w', 'two', '2'), ('x', 'six', '6'), ('g', 'eight', '8'),
        ('h', 'three', '3'), ('r', 'four', '4'), ('f', 'five', '5'), ('v', 'seven', '7'),
        ('o', 'one', '1'), ('i', 'nine', '9')
    )
    for character, word, digit in rules:
        if character in unparsed:
            if unparsed[character] > 0:
                count = unparsed[character]
                parsed += digit*count
                for x in word:
                    unparsed[x] -= count
    sums = sum(x for x in unparsed.values())
    # Print the diagnostic BEFORE asserting; previously the assert fired first,
    # which made the diagnostic branch unreachable.
    if sums != 0:
        print(unparsed, garbled)
    assert sums == 0
    return "".join(sorted(parsed))

# Solve EVERY case inside the loop.  Previously solve_case was (re)defined in
# the loop body and the outfile[...] assignment sat after the loop, so only the
# final case's answer was ever computed and written.
for caseNo, caseData in scenarios:
    print(caseNo, caseData)
    outfile[caseNo+1] = solve_case(caseData)
outfile.save_results()
""" Contents of codejam.plumbing (pasted for completeness)"""
# class GCJOutputs:
# def __init__(self, file_path, failure_response=None, debug=False):
# self.file = file_path
# self.answers = dict()
# self.debugOn = debug
# self.answerPrefix = "Case #%s: "
# self.NoneSolution = failure_response
#
# def save_results(self):
# answers = list()
# for case in range(1, len(self.answers) + 1):
# caseNo, caseSolution = self.answers[case]
# if caseSolution is None:
# caseSolution = self.NoneSolution
# sOutput = self.answerPrefix + str(caseSolution)
# answers.append(sOutput % caseNo)
#
# with open(self.file, "a") as f:
# f.write('\n'.join(answers))
#
# def __setitem__(self, key, value):
# if value is None:
# if self.NoneSolution == 'raise':
# raise ValueError('case cannot have a solution of None')
# else:
# value = self.NoneSolution
# self.answers[key] = (key, value)
# if self.debugOn:
# print(self.answerPrefix % key, value)
#
# class GCJParsedInput:
# def __init__(self, file_path, len_type="fixed", len_function=1):
# with open(file_path) as inp:
# self.raw = inp.readlines()
# self.length = int(self.raw[0])
# self.scenarios = dict()
#
# cursor_at_line = 1
# while len(self.scenarios) < self.length:
# if len_type == "fixed":
# self.scenarios[len(self.scenarios)] = self.raw[cursor_at_line:cursor_at_line + len_function]
# cursor_at_line += len_function
# elif len_type == "function":
# x = int(self.raw[cursor_at_line].strip("\n"))
# self.scenarios[len(self.scenarios)] = self.raw[cursor_at_line:cursor_at_line + len_function(x)]
# cursor_at_line += len_function(x)
# else:
# raise ValueError
# print("Parsed", self.scenarios)
#
# def __len__(self):
# return self.length
#
# def __getitem__(self, item):
# return self.scenarios[item]
#
# def __iter__(self):
# for x in self.scenarios:
# yield x, self.scenarios[x]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 02:12:27 2020
@author: dmin
"""
from urllib.request import urlretrieve
from ols_reg_function import *
# Import pandas
import pandas as pd
# Assign url of file: url
# UCI ML repository: white-wine quality data set.
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv'
local_csv = 'winequality-white.csv'

# Fetch a local copy of the CSV, then fit an OLS regression of fixed acidity
# on three predictor columns using the helper from ols_reg_function.
urlretrieve(url, local_csv)
ols_reg('winequality-white', 'fixed acidity', ['citric acid', 'free sulfur dioxide', 'residual sugar'])
"""
From the US Constitution
Amendment IX: The enumeration in the Constitution, of certain rights, shall not be construed to deny or disparage others
retained by the people.
Algorithm: Checks if agent's actions hinder the process of a speedy public trial,
If this amendment is violated by the agent's actions:
return True
Else:
return False
Author: Stephen Zolnik
Github: https://github.com/szolnik3
Email: sjzolnik@hotmail.com
"""
class Ninth_Amendment:
    """Checker for Ninth Amendment violations by an agent's actions.

    The check currently never flags a violation; each check method returns
    a ``(violated, description)`` tuple.
    """

    def __init__(self):
        # Most recent agent action passed to run_amendment_9().
        self.agent_action = {}

    def violates_trial(self):
        """Return (False, description) — trial hindrance is never flagged yet."""
        trial = (False, "Amendment IX: Hinders a speedy trial")
        return trial

    # Call run_amendment_9 to check all points
    def run_amendment_9(self, agent_action):
        """Store the agent action and run the amendment checks.

        Returns the (violated, description) tuple from violates_trial().
        """
        self.agent_action = agent_action
        # Bug fix: was a Python 2 print statement (`print "..."`), which is a
        # SyntaxError under Python 3.
        print("__NINTH AMENDMENT__")
        self_destruct = self.violates_trial()
        return self_destruct
|
def fahrenheit_to_celsius(degrees_f):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius."""
    return (degrees_f - 32) / 1.8


if __name__ == "__main__":
    # Interactive use: read a Fahrenheit value and print both temperatures.
    f = float(input('请输入华氏温度:'))
    c = fahrenheit_to_celsius(f)
    print(f'{f:.2f}华氏度= {c:.2f}摄氏度')
"""
******************************************************************************
* Purpose: Write a program Calendar that takes the read month and year from user and prints the Calendar of the month.
*
* @author: Manjunath Mugali
* @version: 3.7
* @since: 21-01-2019
*
*******************************************************************************
"""
from Utility_DataStructure.DataStructureOperations import LinkedList
l1 = LinkedList()


class Calendar:
    """Reads a month and year from stdin and prints that month's calendar
    via the LinkedList utility.

    NOTE(review): everything below runs as a side effect of *defining* the
    class (there are no methods or instances); consider moving it into a
    function guarded by ``if __name__ == "__main__":``.
    """
    try:
        print("Enter the Value of the Month:")
        month = input()  # read month value
        # NOTE(review): month 0 passes this validation (only > 12 is
        # rejected) -- confirm whether 1..12 was intended.
        while not month.isdigit() or int(month) > 12:  # validating month
            # Bug fix: corrected the typo "Valida" in the user prompt.
            print("Please Provide Valid input for month")
            month = input()
        print("Enter the Value of Year")
        year = input()  # read year value
        # NOTE(review): `< 999` accepts the 3-digit year 999 itself;
        # presumably `< 1000` (4-digit years) was intended -- confirm.
        while not year.isdigit() or int(year) < 999:  # validating year
            print("Please Provide Valid Input")
            year = input()
        l1.printCalOfMonth(int(month), int(year))
    except ValueError:
        print("Please Enter Only Integer Value")
|
import sys
# Closest-to-zero pair problem (cf. BOJ 2470 "Two Solutions"): read `num`
# sorted values and print the pair whose sum is nearest to zero.
num = int(sys.stdin.readline())
solutes = sorted(list(map(int, sys.stdin.readline().split()))[:num])
# Best candidate so far: (|sum|, value_i, value_j), seeded with the two
# smallest values.
min_diff = (abs(solutes[0] + solutes[1]), solutes[0], solutes[1])
# NOTE(review): leftover debug output -- this extra printed line would make
# an online judge reject the submission; confirm and remove.
print(solutes)
# Binary-search solutes[idx+1:] for the value closest to `val`
# (val == -solutes[idx]); returns the (start, end) bracket around it.
def binary_search(idx, val):
    start = idx + 1
    end = len(solutes) - 1
    while start < end:
        mid = (start + end) // 2
        if mid == start or mid >= end:
            break
        if solutes[mid] < val:
            start = mid
        elif solutes[mid] > val:
            end = mid
        else:  # mid == val
            # Exact zero-sum pair found.
            # NOTE(review): exits WITHOUT printing the pair -- the commented
            # print below suggests output was intended here; confirm.
            # print(-val, solutes[mid])
            exit(0)
    return start, end
# For each value, bracket its negation and keep the pair with the smallest
# absolute sum seen so far.
for i in range(len(solutes)-1):
    s, e = binary_search(i, -solutes[i])
    diff_s = abs(solutes[i] + solutes[s])
    diff_e = abs(solutes[i] + solutes[e])
    if min_diff[0] > min(diff_s, diff_e):
        if diff_s < diff_e:
            min_diff = (diff_s, solutes[i], solutes[s])
        else:
            min_diff = (diff_e, solutes[i], solutes[e])
# Final answer: the two values of the best pair.
print(*min_diff[1:])
import math


def climb_days(a, b, v):
    """Days needed to reach height `v` when climbing `a` per day and
    slipping back `b` per night (assumes 0 <= b < a <= v).

    On the final day the climber reaches `v` and does not slip back, so the
    remaining (v - a) must be covered at a net (a - b) per full day.
    """
    return math.ceil((v - a) / (a - b)) + 1


if __name__ == "__main__":
    # Input: one line with the three integers a, b, v (extra tokens ignored).
    n = [int(i) for i in input().split()]
    print(climb_days(n[0], n[1], n[2]))
|
import argparse
from typing import Any, List, Optional, Tuple, Union
# pyre-ignore
from data_generator.cli_parser import convert_args, parse_inputs, verify
from data_generator.generator import assemble_data_generators
from data_generator.output import to_csv, to_excel, to_json
from data_generator.toml import get_input
def run_cli_inputs(args: argparse.Namespace) -> Union[Tuple[Any], int]:
    """Build data generators from arguments supplied directly on the CLI.

    Arguments:
        args {argparse.Namespace} -- args entered via CLI

    Returns:
        Union[tuple, int] -- (data generators dict, cli args dict), 1: if NOK
    """
    # Guard clause: bail out early when validation reports a problem.
    if verify(args) is not None:
        return 1
    cli_options = convert_args(args)
    print("--> Parsed CLI inputs converted to dictionary.\r\n")
    generators = assemble_data_generators(cli_options)
    print("--> Data generators created.\r\n")
    return (generators, cli_options)
def run_toml_inputs(args: argparse.Namespace) -> Union[List[Tuple[Any]], List[int]]:
    """Build data generators for every TOML input file named on the CLI.

    Arguments:
        args {argparse.Namespace} -- args entered via CLI

    Returns:
        Union[List[tuple], List[int]] -- list of (data generators dict, cli args dict), 1: if NOK
    """
    try:
        cli_options = convert_args(args)
        print("--> Parsed CLI inputs converted to dictionary.\r\n")
        toml_files = cli_options["toml"]
        if len(toml_files) == 0:
            return [1]
        collected = []
        for path in toml_files:
            file_conf = get_input(path)
            generators = assemble_data_generators(file_conf)
            print("--> Data generators created.\r\n")
            collected.append((generators, file_conf))
        return collected
    except Exception as e:
        # Best-effort: report the failure and signal NOK to the caller.
        print(f"Exception in func 'run_toml_inputs': {str(e)}")
        return [1]
def run_outputs(inputs: Union[Tuple[Any], int]) -> Optional[int]:
    """Generate rows from the prepared generators and write them to disk.

    Arguments:
        inputs {tuple} -- (data generators dict, cli args dict)
    """
    print("--> Data generation and saving starting... \n")
    if isinstance(inputs, tuple):
        generators, options = inputs[0], inputs[1]
        # Dispatch on the requested format; anything unrecognised falls
        # back to CSV, matching the historical behaviour.
        writers = {"json": to_json, "xlsx": to_excel}
        writer = writers.get(options["save_as"], to_csv)
        writer(generators, options["rows"], options["folder"])
        print(f"""\n--> FINISHED. Find your data at '{options["folder"]}' folder.""")
    if isinstance(inputs, int):
        print(f"Could not generate data and save them. Feeding func returned {inputs}")
        return 1
    return None
def main() -> Union[List[int], List[None]]:
    """Entry point: parse the CLI and run every requested generation mode."""
    args = parse_inputs()
    print("--> CLI input parsed\r\n")
    outcomes = []
    if hasattr(args, "specify"):
        prepared = run_cli_inputs(args)
        outcomes.append(run_outputs(prepared))
    if hasattr(args, "toml"):
        for prepared in run_toml_inputs(args):
            outcomes.append(run_outputs(prepared))
    return outcomes


if __name__ == "__main__":
    main()
|
# --- Gameplay tuning ---------------------------------------------------
NUM_WRONG_GUESSES = 3  # wrong guesses allowed per round
NUM_GOOD_GUESSES = 3  # correct guesses required -- presumably to win a round; confirm
POINTS_PER_GOOD_GUESS = 2
POINTS_PER_WRONG_GUESS = -1
# Single lowercase letters, rejected as guesses (see NUMBERS_RESPONSES).
LETTERS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
           'w', 'x', 'y', 'z']
NUMBERS = set("1234567890")  # digit characters, also rejected as guesses
# --- Database configuration (presumably MongoDB; confirm) --------------
HOST = None  # DB host; None presumably selects a local default -- TODO confirm
# NOTE(review): "WikiTrivi_DB" looks like a typo of "WikiTrivia_DB", but it
# is a live database name -- rename only with a migration.
DB = "WikiTrivi_DB"
PAGE_COL = "pages"  # collection of wiki pages used as trivia subjects
WORD_COL = "common_words"  # words considered too common to accept
USER_COL = "users"
FAILER_GIFS_COL = "fail_gifs"  # gifs shown on a lost round
WINNER_GIFS_COL = "win_gifs"  # gifs shown on a won round
# NOTE(review): name has a typo ("EXLUDE") but may be imported elsewhere.
WIKI_EXLUDE_VALS = "list main"
# --- Canned chat responses; '{}' slots are filled via str.format -------
NUMBERS_RESPONSES = ["No way I'm accepting that.",
                     "Are you kidding? you can't guess numbers or letters....",
                     "Nope! i don't accept numbers and letters",
                     "Didn't I tell you? no numbers or letters"]
COMMON_RESPONSES = ["C'mon,This word is way too common...",
                    "Nah, Be more original with your words.",
                    "I want some good content, Buddy. Too common",
                    "Your guesses are so boring. Too common",
                    "Don't just give me every day words."]
FAIL_RESPONSES = ["Nope! You're wrong. tries left: {}",
                  "Wrong! watch out! only {} wrong guesses left",
                  "That is a novice mistake... only {} more like that.",
                  "Don't just make up things. {} more mistakes for you.",
                  "Nah, Not a good guess this one. {} more",
                  "You sure you know it? only {} errors left."]
SUCCESS_RESPONSES = ["Way to go! you'll finish in {} guesses.",
                     "Alrighty! only {} words to guess.",
                     "You Rock! guess me {} more.",
                     "Your Knowledge is astounding. only {} more words",
                     "I worship your brain! c'mon {} and you're done."]
# NOTE(review): "tound score" in the first entry looks like a typo for
# "round score" -- user-visible text, fix deliberately.
WIN_RESPONSES = ["You win!!!!!\nYour tound score is {}.\nTotal score: {} \n url{}", "and that's a win! with a score of {}url{}",
                 "BAM! win! with {} points this round.\nTotal points: {} \nurl{}",
                 "clap your hands for this one!you won this round with {} points.\nTotal points: {} \nurl{}",
                 "aaaaand you win! {} points earned this round! \nTotal points: {} \nurl{}"]
LOSE_RESPONSES = ["Nah, You failed this round.\n Your score is {}\nwould you like to hear about this subject?url{}",
                  "Hahaha fail! You're out with a score of {}.\nwanna learn about it?url{}",
                  "Sorry buddy you failed, maybe next round.\nyou got {} points.url{}",
                  # "GameOver.\nscore:{}url{}",
                  "So did you really know this one? I guess not, you failed! \n{} points for you. url{}"]
CHOOSE_VALUE = ["So have you heard about {}?",
                "really? what about {}?",
                "This too? so head of {}?",
                "Just pick one. {}?", ]
INVALID_ANSWERS = ["invalid answer.\nchoose 'yes' or 'no'."]
TITLE_REPONSES = ["hey, enter words about it...",
                  "You can't just reuse the title",
                  "I see what you did there...",
                  "No way I'm accepting that."]
REPEATING_GUESS = ["Nice try. can't fool me. you used this word already","No recycling words here."]
INFINITE_SUCCESS = ["Nice. Go On"]
from django.shortcuts import render,redirect, get_object_or_404
from genre.models import UsersGenre
from .models import Explore, Playlist
from .forms import PlaylistForm
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def explore_view(request):
    """Show music matching the logged-in user's chosen genres and let them
    save a selection of tracks as a new playlist.

    Assumes each UsersGenre row carries at least three entries in
    ``user_genres`` -- TODO confirm against the genre app.
    """
    user = request.user
    user_genre = UsersGenre.objects.filter(user=user)
    # Bug fix: previously `explore` was only bound inside the loop, so a
    # user with no UsersGenre rows raised NameError; fall back to an empty
    # queryset so the template still renders.
    explore = Explore.objects.none()
    # NOTE(review): only the queryset built from the *last* UsersGenre row
    # survives this loop; presumably each user has exactly one row -- verify.
    for genre in user_genre:
        genre1 = genre.user_genres.all()[0]
        genre2 = genre.user_genres.all()[1]
        genre3 = genre.user_genres.all()[2]
        explore = Explore.objects.filter(genre__genres=genre1) | Explore.objects.filter(genre__genres=genre2) | Explore.objects.filter(genre__genres=genre3)
    if request.method == "POST":
        if "music_selected" in request.POST:
            selected_music = request.POST.getlist('music_select')
            your_playlist = Playlist(user=user)
            your_playlist.save()
            your_playlist.playlist.add(*selected_music)
            # login_required guarantees an authenticated user here, so this
            # redirect always follows a successful save.
            if user is not None:
                return redirect('playlist')
    context = {
        "explore": explore,
    }
    return render(request, 'soul/explore.html', context)
@login_required
def playlist_view(request):
    """Render every track of the logged-in user's saved playlist."""
    user = request.user
    play_list = Playlist.objects.filter(user=user)
    # Bug fix: previously `your_playlist` was only bound inside the loop,
    # so a user with no Playlist rows raised NameError; default to empty.
    your_playlist = []
    # NOTE(review): only the last playlist's tracks survive this loop;
    # presumably each user has a single Playlist row -- verify.
    for play in play_list:
        your_playlist = play.playlist.all()
    context = {
        "your_playlist": your_playlist,
    }
    return render(request, 'soul/playlist.html', context)
|
# -*- coding: utf-8 -*-
"""
file: gromacs_setup.py
Function for preparing input definitions for a GROMACS
Linear Interaction Energy MD calculation
"""
import os
import logging
import re
def correct_itp(topfile, topOutFn, posre=True, outitp=None, removeMols=None, replaceMols=None, excludePosre=None,
                excludeHH=None, miscMols=None):
    """
    Correct hydrogen and heavy atom masses in the .itp file
    makes position restraint file for the ligand
    outitp={'atomtypes': {'outfile':'attype.itp', 'overwrite':True}}

    Returns a dict with the written topology path ('top'), the list of
    position-restraint files ('posre') and any external itps written
    ('externalItps').
    """
    # Bug fix: the old signature used mutable defaults ({} / []), which are
    # shared across calls; use None sentinels and build fresh containers.
    outitp = {} if outitp is None else outitp
    removeMols = [] if removeMols is None else removeMols
    replaceMols = [] if replaceMols is None else replaceMols
    excludePosre = [] if excludePosre is None else excludePosre
    excludeHH = [] if excludeHH is None else excludeHH
    miscMols = [] if miscMols is None else miscMols
    print("CORRECT ITP")
    # Position-restraint file name derived from the output topology name.
    if posre:
        posreNm = "%s-posre.itp"%os.path.splitext(os.path.basename(topOutFn))[0]
    else:
        posreNm = None
    #read itp
    print("READ TOP")
    blocks, listBlocks, listMols = readCard(topfile)
    print("REMOVE MOLS")
    # remove mols; eg. WAT to be substituted with SOL in amber to gromacs conversion
    blocks, listBlocks, listMols = topRmMols(blocks, listBlocks, removeMols)
    print("REPLACE MOLS")
    blocks, listBlocks=topReplaceMols(blocks, listBlocks, replaceMols)
    print("HH")
    #apply heavy hydrogens(HH)
    newBlocks=heavyH(listBlocks, blocks, listMols, excludeList=excludeHH)
    print("POSRES")
    #create positional restraints file (one per molecule not excluded)
    if posre:
        posreNm=outPosre(blocks, listBlocks, listMols, excludePosre)
    else:
        posreNm={}
    print("ADD MOLS")
    #add additional moleculetypes (e.g. solvent and ions)
    miscBlocks, miscListBlocks, miscListMols=([], [], [])
    for mol in miscMols:
        b, lb, lm=readCard(mol)
        miscBlocks+=b
        miscListBlocks+=lb
        miscListMols+=lm
    fixNewBlocks, fixListBlocks=itpAddMols(blocks, listBlocks, miscBlocks, miscListBlocks)
    # replace mols in system definition
    print("OUT ITP")
    #write corrected itp (with HH and no atomtype section
    topOut, extItps=itpOut(fixNewBlocks, fixListBlocks, topOutFn, posre=posreNm, excludeList=outitp)
    results={
        'top':topOut,
        'posre':[ posreNm[i] for i in posreNm],
        'externalItps':extItps
    }
    return results
def readCard(filetop):
    """Parse a GROMACS topology/itp file into token blocks.

    Each ``[ section ]`` becomes a list of whitespace-tokenised rows;
    preprocessor lines ('#...') are kept as raw strings with a section name
    of None, and ';' comments are dropped.

    Returns:
        (listBlocks, blockNames, listMols): the blocks, the parallel list of
        section names, and one dict per moleculetype with its 'name' plus
        the indices of its 'atoms'/'bonds' blocks.
    """
    logging.debug('read topology')
    blockNames=[]
    listBlocks=[]
    title=False
    read=False
    with open(filetop, 'r') as itp:
        block=[]
        for line in itp:
            atom=[]
            if line.startswith('#'):
                # Preprocessor directive: flush the block in progress and
                # store the raw line with no section name.
                if block!=[]:
                    listBlocks.append(block)
                    block=[]
                listBlocks.append(line)
                blockNames.append(None)
            else:
                # Bug fix: the old pattern '\s*' can match the empty string,
                # and since Python 3.7 re.split() also splits on empty
                # matches, shattering every token into single characters;
                # r'\s+' restores the intended whitespace tokenisation.
                # NOTE(review): line[:-1] assumes a trailing newline; a
                # final line without one loses its last character.
                line_sp=re.split(r'\s+', line[:-1])
                for item in line_sp:
                    if re.match(";", item):
                        # ';' starts a comment -- ignore the rest of the line.
                        break
                    elif item == "[":
                        # New section header: flush the previous block.
                        if block!=[]:
                            listBlocks.append(block)
                            block=[]
                        title=True
                        read=False
                    elif item=="]":
                        title=False
                        read=True
                    elif (title==True) and (item!=''):
                        blockNames.append(item)
                    elif (read==True) and (item!=''):
                        atom.append(item)
                if (atom!=[]):
                    block.append(atom)
        if block!=[]:
            listBlocks.append(block)
    # for molecule get:
    #   name
    #   index of the block with atoms
    #   index of block with bonds
    listMols=[]
    mol={}
    for nbl, blockNm in enumerate(blockNames):
        if blockNm == 'moleculetype':
            if len(mol)>0:
                listMols.append(mol)
                mol={}
            mol['name']=listBlocks[nbl][0][0]
        elif blockNm == 'atoms':
            mol['atoms']=nbl
        elif blockNm == 'bonds':
            mol['bonds']=nbl
    if len(mol)>0:
        listMols.append(mol)
    return (listBlocks, blockNames, listMols)
def topRmMols(blocks, blockNames, mols2Del):
    """Remove every moleculetype listed in `mols2Del` together with its
    following sections, then rebuild the molecule index.

    Returns the updated (blocks, blockNames, listMols); the input lists are
    modified in place.
    """
    print("TOP RM MOLS")
    doomed = []
    dropping = False
    for pos, name in enumerate(blockNames):
        if name == 'moleculetype':
            # Start (or stop) dropping depending on the molecule's name.
            dropping = blocks[pos][0][0] in mols2Del
        if name == 'system':
            dropping = False
        if dropping:
            doomed.append(pos)
    # Delete from the back so earlier indices stay valid.
    doomed.sort(reverse=True)
    print("EXCLUDE", doomed)
    for pos in doomed:
        blocks.pop(pos)
        blockNames.pop(pos)
    print("CREATE LISTMOLS")
    # Re-index the surviving molecules: name plus atoms/bonds block indices.
    listMols = []
    current = {}
    for pos, name in enumerate(blockNames):
        if name == 'moleculetype':
            if len(current) > 0:
                listMols.append(current)
                current = {}
            current['name'] = blocks[pos][0][0]
        elif name == 'atoms':
            current['atoms'] = pos
        elif name == 'bonds':
            current['bonds'] = pos
    if len(current) > 0:
        listMols.append(current)
    print("LISTMOLS ", listMols)
    return (blocks, blockNames, listMols)
def topReplaceMols(blocks, blockNames, mols2Rep):
    """Rename molecules in the [ molecules ] section.

    `mols2Rep` is a list of {'in': old_name, 'out': new_name} mappings,
    e.g. swapping amber's WAT for gromacs' SOL. Modifies `blocks` in place
    and returns (blocks, blockNames).
    """
    print('TOPREPLACE')
    old_names = [entry['in'] for entry in mols2Rep]
    for pos, name in enumerate(blockNames):
        if name != 'molecules':
            continue
        for row in blocks[pos]:
            if row[0] in old_names:
                row[0] = mols2Rep[old_names.index(row[0])]['out']
    return (blocks, blockNames)
def heavyH(blockNames, blocks, listMols, excludeList=['WAT']):
    '''Adjust the weights of hydrogens, and their heavy atom partner.

    For every bond involving a hydrogen (atom type starting with h/H), the
    hydrogen's mass (column 7) is quadrupled and three hydrogen masses are
    subtracted from its bonded heavy atom, keeping the total mass constant.
    Molecules named in `excludeList` are left untouched.
    '''
    for mol in listMols:
        if mol['name'] in excludeList:
            continue
        atoms = blocks[mol['atoms']]
        for bond in blocks[mol['bonds']]:
            for hI in (0, 1):
                h_atom = atoms[int(bond[hI]) - 1]
                if not re.match("^h|^H", h_atom[1]):
                    continue
                # The other bond partner is the heavy atom.
                heavy = atoms[int(bond[1 - hI]) - 1]
                # Shift 3*m(H) from the heavy atom first (uses the
                # hydrogen's original mass), then quadruple the hydrogen.
                heavy[7] = "%.5f" % (float(heavy[7]) - float(h_atom[7]) * 3)
                h_atom[7] = "%.5f" % (float(h_atom[7]) * 4)
    return(blocks)
def outPosre(blocks, listBlocks, listMols, excludeList):
    """Write one '<mol>-posre.itp' position-restraint file per molecule.

    Heavy atoms get force constants tiered by atom name (backbone and HEM
    residues 1POSCOS, CB 2POSCOS, CG 3POSCOS, anything else 4POSCOS);
    hydrogens are skipped. Molecules named in `excludeList` are skipped.
    Returns {molecule name: posre file name}.
    (`listBlocks` is currently unused.)
    """
    outposre={}
    for mol in listMols:
        if mol['name'] not in excludeList:
            oitp='%s-posre.itp'%mol['name']
            outposre[mol['name']]=oitp
            with open(oitp, "w") as outFile:
                # Header defines the four tiers of force constants, each
                # overridable via the preprocessor.
                outFile.write(\
'#ifndef 1POSCOS\n\
#define 1POSCOS 10000\n\
#endif\n\
#ifndef 2POSCOS\n\
#define 2POSCOS 5000\n\
#endif\n\
#ifndef 3POSCOS\n\
#define 3POSCOS 2000\n\
#endif\n\
#ifndef 4POSCOS\n\
#define 4POSCOS 1000\n\
#endif\n\
[ position_restraints ]\n')
                for atom in blocks[mol['atoms']]:
                    # atom[0] is the atom index, atom[3] presumably the
                    # residue name and atom[4] the atom name -- confirm
                    # against the [ atoms ] column layout.
                    if not atom[4].startswith('H'):
                        if atom[3] == 'HEM':
                            outFile.write("%-4s 1 1POSCOS 1POSCOS 1POSCOS\n" % atom[0])
                        elif atom[4] in ['CA', 'N', 'O', 'C']:
                            outFile.write("%-4s 1 1POSCOS 1POSCOS 1POSCOS\n" % atom[0])
                        elif atom[4] in ['CB']:
                            outFile.write("%-4s 1 2POSCOS 2POSCOS 2POSCOS\n" % atom[0])
                        elif atom[4] in ['CG']:
                            outFile.write("%-4s 1 3POSCOS 3POSCOS 3POSCOS\n" % atom[0])
                        else:
                            outFile.write("%-4s 1 4POSCOS 4POSCOS 4POSCOS\n" % atom[0])
    return outposre
def itpAddMols(blocks, nameBlocks, miscBlocks, miscNameBlocks):
    """Merge extra moleculetypes (e.g. solvent, ions) into a topology.

    New atomtypes are appended to the existing [ atomtypes ] block unless
    an atomtype with the same name is already present; every other misc
    block is inserted just before the [ system ] block. Returns the updated
    (blocks, nameBlocks).
    """
    # --- merge atomtypes, skipping names already defined ---
    idxTypes = nameBlocks.index('atomtypes')
    idxNewTypes = [i for i, label in enumerate(miscNameBlocks) if label == 'atomtypes']
    known = {row[0] for row in blocks[idxTypes]}
    for type_block in idxNewTypes:
        for candidate in miscBlocks[type_block]:
            if candidate[0] not in known:
                blocks[idxTypes].append(candidate)
                known.add(candidate[0])
    # --- insert the remaining misc blocks ahead of [ system ] ---
    insert_at = nameBlocks.index('system')
    for pos, misc in enumerate(miscBlocks):
        if pos in idxNewTypes:
            continue
        blocks.insert(insert_at, misc)
        nameBlocks.insert(insert_at, miscNameBlocks[pos])
        insert_at += 1
    return blocks, nameBlocks
def itpOut(blocks, nameBlocks, oitp, posre, excludeList={}):
    '''write new top. blocks defined in excludeList are removed and saved in the file 'outfile'. e.g atomtypes'''
    # NOTE(review): mutable default `excludeList={}` -- not mutated here,
    # but a None sentinel would be safer.
    def outPosre(posreFN):
        # Emit the conditional include for a position-restraint file.
        outFile.write('#ifdef POSRES\n#include "%s"\n#endif\n\n'%posreFN)
    def outBlock(blockName, block, output):
        # Write one `[ section ]` with column-aligned rows.
        output.write("[ %s ]\n"%blockName)
        outFormat=defineFMTblock(block)
        for item in block:
            output.write(outFormat.format(d=item))
    extItps=[]  # external itp files written for excluded sections
    with open(oitp, "w") as outFile:
        molWithPosre=False
        molName=None
        for nbl, blockName in enumerate(nameBlocks):
            if blockName is None: # preprocessing instructions
                outFile.write(blocks[nbl])
            elif blockName in excludeList: # specific itp
                #WRITE EXTERNAL ITP TO INCLUDE IF REQUIRED
                if excludeList[blockName]['overwrite']:
                    openMode='w'
                else:
                    openMode='a'
                with open(excludeList[blockName]['outfile'], openMode) as outItp:
                    outBlock(blockName, blocks[nbl], outItp)
                extItps.append(excludeList[blockName]['outfile'])
                outFile.write('#include "%s"\n\n'%excludeList[blockName]['outfile'])
                # outitp
            else:
                # WRITE INCLUDE POSRE IF REQUIRED
                if blockName=='moleculetype':
                    # Flush the previous molecule's posre include before
                    # starting the next molecule.
                    if molWithPosre:
                        outPosre(posre[molName])
                    molName=blocks[nbl][0][0]
                    if molName in posre:
                        molWithPosre=True
                    else:
                        molWithPosre=False
                if blockName=='system':
                    # The last molecule's posre include goes before [ system ].
                    if molWithPosre:
                        outPosre(posre[molName])
                # PRINT OUT BLOCK
                outBlock(blockName, blocks[nbl], outFile)
                outFile.write("\n")
    return oitp, extItps
def defineFMTblock(block):
    """Build a right-aligned str.format template for the rows of `block`.

    Each column is sized to the widest cell plus two padding spaces; the
    returned string is used as fmt.format(d=row) and ends with a newline.
    """
    widths = []
    for row in block:
        for col, cell in enumerate(row):
            if col < len(widths):
                widths[col] = max(widths[col], len(cell))
            else:
                widths.append(len(cell))
    fields = ["{d[%d]:>%ds}" % (col, width + 2) for col, width in enumerate(widths)]
    return "".join(fields) + "\n"
def correctAttype(itp, newtypes):
    """Append atomtypes from `newtypes` whose names are missing from
    itp['atomtypes'], modifying `itp` in place and returning it.

    The set of known names is taken once up front, so duplicates *within*
    `newtypes` are all appended (matches historical behaviour).
    """
    known = [row[0] for row in itp['atomtypes']]
    for candidate in newtypes:
        if candidate[0] not in known:
            itp['atomtypes'].append(candidate)
    return itp
|
"""
The lower_convex_hull module handles geometric calculations associated with
equilibrium calculation.
"""
from __future__ import print_function
from pycalphad.log import logger
from pycalphad.core.cartesian import cartesian
import numpy as np
# The energetic difference, in J/mol-atom, below which is considered 'zero'
DRIVING_FORCE_TOLERANCE = 1e-8
def _initialize_array(global_grid, result_array):
    "Fill in starting values for the energy array."
    # Profiling says it's way faster to compute one global max_energy value
    # than to compute it per condition and do a broadcast assignment
    # This will cause some minor differences in the driving force for the first few iterations
    # but it shouldn't be a big deal
    max_energy = global_grid['GM'].values.max()
    len_comps = result_array.dims['component']
    # Bug fix: `max_energy == np.nan` is always False (NaN never compares
    # equal to anything, including itself); np.isnan actually detects NaNs.
    if np.isnan(max_energy):
        raise ValueError('Input energy surface contains one or more NaNs.')
    result_array['GM'].values[...] = max_energy
    result_array['MU'].values[...] = np.nan
    result_array['NP'].values[...] = np.nan
    # Initial simplex for each target point in will be
    # the fictitious hyperplane
    # This hyperplane sits above the system's energy surface
    # The reason for this is to guarantee our initial simplex contains
    # the target point
    # Note: We're assuming that the max energy is in the first few, presumably
    # fictitious points instead of more rigorously checking with argmax.
    result_array['points'].values[...] = np.arange(len_comps)
def lower_convex_hull(global_grid, result_array, verbose=False):
    """
    Find the simplices on the lower convex hull satisfying the specified
    conditions in the result array.
    Parameters
    ----------
    global_grid : Dataset
        A sample of the energy surface of the system.
    result_array : Dataset
        This object will be modified!
        Coordinates correspond to conditions axes.
    verbose : bool
        Display details to stdout. Useful for debugging.
    Returns
    -------
    None. Results are written to result_array.
    Notes
    -----
    This routine will not check if any simplex is degenerate.
    Degenerate simplices will manifest with duplicate or NaN indices.
    Examples
    --------
    None yet.
    """
    # Split the result coordinates into condition groups: state variables
    # (T, P), chemical-potential conditions (MU_*) and compositions (X_*).
    conditions = [x for x in result_array.coords.keys() if x not in ['vertex',
                                                                     'component']]
    indep_conds = sorted([x for x in sorted(result_array.coords.keys()) if x in ['T', 'P']])
    indep_shape = tuple(len(result_array.coords[x]) for x in indep_conds)
    comp_conds = sorted([x for x in sorted(result_array.coords.keys()) if x.startswith('X_')])
    comp_shape = tuple(len(result_array.coords[x]) for x in comp_conds)
    pot_conds = sorted([x for x in sorted(result_array.coords.keys()) if x.startswith('MU_')])
    # force conditions to have particular ordering
    conditions = indep_conds + pot_conds + comp_conds
    # A candidate simplex has one vertex per component.
    trial_shape = (len(result_array.coords['component']),)
    trial_points = None
    _initialize_array(global_grid, result_array)
    # Enforce ordering of shape if this is the first iteration
    if result_array.attrs['hull_iterations'] == 1:
        result_array['points'] = result_array['points'].transpose(*(conditions + ['vertex']))
        result_array['GM'] = result_array['GM'].transpose(*conditions)
        result_array['NP'] = result_array['NP'].transpose(*(conditions + ['vertex']))
    # Determine starting combinations of chemical potentials and compositions
    # TODO: Check Gibbs phase rule compliance
    if len(pot_conds) > 0:
        raise NotImplementedError('Chemical potential conditions are not yet supported')
    # FIRST CASE: Only composition conditions specified
    # We only need to compute the dependent composition value directly
    # Initialize trial points as lowest energy point in the system
    if (len(comp_conds) > 0) and (len(pot_conds) == 0):
        trial_points = np.empty(result_array['GM'].T.shape)
        trial_points.fill(np.inf)
        trial_points[...] = global_grid['GM'].argmin(dim='points').values.T
        trial_points = trial_points.T
        comp_values = cartesian([result_array.coords[cond] for cond in comp_conds])
        # Insert dependent composition value
        # TODO: Handle W(comp) as well as X(comp) here
        specified_components = set([x[2:] for x in comp_conds])
        dependent_component = set(result_array.coords['component'].values) - specified_components
        dependent_component = list(dependent_component)
        if len(dependent_component) != 1:
            raise ValueError('Number of dependent components is different from one')
        insert_idx = sorted(result_array.coords['component'].values).index(dependent_component[0])
        # Balance: the dependent component takes up whatever mole fraction
        # the specified components leave over.
        comp_values = np.concatenate((comp_values[..., :insert_idx],
                                      1 - np.sum(comp_values, keepdims=True, axis=-1),
                                      comp_values[..., insert_idx:]),
                                     axis=-1)
    # SECOND CASE: Only chemical potential conditions specified
    # TODO: Implementation of chemical potential
    # THIRD CASE: Mixture of composition and chemical potential conditions
    # TODO: Implementation of mixed conditions
    if trial_points is None:
        raise ValueError('Invalid conditions')
    # NOTE(review): np.float / np.int are deprecated aliases removed in
    # NumPy 1.24 -- replace with float / int (or np.float64 / np.intp)
    # throughout this function before upgrading NumPy.
    driving_forces = np.zeros(result_array.GM.values.shape + (len(global_grid.points),),
                              dtype=np.float)
    max_iterations = 200
    iterations = 0
    while iterations < max_iterations:
        iterations += 1
        trial_simplices = np.empty(result_array['points'].values.shape + \
            (result_array['points'].values.shape[-1],), dtype=np.int)
        # Initialize trial simplices with values from best guess simplices
        trial_simplices[..., :, :] = result_array['points'].values[..., np.newaxis, :]
        # Trial simplices will be the current simplex with each vertex
        # replaced by the trial point
        # Exactly one of those simplices will contain a given test point,
        # excepting edge cases
        trial_simplices.T[np.diag_indices(trial_shape[0])] = trial_points.T
        #print('trial_simplices.shape', trial_simplices.shape)
        #print('global_grid.X.values.shape', global_grid.X.values.shape)
        flat_statevar_indices = np.unravel_index(np.arange(np.multiply.reduce(result_array.MU.values.shape)),
                                                 result_array.MU.values.shape)[:len(indep_conds)]
        #print('flat_statevar_indices', flat_statevar_indices)
        trial_matrix = global_grid.X.values[np.index_exp[flat_statevar_indices +
                                            (trial_simplices.reshape(-1, trial_simplices.shape[-1]).T,)]]
        trial_matrix = np.rollaxis(trial_matrix, 0, -1)
        #print('trial_matrix', trial_matrix)
        # Partially ravel the array to make indexing operations easier
        trial_matrix.shape = (-1,) + trial_matrix.shape[-2:]
        # We have to filter out degenerate simplices before
        # phase fraction computation
        # This is because even one degenerate simplex causes the entire tensor
        # to be singular
        nondegenerate_indices = np.all(np.linalg.svd(trial_matrix,
                                                     compute_uv=False) > 1e-12,
                                       axis=-1, keepdims=True)
        # Determine how many trial simplices remain for each target point.
        # In principle this would always be one simplex per point, but once
        # some target values reach equilibrium, trial_points starts
        # to contain points already on our best guess simplex.
        # This causes trial_simplices to create degenerate simplices.
        # We can safely filter them out since those target values are
        # already at equilibrium.
        sum_array = np.sum(nondegenerate_indices, axis=-1, dtype=np.int)
        index_array = np.repeat(np.arange(trial_matrix.shape[0], dtype=np.int),
                                sum_array)
        comp_shape = trial_simplices.shape[:len(indep_conds)+len(pot_conds)] + \
            (comp_values.shape[0], trial_simplices.shape[-2])
        comp_indices = np.unravel_index(index_array, comp_shape)[len(indep_conds)+len(pot_conds)]
        # Barycentric coordinates (phase fractions) of each target point in
        # each nondegenerate trial simplex; -1 marks "not computed".
        fractions = np.full(result_array['points'].values.shape + \
            (result_array['points'].values.shape[-1],), -1.)
        fractions[np.unravel_index(index_array, fractions.shape[:-1])] = \
            np.linalg.solve(np.swapaxes(trial_matrix[index_array], -2, -1),
                            comp_values[comp_indices])
        # A simplex only contains a point if its barycentric coordinates
        # (phase fractions) are non-negative.
        bounding_indices = np.all(fractions >= 0, axis=-1)
        #print('BOUNDING INDICES', bounding_indices)
        #zero_success_trials = np.sum(bounding_indices, axis=-1, dtype=np.int, keepdims=False) == 0
        #if np.any(zero_success_trials):
        #    print(trial_matrix[np.nonzero(zero_success_trials)[:-1]])
        # If more than one trial simplex satisfies the non-negativity criteria
        # then just choose the first non-degenerate one. This addresses gh-28.
        # There is also the possibility that *none* of the trials were successful.
        # This is usually due to numerical problems at the limit of composition space.
        # We will sidestep the issue here by forcing the last first non-degenerate simplex to match in that case.
        multiple_success_trials = np.sum(bounding_indices, axis=-1, dtype=np.int, keepdims=False) != 1
        #print('MULTIPLE SUCCESS TRIALS SHAPE', np.nonzero(multiple_success_trials))
        if np.any(multiple_success_trials):
            saved_trial = np.argmax(np.logical_or(bounding_indices[np.nonzero(multiple_success_trials)],
                                                  nondegenerate_indices.reshape(bounding_indices.shape)[np.nonzero(multiple_success_trials)]), axis=-1)
            #print('SAVED TRIAL', saved_trial)
            #print('BOUNDING INDICES BEFORE', bounding_indices)
            bounding_indices[np.nonzero(multiple_success_trials)] = False
            #print('BOUNDING INDICES FALSE', bounding_indices)
            bounding_indices[np.nonzero(multiple_success_trials) + np.index_exp[saved_trial]] = True
            #print('BOUNDING INDICES AFTER', bounding_indices)
        fractions.shape = (-1, fractions.shape[-1])
        bounding_indices.shape = (-1,)
        index_array = np.arange(trial_matrix.shape[0], dtype=np.int)[bounding_indices]
        raveled_simplices = trial_simplices.reshape((-1,) + trial_simplices.shape[-1:])
        candidate_simplices = raveled_simplices[index_array, :]
        #print('candidate_simplices', candidate_simplices)
        # We need to convert the flat index arrays into multi-index tuples.
        # These tuples will tell us which state variable combinations are relevant
        # for the calculation. We can drop the last dimension, 'trial'.
        #print('trial_simplices.shape[:-1]', trial_simplices.shape[:-1])
        statevar_indices = np.unravel_index(index_array, trial_simplices.shape[:-1]
                                            )[:len(indep_conds)+len(pot_conds)]
        aligned_energies = global_grid.GM.values[statevar_indices + (candidate_simplices.T,)].T
        statevar_indices = tuple(x[..., np.newaxis] for x in statevar_indices)
        #print('statevar_indices', statevar_indices)
        aligned_compositions = global_grid.X.values[np.index_exp[statevar_indices + (candidate_simplices,)]]
        #print('aligned_compositions', aligned_compositions)
        #print('aligned_energies', aligned_energies)
        # Solving X * mu = G gives the chemical potentials (hyperplane
        # coefficients) of each candidate simplex.
        candidate_potentials = np.linalg.solve(aligned_compositions.astype(np.float, copy=False),
                                               aligned_energies.astype(np.float, copy=False))
        #print('candidate_potentials', candidate_potentials)
        logger.debug('candidate_simplices: %s', candidate_simplices)
        comp_indices = np.unravel_index(index_array, comp_shape)[len(indep_conds)+len(pot_conds)]
        #print('comp_values[comp_indices]', comp_values[comp_indices])
        candidate_energies = np.multiply(candidate_potentials,
                                         comp_values[comp_indices]).sum(axis=-1)
        #print('candidate_energies', candidate_energies)
        # Generate a matrix of energies comparing our calculations for this iteration
        # to each other.
        # 'conditions' axis followed by a 'trial' axis
        # Empty values are filled in with infinity
        # NOTE(review): this uses true division, so the shape argument is a
        # float -- np.empty rejects float dimensions on Python 3; this line
        # presumably predates the 2->3 port and needs // integer division.
        comparison_matrix = np.empty([trial_matrix.shape[0] / trial_shape[0],
                                      trial_shape[0]])
        if comparison_matrix.shape[0] != aligned_compositions.shape[0]:
            raise ValueError('Arrays have become misaligned. This is a bug. Try perturbing your composition conditions '
                             'by a small amount (1e-4). If you would like, you can report this issue to the development'
                             ' team and they will fix it for future versions.')
        comparison_matrix.fill(np.inf)
        comparison_matrix[np.divide(index_array, trial_shape[0]).astype(np.int),
                          np.mod(index_array, trial_shape[0])] = candidate_energies
        #print('comparison_matrix', comparison_matrix)
        # If a condition point is all infinities, it means we did not calculate it
        # We should filter those out from any comparisons
        calculated_indices = ~np.all(comparison_matrix == np.inf, axis=-1)
        # Extract indices for trials with the lowest energy for each target point
        lowest_energy_indices = np.argmin(comparison_matrix[calculated_indices],
                                          axis=-1)
        # Filter conditions down to only those calculated this iteration
        calculated_conditions_indices = np.arange(comparison_matrix.shape[0])[calculated_indices]
        #print('comparison_matrix[calculated_conditions_indices,lowest_energy_indices]',comparison_matrix[calculated_conditions_indices,
        #                                                                    lowest_energy_indices])
        # This has to be greater-than-or-equal because, in the case where
        # the desired condition is right on top of a simplex vertex (gh-28), there
        # will be no change in energy changing a "_FAKE_" vertex to a real one.
        is_lower_energy = comparison_matrix[calculated_conditions_indices,
                                            lowest_energy_indices] <= \
            result_array['GM'].values.flat[calculated_conditions_indices]
        #print('is_lower_energy', is_lower_energy)
        # These are the conditions we will update this iteration
        final_indices = calculated_conditions_indices[is_lower_energy]
        #print('final_indices', final_indices)
        # Convert to multi-index form so we can index the result array
        final_multi_indices = np.unravel_index(final_indices,
                                               result_array['GM'].values.shape)
        updated_potentials = candidate_potentials[is_lower_energy]
        result_array['points'].values[final_multi_indices] = candidate_simplices[is_lower_energy]
        result_array['GM'].values[final_multi_indices] = candidate_energies[is_lower_energy]
        result_array['MU'].values[final_multi_indices] = updated_potentials
        result_array['NP'].values[final_multi_indices] = \
            fractions[np.nonzero(bounding_indices)][is_lower_energy]
        #print('result_array.GM.values', result_array.GM.values)
        # By profiling, it's faster to recompute all driving forces in-place
        # versus doing fancy indexing to only update "changed" driving forces
        # This avoids the creation of expensive temporaries
        np.einsum('...i,...i',
                  result_array.MU.values[..., np.newaxis, :],
                  global_grid.X.values[np.index_exp[...] + ((np.newaxis,) * len(comp_conds)) + np.index_exp[:, :]],
                  out=driving_forces)
        np.subtract(driving_forces,
                    global_grid.GM.values[np.index_exp[...] + ((np.newaxis,) * len(comp_conds)) + np.index_exp[:]],
                    out=driving_forces)
        # Update trial points to choose points with largest remaining driving force
        trial_points = np.argmax(driving_forces, axis=-1)
        #print('trial_points', trial_points)
        logger.debug('trial_points: %s', trial_points)
        # If all driving force (within some tolerance) is consumed, we found equilibrium
        if np.all(driving_forces <= DRIVING_FORCE_TOLERANCE):
            return
    # Fell out of the loop: convergence was not reached within max_iterations.
    if verbose:
        print('Max hull iterations exceeded. Remaining driving force: ', driving_forces.max())
|
# coding=utf-8
__author__ = 'stefano'
import logging
from pprint import pprint
from optparse import make_option
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from openpyxl import load_workbook, Workbook
from django.core.management.base import BaseCommand
from openaid.projects.models import Project, Activity, Initiative
#import projects start/end dates from xlsx file
#at the moment just outputs xls file with error dates
class Command(BaseCommand):
    """Import project start/end dates from an xlsx file.

    At the moment the command only validates the rows: rows whose project
    number or initiative code cannot be matched are written to a dated
    error workbook, one sheet per error category.
    """
    option_list = BaseCommand.option_list + (
        make_option('--file',
                    dest='file',
                    default='',
                    help='path to input file'),
    )
    help = 'import projects start/end dates from xlsx file'
    logger = logging.getLogger('openaid')
    default_date_format = '%d/%m/%y'

    def date_to_string(self, date, date_format=None):
        """Format *date* with *date_format* (defaults to '%d/%m/%y').

        Returns '' for None or unformattable values, logging the failure.
        """
        if date_format is None:
            date_format = self.default_date_format
        if date is not None:
            try:
                return datetime.strftime(date, date_format)
            except (ValueError, TypeError):
                self.logger.error("Wrong date value:{}".format(date))
                return ''
        return ''

    def get_reduced_row(self, row, skip_dates):
        """Return the first 9 cell values of *row*.

        Columns 4..7 hold dates and are rendered as strings unless
        *skip_dates* is True (used for the header row).
        """
        rr = []
        for i in range(9):
            value = row[i].value
            if skip_dates is False and (4 <= i <= 7):
                value = self.date_to_string(value)
            rr.append(value)
        return rr

    def handle(self, *args, **options):
        verbosity = options['verbosity']
        input_filename = options['file']
        error_filename = '{}_dates_errorlog.xlsx'.format(self.date_to_string(datetime.now(), "%Y%m%d%H%M"))
        # Map --verbosity 0..3 onto logging levels.
        if verbosity == '0':
            self.logger.setLevel(logging.ERROR)
        elif verbosity == '1':
            self.logger.setLevel(logging.WARNING)
        elif verbosity == '2':
            self.logger.setLevel(logging.INFO)
        elif verbosity == '3':
            self.logger.setLevel(logging.DEBUG)
        # One worksheet per error category.
        error_workbook = Workbook()
        ws_pr_notfound = error_workbook.create_sheet(index=0, title='Project number not found')
        ws_pr_notunique = error_workbook.create_sheet(index=1, title='Project number not unique')
        ws_in_notfound = error_workbook.create_sheet(index=2, title='Initiative code not found')
        self.logger.info(u"Opening input file: {}".format(input_filename))
        # Context manager closes the handle (the original leaked it).
        with open(input_filename, 'rb') as input_file:
            input_workbook = load_workbook(input_file, data_only=True)
        input_ws = input_workbook['Foglio1']
        not_found = multiple = initiative_counter = 0
        row_counter = 0
        for row in input_ws.rows:
            if row_counter == 0:
                # First row is the header: copy it into every error sheet.
                header = self.get_reduced_row(row=row, skip_dates=True)
                row_counter += 1
                ws_pr_notfound.append(header)
                ws_pr_notunique.append(header)
                ws_in_notfound.append(header)
                continue
            project_number = row[0].value
            # Initiative code is by convention the first 6 chars of the number.
            initiative_code = project_number[:6]
            reduced_row = self.get_reduced_row(row, False)
            # datainizio = row[5].value
            # datafine = row[6].value
            try:
                Project.objects.get(number=project_number)
            except ObjectDoesNotExist:
                # try to look for an activity with that code
                activities = Activity.objects.filter(number=project_number)
                if activities.count() == 0:
                    self.logger.error("Proj not found:'{}'".format(project_number))
                    not_found += 1
                    ws_pr_notfound.append(reduced_row)
            except MultipleObjectsReturned:
                self.logger.error("Multiple proj found:'{}'".format(project_number))
                multiple += 1
                ws_pr_notunique.append(reduced_row)
            try:
                Initiative.objects.get(code=initiative_code)
            except ObjectDoesNotExist:
                # (fixed duplicated word "found found" in the original message)
                self.logger.error("Initiative not found:'{}'".format(initiative_code))
                initiative_counter += 1
                ws_in_notfound.append(reduced_row)
        self.logger.error("{} proj.number not found, {} proj.number not unique, {} initiative not found".format(not_found, multiple, initiative_counter))
        if not_found > 0 or multiple > 0 or initiative_counter > 0:
            self.logger.info(u"Error log file:{}".format(error_filename))
            error_workbook.save(error_filename)
|
from django.contrib import admin
from .models import Template, Webpage, Comment, Like
# Register your models here.
# Register the CMS models with the default admin site (same order as before).
for _model in (Template, Webpage, Comment, Like):
    admin.site.register(_model)
|
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import plotly.io as pio
pio.renderers.default = "browser"
from asset_research.utils import get_orderbook_df
def realtime_orderbook_heatmap(orderbook_df, code=None, ):
    """Render an order-book depth heatmap over time with best bid/ask overlays.

    Parameters
    ----------
    orderbook_df : pd.DataFrame
        Rows with a 'code' column, 'svr_recv_time_ask'/'svr_recv_time_bid'
        timestamps and 'Bid'/'Ask' columns holding (price, volume) level
        lists — TODO confirm exact level layout against get_orderbook_df.
    code : str, optional
        If given, restrict the plot to this instrument code.

    Returns
    -------
    plotly.graph_objects.Figure
    """
    if code is not None:
        orderbook_df = orderbook_df[orderbook_df['code'] == code]
    # Work on a copy so the caller's DataFrame is not mutated
    # (the original wrote into a slice: SettingWithCopy hazard).
    orderbook_df = orderbook_df.copy()
    orderbook_df['svr_recv_time_ask'] = pd.to_datetime(orderbook_df['svr_recv_time_ask'])
    orderbook_df['svr_recv_time_bid'] = pd.to_datetime(orderbook_df['svr_recv_time_bid'])
    orderbook_df = orderbook_df.dropna(subset=['svr_recv_time_ask', 'svr_recv_time_bid'])  # type:pd.DataFrame
    # Key each snapshot by the later of the ask/bid receive timestamps.
    orderbook_df['time_key'] = np.where(orderbook_df['svr_recv_time_ask'] > orderbook_df['svr_recv_time_bid'],
                                        orderbook_df['svr_recv_time_ask'], orderbook_df['svr_recv_time_bid'])
    orderbook_df = orderbook_df[['time_key', 'Bid', 'Ask']]
    orderbook_df.set_index('time_key', inplace=True)
    best_bid = orderbook_df['Bid'].apply(lambda x: x[0][0])
    best_ask = orderbook_df['Ask'].apply(lambda x: x[0][0])
    # Global price axis: every price level seen on either side of the book.
    prices = np.array(orderbook_df["Bid"].agg(lambda x: np.array(x)[:, 0]).to_list()).flatten()
    prices = np.append(prices, np.array(orderbook_df["Ask"].agg(lambda x: np.array(x)[:, 0]).to_list()).flatten())
    prices = np.unique(prices)
    grouped = orderbook_df

    def col_func(x):
        # Histogram level volumes onto the global price grid; bins outside
        # the quoted range become NaN so they render transparent.
        a = np.array(x)
        count, value = np.histogram(a[:, 0], bins=prices, weights=a[:, 1])
        c = count != 0
        nonzero = count.nonzero()[0]
        c[nonzero[0]: nonzero[-1]] = True
        count = np.where(c, count, np.nan)  # np.NAN alias was removed in NumPy 2.0
        return count

    grouped['Bid'] = grouped['Bid'].apply(lambda x: col_func(x))
    grouped['Ask'] = grouped['Ask'].apply(lambda x: col_func(x))
    # Extend the last snapshot 60 s into the future for the "Live" band.
    now = grouped.index[-1]
    idx = pd.date_range(now, periods=60, freq='s')
    future = pd.DataFrame(np.repeat(grouped.iloc[-1].values, 60, axis=0).reshape(2, 60).T, index=idx,
                          columns=grouped.columns)
    # DataFrame.append / Series.append were removed in pandas 2.0 -> concat.
    grouped = pd.concat([grouped, future])
    last_bid = best_bid.iloc[-1]
    last_ask = best_ask.iloc[-1]
    fb = pd.Series(np.repeat(last_bid, 60), index=idx)
    fa = pd.Series(np.repeat(last_ask, 60), index=idx)
    best_bid = pd.concat([best_bid, fb])
    best_ask = pd.concat([best_ask, fa])
    best_bid_plot = go.Scatter(x=best_bid.index, y=best_bid.values, mode='lines', line_color='#00FF00', line_width=6)
    best_ask_plot = go.Scatter(x=best_ask.index, y=best_ask.values, mode='lines', line_color='#FF0000', line_width=6)
    bid = go.Heatmap(
        z=np.array(grouped['Bid'].to_list()), zmin=0, zmax=10,
        x=grouped['Bid'].index,
        y=prices, transpose=True,
        colorscale='magma', showscale=False)
    ask = go.Heatmap(
        z=np.array(grouped['Ask'].to_list()), zmin=0, zmax=10,
        x=grouped['Ask'].index,
        y=prices, transpose=True,
        colorscale='magma', showscale=False, )
    fig = go.Figure([bid, ask, best_bid_plot, best_ask_plot])
    fig.update_layout(yaxis_tickformat='g', template='plotly_dark')
    # Vertical marker + "Live" annotation at the last real snapshot.
    fig.update_layout(
        shapes=[dict(
            x0=now, x1=now, y0=0, y1=1, xref='x', yref='paper',
            line_width=2)],
        annotations=[dict(
            x=now, y=0.05, xref='x', yref='paper',
            showarrow=False, xanchor='left', text='Live')]
    )
    # fig.show()
    return fig
def realtime_orderflow(tick_df, code=None, ):
    """Placeholder for a realtime order-flow visualisation (not implemented)."""
    pass
if __name__ == '__main__':
    # Ad-hoc smoke test: load a recorded order book and render a sub-range.
    # NOTE(review): hard-coded absolute path — only works on the author's machine.
    ob_df = get_orderbook_df('/Users/liujunyue/PycharmProjects/alphaFactory/asset_research/2020-07-31.json')
    ob_df = ob_df[1000: 1500]
    realtime_orderbook_heatmap(ob_df, code='HK.999010').show()
|
from django import forms
from account.models import AccountGroup
class EditForm(forms.Form):
    """Create/edit form for an account group: a name plus optional info text."""
    # Group name, required, up to 16 characters.
    name = forms.CharField(
        max_length=16, required=True,
        widget=forms.TextInput(attrs={'size': 16}))
    # Optional free-text description, up to 64 characters.
    info = forms.CharField(
        max_length=64, required=False,
        widget=forms.TextInput(attrs={'size': 64}))
class DeleteForm(forms.Form):
    """Select one or more existing account groups for deletion."""
    def __init__(self, *args, **kwargs):
        super(DeleteForm, self).__init__(*args, **kwargs)
        # Choices are built per-instance so newly created groups appear.
        # NOTE(review): TypedMultipleChoiceField is used without `coerce=`,
        # so selected ids stay strings — confirm whether coerce=int was intended.
        self.fields['account_group_id'] = forms.TypedMultipleChoiceField(
            choices=tuple((x['id'], x['name']) for x in
                          AccountGroup.objects.values('id', 'name')),
            required=True)
class ImportForm(forms.Form):
    """Upload form for a bulk account-group import file."""
    file = forms.FileField()
class ImportRowForm(EditForm):
    """Per-row validation during import; same fields and rules as EditForm."""
    pass
|
#
# -*- coding: <utf-8> -*-
#
import urllib2
from lib.sonos.soco import SoCo
from lib.sonos.soco import SonosDiscovery
import lib.feedparser as feedparser
from core.Logger import log
# Module-level discovery helper shared by all Sonos instances.
sonos_devices = SonosDiscovery()
class Sonos:
    """Helpers for discovering Sonos zones, reading track info, and control."""
    def GetDeviceList(self):
        """Return [(zone_name, ip), ...] for every speaker found on the LAN."""
        info = {}
        for ip in sonos_devices.get_speaker_ips():
            device = SoCo(ip)
            zone_name = device.get_speaker_info()['zone_name']
            # NOTE(review): `!= None` works but `is not None` is the idiomatic test.
            if zone_name != None:
                info[zone_name] = ip
        log('Function [GetDeviceList: %s ]'% (info.items()), 'debug')
        return info.items()
    def GetTrackInfo(self):
        """Collect per-zone track metadata and a locally cached album-art path.

        Returns a dict keyed by zone name with value
        (zone_name, ip, art_path, title, album, artist).
        """
        self.art = {}
        sonoslist = self.GetDeviceList()
        #try:
        for sonos in sonoslist:
            sonosdevice = SoCo(sonos[1])
            self.track = sonosdevice.get_current_track_info()
            self.serial = sonosdevice.get_speaker_info()['serial_number']
            self.album_art_url = self.track['album_art'].encode('utf-8')
            # Missing/undecodable tags fall back to empty strings.
            try:
                self.album_artist = self.track['artist'].encode('utf-8')
            except:
                self.album_artist = ""
            try:
                self.title = self.track['title'].encode('utf-8')
            except:
                self.title = ""
            try:
                self.album = self.track['album'].encode('utf-8')
            except:
                self.album = ""
            # Radio streams report no duration/album; resolve the station
            # logo through the TuneIn (radiotime) metadata service instead.
            if self.track['duration'] == '0:00:00' and self.album == '':
                try:
                    """ First we must parse the streamurl from 1400:status/radiolog
                    to become the id from stream. After that we can parse the logo information
                    from opml.radiotime.com """
                    self.url = "http://" + str(sonos[1]) + ":1400/status/radiolog"
                    self.response = urllib2.urlopen(self.url, timeout=20)
                    self.data = self.response.read()
                    self.response.close()
                    self.r = feedparser.parse(self.data)
                    self.stream = self.r.entries[0]['href']
                    self.id = self.stream.split('&')[0].split('?')[1]
                    self.xml= "http://opml.radiotime.com/Describe.ashx?c=nowplaying&%s&partnerId=Sonos&serial=%s"% (self.id, self.serial)
                    self.response = urllib2.urlopen(self.xml, timeout=20)
                    self.data = self.response.read()
                    self.response.close()
                    self.album_art_url= feedparser.parse(self.data).feed['summary']
                    self.album_art_url = self.album_art_url.split(' ')[0].replace('.png', 'q.png')
                # NOTE(review): bare except silently keeps the previous art URL.
                except:
                    pass
            # Cache the file
            # NOTE(review): hard-coded cache directory — breaks on other hosts.
            filename = "/mnt/Media/Downloads/Homematic/data/cache/%s.jpg"% (sonos[0])
            try:
                f = open(filename,'wb')
                f.write(urllib2.urlopen(self.album_art_url, timeout=20).read())
                f.close()
                self.album_art_url = "cache/%s.jpg"% (sonos[0])
            except:
                # Download/write failed: fall back to a generic cover image.
                self.album_art_url = "cache/nocover.png"
            self.art[sonos[0]] = sonos[0], sonos[1], self.album_art_url, self.title, self.album, self.album_artist
        #except:
        #    return
        log('Function [GetTrackInfo : %s ]'% (self.art), 'debug')
        return self.art
    def SonosFunctions(self, zonenip, function, value=''):
        """Invoke a SoCo method named *function* on the zone at *zonenip*.

        An empty *value* calls the method without arguments.
        """
        sonos = SoCo(zonenip)
        func = getattr(sonos,function)
        if value == '':
            func()
        else:
            func(value)
        log('Function %s for %s IP'% (function, zonenip), 'debug')
|
# -*- coding: utf-8 -*-
# cuadrics
from sage.all import matrix,var,vector,solve,det
"""Returns whether matrix m is symmetric (assuming it is square)"""
def symmetric(m):
for i in range(m.nrows()):
for j in range(i, m.ncols()):
if m[i][j] != m[j][i]:
return False
return True
"""Class representing a cuadric through its matrix."""
class cuadric:
def __init__(self, m):
self.matrix = matrix(m)
self.dim = self.matrix.nrows()
if(self.dim != self.matrix.ncols() or not symmetric(self.matrix)):
raise ValueError("Matrix must be square and symmetric.")
self.det = det(self.matrix)
#This function is considerably slow and should only be used to probe values
def points(self):
vars = [var('x' + str(i)) for i in range(self.dim)]
x = vector(vars)
return solve(x*self.matrix*x==0, vars)
def isDegenerate(self):
return det(self.matrix) == 0
def explicit_equation(self):
vars = [var('x' + str(i)) for i in range(self.matrix.ncols())]
eq = vector(vars)*self.matrix*vector(vars)==0
return eq.full_simplify() |
from django.contrib.auth.models import User
from django.test import TestCase
class TestListTweets(TestCase):
    """Integration tests for hashtag creation, tweet refresh, and listing."""
    def setUp(self) -> None:
        # An authenticated user plus one tracked hashtag created via the API.
        user = User.objects.create_user(username='user_test', email='test@gmail.com', password='2DF1SD2d2D2@D')
        self.client.login(username='user_test', password='2DF1SD2d2D2@D')
        self.data = {'hashtag': '#python', 'user_id': user.pk}
        self.resp = self.client.post('/create_hashtag', self.data)
    def test_fail_updates_tweets(self):
        # Without the AJAX marker header the view must reject the request.
        resp = self.client.get('/ajax_call/update_tweets')
        resp = resp.json()
        self.assertEqual(resp['status'], 'fail')
    def test_update_tweets(self):
        # X-Requested-With marks the call as AJAX, so the update succeeds.
        resp = self.client.get('/ajax_call/update_tweets', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        resp = resp.json()
        self.assertEqual(resp['status'], 'success')
    def test_list_tweets(self):
        # The index page renders the tweet-list template.
        resp = self.client.get('/')
        self.assertTemplateUsed(resp, 'list_tweets.html')
|
#============================================
# Title: Assignment 9.2
# Author: Don Cousar
# Date: 29 June 2019
# Description: Querying and Creating Documents
#===========================================
# Imports
from pymongo import MongoClient
import pprint
import datetime
# Connect to local MongoDB
client = MongoClient('localhost', 27017)
print(client)
# Use (and lazily create) the 'web335' database.
db = client.web335
# Document describing the new user record.
user = {
    "first_name": "Claude",
    "last_name": "Debussy",
    "email": "cdebussy@me.com",
    "employee_id": "0000008",
    # NOTE(review): utcnow() returns a naive datetime and is deprecated in
    # Python 3.12 — consider datetime.datetime.now(datetime.timezone.utc).
    "date_created": datetime.datetime.utcnow()
}
# Insert User
user_id = db.users.insert_one(user).inserted_id
# Print insert statement
print(user_id)
# Query user
pprint.pprint(db.users.find_one({"employee_id": "0000008"}))
# utf-8
# PA
# Arithmetic-progression printer: shows `termos` terms, then keeps asking
# how many more to print until the user answers 0.
termo1 = int(input('Digite o primeiro termo: '))
razao = int(input('Digite a razão: '))
termos = int(input('Digite quantos termos deseja ver: '))
res1 = 0
x = 0
while True:
    while x < termos:
        print(termo1, end='')
        # Separator between terms; pause marker (with newline) after the last.
        # The original nested a print() call inside the ternary's else branch,
        # relying on end=None falling back to '\n' — same output, done plainly.
        print(end=' → ' if x < termos - 1 else ' → PAUSA\n')
        termo1 += razao
        x += 1
    y = 0
    res = int(input('Deseja quantos termos mais: '))
    res1 += res
    if res != 0:
        while y < res:
            print(termo1, end='')
            print(end=' → ' if y < res - 1 else ' → PAUSA\n')
            termo1 += razao
            y += 1
    else:
        print('Progressão finalizada com {} termos mostrados.'.format(termos+res1))
        break
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
def doc_upload_url():
    """Driver-app URL for the document-upload (bookings) page."""
    return '/'.join((settings.DRIVER_APP_URL, '#', 'bookings'))
def car_listing_url():
    """Driver-app URL for the car listings page."""
    return '/'.join((settings.DRIVER_APP_URL, '#', 'listings'))
def car_details_url(car):
    """Listing-detail URL for *car* (py2: pk rendered via unicode())."""
    return '/'.join((car_listing_url(), unicode(car.pk)))
def bookings():
    """Driver-app URL for the account bookings page."""
    return '/'.join((settings.DRIVER_APP_URL, '#', 'account', 'bookings'))
def password_reset(password_reset):
    """Password-reset URL embedding the reset token."""
    return '/'.join((settings.DRIVER_APP_URL, '#', 'reset_password', password_reset.token))
def driver_account():
    """Driver-app URL for the account page."""
    return '/'.join((settings.DRIVER_APP_URL, '#', 'account'))
def driver_login():
    """Login currently lands on the bookings page."""
    return bookings()
def driver_signup():
    """Driver-app signup flow entry point (phone-number step)."""
    return '/'.join((settings.DRIVER_APP_URL, '#', 'users', 'new', 'phone_number'))
# TODO - remove this
def owner_password_reset(password_reset):
    """Owner-side password-reset URL embedding the reset token."""
    return '/'.join((settings.DRIVER_APP_URL, '#', 'owner_reset_password', password_reset.token))
def terms_of_service():
    """Driver-app URL for the terms-of-service page."""
    return '/'.join((settings.DRIVER_APP_URL, '#', 'terms_of_service'))
def faq():
    """Driver-app URL for the FAQ page."""
    return '/'.join((settings.DRIVER_APP_URL, '#', 'faq'))
|
#This code organizes the individual output files for HFBTHOv300 into two separate files:
# 1. HFBTHOv300_"functional-name"_All_Data.dat (containing the data from every constrained calculation)
# 2. HFBTHOv300_"functional-name"_Ground_State_Data.dat (containing the ground state data for each nucleus from all of the constrained calculations)
#
#The input files are assumed to have the following form: thoout_000_000001.dat
#where "000" is the Team ID and "000001" is the File ID
#
#To run the code, type the following into the terminal:
#
#python SortHFBTHOv300.py "Name_of_functional" "Lowest_Team_ID_Number" "Highest_Team_ID_Number" "Lowest_File_ID_Number" "Highest_File_ID_Number"
import math
import os
import sys
import decimal
import re #Real expressions
import subprocess #Shell commands
#=======================
#Labels for each element
#=======================
#Chemical symbols indexed by Z-1, for Z = 1..120 (replaces a 240-line
#if/elif chain with a constant-time table lookup).
_ELEMENT_SYMBOLS = (
    'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
    'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca',
    'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
    'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr',
    'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn',
    'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd',
    'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb',
    'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg',
    'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th',
    'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm',
    'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds',
    'Rg', 'Cn', 'Nh', 'Fl', 'Mc', 'Lv', 'Ts', 'Og', 'Uue', 'Ubn',
)
def ElementName(Z):
    """Return the chemical symbol for proton number Z (1 <= Z <= 120).

    Raises ValueError for Z outside the table (the original raised an
    uninformative UnboundLocalError in that case).
    """
    if not 1 <= Z <= 120:
        raise ValueError("Z must be in 1..120, got {}".format(Z))
    #========================
    #Outputs the element name
    #========================
    return _ELEMENT_SYMBOLS[Z - 1]
#=====================================================================================================================
#This function takes all of the HFBTHOv300 output files from 'masstable' mode and puts the relevant data into one file
#=====================================================================================================================
def Read_HFBTHO_Masstable(thoout,bl,Output_Dict, Incomp_No, Incomp_Other):
    """Parse one HFBTHO 'thoout' output file.

    Extracted observables are appended as a tuple to Output_Dict[(Z, N)].
    Non-converged runs are recorded in Incomp_No; runs with an unexpected
    CPU-line count in Incomp_Other. `bl` is the block-directory name used
    to build the file ID.
    """
    #==============================================================================================================
    #Goes through every available "thoout" output file, extracts useful data, and puts it onto the list Output_List
    #==============================================================================================================
    # Context manager closes the handle (the original left the file open).
    with open(thoout, encoding="ISO-8859-1") as fh:
        lines = fh.readlines()
    tho_name = thoout.split(".dat")[0]
    tho_name = tho_name.split("_")[-1]
    file_ID = bl + "-" + tho_name
    # Initialize variables (9999 marks "never parsed")
    convergence = "YES"; cpu_count=0
    N, Z, BE = 9999,9999,9999
    pairing_gap_N, pairing_gap_P = 0,0
    rms_radius_N, rms_radius_P, rms_radius_T = 0,0,0
    charge_radius, quad_def_beta2_N, quad_def_beta2_P, quad_def_beta2_T = 0,0,0,0
    quad_moment_Q2_N, quad_moment_Q2_P, quad_moment_Q2_T = 0,0,0
    oct_moment_Q3_N, oct_moment_Q3_P, oct_moment_Q3_T = 0,0,0
    for line in lines:
        if "iterations limit interrupt after1001" in line: convergence = "NO"
        if "CPU" in line: cpu_count += 1
        ss = line.split()
        try:
            #-----------------------------------------
            #Identifies the proton and neutron numbers
            #-----------------------------------------
            if (ss[0] == "Requested"):
                N = int(float(ss[2]) + 0.0001)
                Z = int(float(ss[3]) + 0.0001)
            #---------------------------
            #Identifies the pairing gaps
            #---------------------------
            elif((ss[0] == "delta(n,p),") and (ss[1] == "pwi")):
                pairing_gap_N = float(ss[3]) #Neutron pairing gap
                pairing_gap_P = float(ss[4]) #Proton pairing gap
            #------------------------
            #Identifies the rms-radii
            #------------------------
            elif ((ss[0] == "rms-radius") and (ss[1] == "..........")):
                rms_radius_N = float(ss[2]) #Neutron rms radius
                rms_radius_P = float(ss[3]) #Proton rms radius
                rms_radius_T = float(ss[4]) #Total rms radius
            #----------------------------
            #Identifies the charge radius
            #----------------------------
            elif ((ss[0] == "charge-radius,") and (ss[1] == "r0")):
                charge_radius = float(ss[3]) #Charge radius
            #------------------------------------------------
            #Identifies the quadrupole deformation parameters
            #------------------------------------------------
            elif((ss[0] == "deformation") and (ss[1] == "beta")):
                quad_def_beta2_N = float(ss[3]) #Neutron quadrupole deformation parameter
                quad_def_beta2_P = float(ss[4]) #Proton quadrupole deformation parameter
                quad_def_beta2_T = float(ss[5]) #Total quadrupole deformation parameter
            #---------------------------------
            #Identifies the quadrupole moments
            #---------------------------------
            elif((ss[0] == "quadrupole") and (ss[1] == "moment[b]")) and not quad_moment_Q2_T:
                # This gathers no LN deformation, these are needed for constraint calculation benchmark
                # Current HFBTHO has constraint on no LN deformation.
                quad_moment_Q2_N = float(ss[2]) #Neutron quadrupole moment
                quad_moment_Q2_P = float(ss[3]) #Proton quadrupole moment
                quad_moment_Q2_T = float(ss[4]) #Total quadrupole moment
            #-------------------------------
            #Identifies the octupole moments
            #-------------------------------
            elif((ss[0] == "octupole") and (ss[1] == "moment")) and not oct_moment_Q3_T:
                # This gathers no LN deformation, these are needed for constraint calculation benchmark
                # Current HFBTHO has constraint on no LN deformation.
                oct_moment_Q3_N = float(ss[3]) #Neutron octupole moment
                oct_moment_Q3_P = float(ss[4]) #Proton octupole moment
                oct_moment_Q3_T = float(ss[5]) #Total octupole moment
            #-----------------------------
            #Identifies the binding energy
            #-----------------------------
            elif ((ss[0] == 'tEnergy:') and (ss[1] == 'ehfb(qp)+LN')):
                BE = float(ss[2]) #Binding Energy
            #---------------------------------------------------------
            #No useful pieces of information, moves onto the next line
            #---------------------------------------------------------
            else:
                continue
        except IndexError:
            # Blank or too-short line: nothing to parse.
            continue
    # Z stays 9999 when no "Requested" line was found -> drop the file.
    if Z > 200: return
    # A clean run prints exactly two CPU-time lines.
    if cpu_count != 2: convergence = "***"
    if (Z,N) not in Output_Dict: Output_Dict[(Z,N)] = []
    Output_Dict[(Z,N)].append((Z,N,BE,quad_def_beta2_P,quad_def_beta2_N,quad_def_beta2_T,quad_moment_Q2_P,quad_moment_Q2_N,quad_moment_Q2_T,oct_moment_Q3_P,oct_moment_Q3_N,oct_moment_Q3_T,rms_radius_P,
                               rms_radius_N,rms_radius_T,charge_radius,pairing_gap_N,pairing_gap_P,file_ID,convergence))
    if convergence == "NO":
        Incomp_No.append((Z,N,file_ID,"No convergence"))
    if convergence == "***":
        Incomp_Other.append((Z,N,file_ID,"No convergence other"))
    return
#===========
#User Inputs
#===========
EDFs = ['SLY4', 'SV-MIN', 'UNEDF0', 'UNEDF1', 'UNEDF2'] # 'SKMS', 'SKP', 'UNEDF1-SO'
number_of_shells = 20
# For each functional: parse every thoout_*.dat under its block directories,
# collect observables per (Z, N), then write one sorted "All_Data" file.
for functional in EDFs:
    # Locate block directories
    # Remove stale output files for this functional before regenerating.
    # NOTE(review): os.system with a shell "rm" assumes a POSIX shell.
    os.system("shopt -s extglob\n"+"rm HFBTHOv300_"+functional+"*.dat")
    os.chdir(functional)
    block_ls = os.listdir()
    blocks = []
    for bl in block_ls:
        if 'block' in bl and "." not in bl:
            blocks.append(bl)
    Output_Dict = {} #Dict for output data
    Incomp_No, Incomp_Other = [], []
    #----------------------------------------------------------
    #Writes and properly formats the titles for the output file
    #----------------------------------------------------------
    all_data_str = '{:6} {:6} {:9} {:23} {:20} {:22} {:20} {:26} {:30} {:27} {:31} {:34} {:34} {:23} {:21} {:20} {:22} {:22} {:13} {:20} {:6} \n'.format(
        'Z', 'N', 'A', 'Binding_Energy_(MeV)', 'Quad_Def_Beta2_P', 'Quad_Def_Beta2_N', 'Quad_Def_Beta2_total', 'Quad_Moment_Q2_P_(fm^2)', 'Quad_Moment_Q2_N_(fm^2)',
        'Quad_Moment_Q2_total_(fm^2)', 'Octupole_Moment_Q3_P_(fm^3)', 'Octupole_Moment_Q3_N_(fm^3)', 'Octupole_Moment_Q3_total_(fm^3)', 'Pairing_gap_P_(MeV)',
        'Pairing_gap_N_(MeV)', 'RMS_radius_P_(fm)', 'RMS_radius_N_(fm)', 'RMS_radius_total_(fm)', 'Charge_Radius_(fm)', 'File_ID',"Converged")
    for bl in blocks:
        # Enter each block directory and parse all of its thoout files.
        os.chdir(bl)
        tho_ls = os.listdir()
        tho_list = []
        for fn in tho_ls:
            if "thoout" in fn and ".dat" in fn:
                tho_list.append(fn)
        print (functional,"\t",bl, "\tFile Count: ", len(tho_list))
        for ind,thoout in enumerate(tho_list):
            # Progress marker every 1000 files (and at the end).
            if not (ind+1) % 1000 or ind+1 == len(tho_list): print (ind+1,"/",len(tho_list))
            Read_HFBTHO_Masstable(thoout,bl,Output_Dict, Incomp_No, Incomp_Other)
        os.chdir("..")
    # All data of a single EDF should be stored in Output_Dict at this point, now we sort in order of Z,N,BE
    for key in sorted(Output_Dict):
        print (key)
        nuc_all = Output_Dict[key]
        # Sort on binding energy
        for entry in sorted(nuc_all, key = lambda x:x[2]):
            Z, N, BE = entry[0], entry[1], entry[2]
            file_ID, convergence = entry[18], entry[19]
            quad_def_beta2_P, quad_def_beta2_N, quad_def_beta2_T = entry[3],entry[4],entry[5]
            quad_moment_Q2_P, quad_moment_Q2_N, quad_moment_Q2_T = entry[6],entry[7],entry[8]
            oct_moment_Q3_P, oct_moment_Q3_N, oct_moment_Q3_T = entry[9],entry[10],entry[11]
            rms_radius_P, rms_radius_N, rms_radius_T = entry[12],entry[13],entry[14]
            charge_radius, pairing_gap_N, pairing_gap_P = entry[15],entry[16],entry[17]
            all_data_str += '{:6} {:6} {:9} {:23} {:20} {:22} {:20} {:26} {:30} {:27} {:31} {:34} {:34} {:23} {:21} {:20} {:22} {:22} {:13} {:20} {:6}\n'.format(
                str(Z), str(N), str(Z+N), str(BE).rjust(13, ), str(quad_def_beta2_P).rjust(10, ), str(quad_def_beta2_N).rjust(10, ), str(quad_def_beta2_T).rjust(10, ),
                str(quad_moment_Q2_P).rjust(12, ), str(quad_moment_Q2_N).rjust(12, ), str(quad_moment_Q2_T).rjust(12, ), str(oct_moment_Q3_P).rjust(12, ),
                str(oct_moment_Q3_N).rjust(12, ), str(oct_moment_Q3_T).rjust(12, ), str(pairing_gap_P).rjust(10, ), str(pairing_gap_N).rjust(10, ),
                str(rms_radius_P).rjust(10, ), str(rms_radius_N).rjust(10, ), str(rms_radius_T).rjust(10, ), str(charge_radius).rjust(10, ), str(file_ID).rjust(12, ), str(convergence).rjust(6,))
    os.chdir("..")
    Data_File_Out = "HFBTHOv300_"+functional+"_All_Data_"+str(number_of_shells)+"_shells_no_LN_deformation-masstable.dat" #Output file for Read_HFBTHO_Masstable_Output
    all_data_output = open(Data_File_Out, "w") #Output file for all data
    all_data_output.write(all_data_str)
    all_data_output.close()
    # Report files that did not converge or had an unexpected CPU-line count.
    print ("Incomplete:\n")
    for inp in Incomp_No:
        print (inp[0],"\t",inp[1],"\t",inp[2])
    print ("Incomplete Other:\n")
    for inp in Incomp_Other:
        print (inp[0],"\t",inp[1],"\t",inp[2])
|
import urllib2
from bs4 import BeautifulSoup
# Find and open the URL to scrape
url = 'http://ire.org/conferences/nicar-2014/schedule/'
html = urllib2.urlopen(url).read()
# Open an output file to put our scraper results
outfile = open('nicar_2014.csv', 'a')
# Use BeautifulSoup to extract the course/panel list
# from the schedule page.
# Start by putting each of the daily schedule
# tabs on the page into a list, so we can loop
# over them one at a time.
soup = BeautifulSoup(html)
pane_uls = soup.findAll("ul", "listview pane")
hr = '\n' + ('-' * 30) + '\n'
outfile.write('Title|Location|Time\n')
# Loop through each of the panes ...
for pane in pane_uls:
# And then loop through each schedule item in each pane.
for li in pane.findAll('li'):
# If that schedule item is a hands-on class ...
if li.find('div', "col-10 heading5").text == 'Hands-on' or 'Panel':
title = li.find('h3').text.encode('utf-8').strip()
place, time = li.find('div', 'col-15 meta').find_all('p')
output = title + '|' + place.get_text().encode('utf-8') + '|' + time.get_text().encode('utf-8') + '\n'
print output
outfile.write(output)
|
from abc import ABC
from modules import ModuleBase
import json
import traceback
from utils.log import init_logger
logger = init_logger(__name__)
class ParseDataModule(ModuleBase, ABC):
    """Pipeline module that runs a user-supplied script on each incoming message."""
    def __init__(self):
        super(ParseDataModule, self).__init__()
        # User script source; populated later by init_custom_variables().
        self.script = ""
    def init_custom_variables(self):
        self.script = self.get_custom_variables()['script']
        # The script must define parse_data(data); a call is appended so the
        # result can be captured from the exec() locals dict under 'res'.
        self.script = self.script + "\nres = parse_data(data)"
    def binding_callback(self, ch, method, properties, body):
        """Decode the message body as JSON, run the script, emit the result.

        SECURITY: exec() of an externally supplied script runs arbitrary
        code; only use this with trusted configuration sources.
        """
        data = json.loads(body.decode("utf-8"))
        local = {}
        try:
            exec(self.script,{"data":data},local)
            res = local['res']
            self.emit(json.dumps(res))
        except BaseException as e:
            # NOTE(review): BaseException also swallows KeyboardInterrupt /
            # SystemExit; Exception would normally be sufficient here.
            self.emit(traceback.format_exc(), log=True)
            traceback.print_exc()
            logger.error(e)
            pass
        pass
|
from gluoncv import model_zoo
from mxnet.gluon import nn, HybridBlock
from mxnet import init
import mxnet as mx
class fashion_net_2_branches(HybridBlock):
    """Two-headed classifier on a frozen MobileNetV2 backbone.

    One dense head predicts the clothing class; a second, deeper head
    predicts the colour class. Both share the pretrained feature extractor.
    """
    def __init__(self, num_clothes, num_colors, ctx):
        super(fashion_net_2_branches, self).__init__()
        self._features = model_zoo.get_model('mobilenetv2_1.0', pretrained=True, ctx = ctx).features
        # Freeze the pretrained backbone: no gradients flow into it.
        for _, w in self._features.collect_params().items():
            w.grad_req = 'null'
        self._flatten = nn.Flatten()
        # NOTE(review): _relu is created but never used in hybrid_forward.
        self._relu = nn.Activation(activation='relu')
        self._swish = nn.Swish()
        # Clothes head: Dense(100) -> Swish -> BatchNorm -> Dense(num_clothes).
        self._clothes_fc_1 = nn.Dense(100)
        self._clothes_bn = nn.BatchNorm(center=False, scale=True)
        self._clothes_out = nn.Dense(num_clothes)
        self._clothes_fc_1.initialize(init=init.Xavier(), ctx=ctx)
        self._clothes_bn.initialize(init=init.Zero(), ctx=ctx)
        self._clothes_out.initialize(init=init.Xavier(), ctx=ctx)
        # Colour head: two Dense/Swish/BatchNorm stages then Dense(num_colors).
        self._color_fc_1 = nn.Dense(100)
        self._color_bn_1 = nn.BatchNorm(center=False, scale=True)
        self._color_fc_2 = nn.Dense(50)
        self._color_bn_2 = nn.BatchNorm(center=False, scale=True)
        self._color_out = nn.Dense(num_colors)
        self._color_fc_1.initialize(init=init.Xavier(), ctx=ctx)
        self._color_bn_1.initialize(init=init.Zero(), ctx=ctx)
        self._color_fc_2.initialize(init=init.Xavier(), ctx=ctx)
        self._color_bn_2.initialize(init=init.Zero(), ctx=ctx)
        self._color_out.initialize(init=init.Xavier(), ctx=ctx)
    def hybrid_forward(self, F, x):
        """Return (clothes_logits, color_logits) for input batch x."""
        x = self._features(x)
        clothes_result = self._flatten(x)
        clothes_result = self._clothes_fc_1(clothes_result)
        clothes_result = self._swish(clothes_result)
        clothes_result = self._clothes_bn(clothes_result)
        clothes_result = self._clothes_out(clothes_result)
        color_result = self._flatten(x)
        color_result = self._color_fc_1(color_result)
        color_result = self._swish(color_result)
        color_result = self._color_bn_1(color_result)
        color_result = self._color_fc_2(color_result)
        color_result = self._swish(color_result)
        color_result = self._color_bn_2(color_result)
        color_result = self._color_out(color_result)
        return clothes_result, color_result
# Question
# Suppose you are at a party with n people (labeled from 0 to n - 1) and among them, there may exist one celebrity.
# The definition of a celebrity is that all the other n - 1 people know him/her but he/she does not know any of them.
# Now you want to find out who the celebrity is or verify that there is not one.
# The only thing you are allowed to do is to ask questions like: "Hi, A. Do you know B?" to get information of whether A knows B.
# You need to find out the celebrity (or verify there is not one) by asking as few questions as possible (in the asymptotic sense).
# You are given a helper function bool knows(a, b) which tells you whether A knows B.
# Implement a function int findCelebrity(n), your function should minimize the number of calls to knows.
# Note: There will be exactly one celebrity if he/she is in the party. Return the celebrity's label if there is a celebrity in the party.
class Solution:
    """Find the celebrity at a party via two-pointer elimination.

    A celebrity is a person whom all other n-1 people know but who knows
    none of them.  By default this relies on the platform-provided global
    ``knows(a, b)`` predicate.
    """

    @staticmethod
    def find_celeb2(n, knows_fn=None):
        """Return the celebrity's label among ``n`` people, or -1 if none.

        O(n) calls to the predicate, O(1) extra space.

        Original code declared ``def find_celeb2(n)`` inside the class with no
        ``self``/``@staticmethod``, so instance calls passed the instance as
        ``n``; ``@staticmethod`` keeps ``Solution.find_celeb2(n)`` working and
        fixes ``Solution().find_celeb2(n)``.

        Args:
            n: number of people, labeled ``0 .. n-1``.
            knows_fn: optional predicate ``knows_fn(a, b) -> bool`` telling
                whether a knows b; defaults to the platform global ``knows``.
        """
        if knows_fn is None:
            knows_fn = knows  # platform-provided helper

        # Elimination pass: if left knows right, left cannot be the celebrity;
        # otherwise right cannot be.  Exactly one candidate survives.
        left_idx, right_idx = 0, n - 1
        while left_idx < right_idx:
            if knows_fn(left_idx, right_idx):
                left_idx += 1
            else:
                right_idx -= 1

        # Verification pass: elimination only rules people OUT, so confirm the
        # candidate knows nobody and is known by everybody.
        for idx in range(n):
            if idx == left_idx:
                continue
            if knows_fn(left_idx, idx):
                return -1
            if not knows_fn(idx, left_idx):
                return -1
        return left_idx
|
import datetime
import json
import logging
import webapp2
from google.appengine.api.app_identity import app_identity
from src.commons.config.configuration import configuration
from src.datastore_export.export_datastore_to_big_query_service import \
ExportDatastoreToBigQueryService
class ExportDatastoreToBigQueryHandler(webapp2.RequestHandler):
    """Cron endpoint that kicks off an export of Datastore entities to BigQuery
    and reports the outcome as a JSON status body."""

    def get(self):
        """Handle the cron GET: run the export and write the result response."""
        timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        export_date = timestamp.split("_")[0]
        target_uri = self.__create_gcs_output_url(timestamp)
        requested_kinds = self.request.get_all('kind')
        logging.info("Scheduling export of Datastore backups to Big Query...")
        succeeded = ExportDatastoreToBigQueryService(export_date) \
            .export(target_uri, requested_kinds)
        self.__parse_result(succeeded)

    def __parse_result(self, finished_with_success):
        """Translate the export outcome into an HTTP status, JSON body and log line."""
        if finished_with_success:
            http_status, response_status = 200, "success"
        else:
            http_status, response_status = 500, "failed"
        self.response.set_status(http_status)
        self.response.out.write(json.dumps({'status': response_status}))
        self.response.headers['Content-Type'] = 'application/json'
        if finished_with_success:
            logging.info("Export of DS entities to BQ finished successfully.")
        else:
            logging.warning(
                "Export of DS entities to BQ finished with some warnings.")

    @staticmethod
    def __create_gcs_output_url(gcs_folder_name):
        """Build the gs:// URI prefix in the app's staging bucket for this run."""
        app_id = app_identity.get_application_id()
        return "gs://staging.{}.appspot.com/{}" \
            .format(app_id, gcs_folder_name)
# WSGI entry point: route the cron URL to the export handler; debug mode is
# taken from the project-wide configuration object.
app = webapp2.WSGIApplication([
    webapp2.Route('/cron/export-datastore-to-big-query',
                  ExportDatastoreToBigQueryHandler)
], debug=configuration.debug_mode)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.