text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SalePlanInfo(object):
    """Model object for an Alipay sale plan.

    Exposes the five sale-plan attributes as properties and converts
    to/from the dict representation used by the Alipay OpenAPI.
    """

    # Attribute names; these double as the keys in the alipay dict form.
    _PARAM_NAMES = ('custom_price_desc', 'main_ps_id', 'price_desc',
                    'price_type', 'ps_id')

    def __init__(self):
        self._custom_price_desc = None
        self._main_ps_id = None
        self._price_desc = None
        self._price_type = None
        self._ps_id = None

    @property
    def custom_price_desc(self):
        return self._custom_price_desc

    @custom_price_desc.setter
    def custom_price_desc(self, value):
        self._custom_price_desc = value

    @property
    def main_ps_id(self):
        return self._main_ps_id

    @main_ps_id.setter
    def main_ps_id(self, value):
        self._main_ps_id = value

    @property
    def price_desc(self):
        return self._price_desc

    @price_desc.setter
    def price_desc(self, value):
        self._price_desc = value

    @property
    def price_type(self):
        return self._price_type

    @price_type.setter
    def price_type(self, value):
        self._price_type = value

    @property
    def ps_id(self):
        return self._ps_id

    @ps_id.setter
    def ps_id(self, value):
        self._ps_id = value

    def to_alipay_dict(self):
        """Serialize the populated (truthy) attributes into a plain dict."""
        params = dict()
        for key in self._PARAM_NAMES:
            value = getattr(self, key)
            if not value:
                continue
            # Nested model objects serialize themselves recursively.
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a SalePlanInfo from an alipay dict; returns None for empty input."""
        if not d:
            return None
        o = SalePlanInfo()
        for key in SalePlanInfo._PARAM_NAMES:
            if key in d:
                setattr(o, key, d[key])
        return o
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Each ring index maps to a radial distance band this many angstroms wide.
ringwidth = 3

# Columns 1 and 2 of thickness.txt are the ring index and the membrane thickness.
# sep=r'\s+' replaces the deprecated delim_whitespace=True (same behavior).
data = pd.read_csv('thickness.txt', sep=r'\s+', usecols=(1, 2),
                   names=['ring', 'thickness'])
# Convert ring index to physical distance from the protein.
data['ring'] = data['ring'] * ringwidth

sns.lineplot(x='ring', y='thickness', data=data, err_style='bars')
plt.title('Thickness of Membrane Radially From Protein')
plt.xlabel('Distance from Protein (A)')
plt.ylabel('Thickness (A)')  # bug fix: label was misspelled "Thickenss"
plt.show()
|
from django.db import models
from django.conf import settings
from django.urls import reverse
import datetime
# Create your models here.
class Order(models.Model):
    """A purchase order raised by a user against a supplier."""
    # (stored value, human-readable label) pairs for the category field.
    CATEGORY=(
        ('COMPUTER','COMPUTER'),
        ('FURNITURE','FURNITURE'),
        ('OFFICE_EQUIPMENT','OFFICE EQUIPMENT'),
        ('LINK_EQUIPMENTS','LINK_EQUIPMENTS'),
        ('SERVER','SERVER'),
    )
    description=models.TextField(max_length=100)
    category=models.TextField(choices=CATEGORY)
    cost=models.DecimalField(max_digits=10,decimal_places=2)
    quantity=models.IntegerField(default=1)
    # NOTE(review): this field binding is overwritten by the total_cost()
    # method defined below (same name in the class body), so the column is
    # never registered by Django's metaclass. One of the two should be
    # renamed -- confirm which name callers rely on before changing.
    total_cost=models.DecimalField(default=0,max_digits=10,decimal_places=2)
    order_date=models.DateField(default=datetime.date.today)
    status=models.TextField(max_length=100,default="unapproved")
    comments=models.TextField(max_length=200,default='None')
    document=models.FileField(null=True,upload_to='documents/%Y/%m/%d/')
    user=models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    supplier=models.ForeignKey(
        to='Supplier',
        on_delete=models.CASCADE
    )
    def approve(self):
        """Mark the order approved and persist it immediately."""
        self.status='Approved'
        self.save()
    def reject(self):
        """Mark the order rejected and persist it immediately."""
        self.status='Rejected'
        self.save()
    def total_cost(self):
        """Compute cost * quantity.

        NOTE(review): shares its name with the total_cost field above, and the
        assignment below rebinds ``total_cost`` on the instance, so this method
        only works once per instance. The result is not saved to the database.
        """
        self.total_cost=self.cost*self.quantity
        return self.total_cost
    def __str__(self):
        return self.description
    def get_absolute_url(self):
        """URL of this order's detail page (the 'order:order-detail' route)."""
        return reverse('order:order-detail',kwargs={'pk':self.id})
    class Meta:
        # NOTE(review): Django permissions are (codename, human_name) pairs;
        # this declares ONE permission whose codename is "can_approve_order"
        # and whose display name is "can_reject_order". It was probably meant
        # to be two separate permissions -- confirm before changing, since
        # existing migrations depend on it.
        permissions=(("can_approve_order","can_reject_order"),)
class Supplier(models.Model):
    """A vendor that orders can be placed with."""

    name = models.TextField()
    location = models.TextField()
    phonenumber = models.TextField()
    email = models.EmailField()
    website = models.URLField()

    def __str__(self):
        return self.name
|
#!/usr/bin/env python
import re
import subprocess

from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
@operation
def start(**kwargs):
    """Install the ntp package and start + verify the NTP service.

    Reads the package manager from the node properties, records the located
    binary path in the instance runtime properties, and raises
    NonRecoverableError if the service reports itself inactive.
    """
    package_manager = ctx.node.properties['package_manager']
    ctx.logger.info('Installing ntp')
    install_proc = subprocess.Popen(
        ['sudo', package_manager, 'install', 'ntp'], stdout=subprocess.PIPE)
    # Bug fix: wait for the install to finish before probing/starting the service.
    install_proc.wait()

    which_proc = subprocess.Popen(['which', 'yum'], stdout=subprocess.PIPE)
    # Bug fix: the original condition ``line != '' or r'\n'`` was always true
    # (a non-empty literal is truthy) and compared bytes against str, so the
    # loop never terminated correctly. Iterate until EOF instead.
    for raw_line in which_proc.stdout:
        line = raw_line.decode().rstrip()
        if line:
            # the real code does filtering here
            # we're adding a property which is set during runtime to the runtime
            # properties of that specific node instance
            ctx.instance.runtime_properties['ntp_path'] = line

    subprocess.Popen(['systemctl', 'start', 'ntp']).wait()
    # check if ntp is running, otherwise throw error
    # Bug fix: the status check ran 'systemctl start' a second time instead of
    # 'systemctl status', and the regex parens needed escaping to match the
    # literal text "inactive (dead)".
    systemctl_status_proc = subprocess.Popen(
        ['systemctl', 'status', 'ntp'], stdout=subprocess.PIPE)
    for raw_line in systemctl_status_proc.stdout.readlines():
        if re.search(r'inactive \(dead\)', raw_line.decode()):
            raise NonRecoverableError("Failed to start NTP")
    ctx.logger.info('NTP installed')
|
import requests
import datetime
import time
from collections import namedtuple
from enum import Enum
class Rarity(Enum):
    """Pokemon spawn rarity tiers, in ascending order of rarity."""
    COMMON = 1
    UNCOMMON = 2
    RARE = 3
    VERY_RARE = 4
    ULTRA_RARE = 5
class Team(Enum):
    """The three Pokemon Go teams; values match the server's team ids."""
    MYSTIC=1
    VALOR=2
    INSTINCT=3
# Gym-controlling team id -> display name; 0 means no team holds the gym.
TEAM_NAMES = {
    0: 'Uncontested',
    1: 'Mystic',
    2: 'Valor',
    3: 'Instinct'
}
# A single pokemon sighting: where and when it appears, and what it is.
Spawn = namedtuple('Spawn', [
    'disappear_time',
    'encounter_id',
    'location',
    'pokemon',
])
# Full stats for one pokemon (spawned or defending/raiding at a gym).
Pokemon = namedtuple('Pokemon', [
    'individual_attack',
    'individual_defense',
    'individual_stamina',
    'move_1',
    'move_2',
    'weight',
    'height',
    'cp',
    'cp_multiplier',
    'level',
    'gender',
    'form',
    'id',
    'name',
    'rarity',
    'types',
])
# A pokestop, including any active lure information.
Pokestop = namedtuple('Pokestop', [
    'active_fort_modifier',
    'enabled',
    'last_modified',
    'location',
    'lure_expiration',
    'id',
])
# A gym: controlling team, defenders, and any active raid.
Gym = namedtuple('Gym', [
    'enabled',
    'guard_pokemon_id',
    'id',
    'slots_available',
    'last_modified',
    'location',
    'name',
    'team',
    'team_name',
    'pokemon',
    'total_gym_cp',
    'raid_level',
    'raid_pokemon',
    'raid_start',
    'raid_end',
])
def bts(b):
    """Bool To String: the API expects the literal strings 'true'/'false'."""
    if b:
        return 'true'
    return 'false'
def stv(s):
    """String to value: map 'true'/'false'/'null' to True/False/None.

    Anything else is returned unchanged.
    """
    literals = {'true': True, 'false': False, 'null': None}
    if s in literals:
        return literals[s]
    return s
def string_to_rarity(s):
    """Convert a display string (eg "Common", "Very Rare") into a Rarity member."""
    member_name = s.upper().replace(' ', '_')
    return getattr(Rarity, member_name)
class MonacleScraper(object):
    """Client for a Monacle Pokemon Go map server.

    POSTs bounding-box queries to the server and parses the JSON response
    into Spawn, Pokestop and Gym tuples.
    """

    def __init__(self, url, token):
        """
        url: endpoint to POST map queries to.
        token: API token included with every request.
        """
        self.url = url
        self.token = token
        self.session = requests.Session()

    def get_raw_data(self, sw_point, ne_point, pokemon=True, pokestops=True, lured_only=False, gyms=True):
        """
        Get data from the Monacle server
        pokemon: If true, get data about pokemon spawns
        pokestops: If true, get data about pokestops
        lured_only: If pokestops is true and you set this to true, only return lured pokestops.
        gyms: If true, get data about gyms
        sw_point: South west corner of the box to search, (lat, long)
        ne_point: North east corner of the box to search, (lat, long)

        Returns a (spawns, pokestops, gyms) tuple of lists.
        """
        data = {
            'timestamp': int(time.time()),
            'pokemon': bts(pokemon),
            'lastpokemon': 'true',
            'pokestops': bts(pokestops),
            'lastpokestops': 'true',
            'luredonly': bts(lured_only),
            'gyms': bts(gyms),
            'lastgyms': 'true',
            'scanned': 'false',
            'lastslocs': 'false',
            'spawnpoints': 'false',
            'lastspawns': 'false',
            'swLat': sw_point[0],
            'swLng': sw_point[1],
            'neLat': ne_point[0],
            'neLng': ne_point[1],
            'oSwLat': sw_point[0],
            'oSwLng': sw_point[1],
            'oNeLat': ne_point[0],
            'oNeLng': ne_point[1],
            'eids': '',
            'token': self.token
        }
        r = self.session.post(self.url, data=data)
        response = r.json()

        spawns = []
        # For some stupid reason, the server sometimes returns a dict, sometimes a list. Have to handle this.
        pokemons = response.get('pokemons', [])
        if isinstance(pokemons, dict):
            pokemons = pokemons.values()
        for spawn in pokemons:
            pokemon = Pokemon(
                individual_attack=stv(spawn['individual_attack']),
                individual_defense=stv(spawn['individual_defense']),
                individual_stamina=stv(spawn['individual_stamina']),
                move_1=stv(spawn['move_1']),
                move_2=stv(spawn['move_2']),
                weight=stv(spawn['weight']),
                height=stv(spawn['height']),
                cp=stv(spawn['cp']),
                cp_multiplier=stv(spawn['cp_multiplier']),
                level=stv(spawn['level']),
                gender=stv(spawn['gender']),
                form=stv(spawn['form']),
                id=spawn['pokemon_id'],
                name=spawn['pokemon_name'],
                rarity=string_to_rarity(spawn['pokemon_rarity']),
                types='', # TODO: Make this actually work
            )
            spawns.append(Spawn(
                # Server timestamps are in milliseconds.
                disappear_time=datetime.datetime.fromtimestamp(spawn['disappear_time'] / 1000),
                encounter_id=int(spawn['encounter_id']),
                location=(spawn['latitude'], spawn['longitude']),
                pokemon=pokemon,
            ))

        pokestops = []
        for pokestop in response.get('pokestops', []):
            pokestops.append(Pokestop(
                active_fort_modifier=stv(pokestop['active_fort_modifier']),
                enabled=stv(pokestop['enabled']),
                last_modified=None if pokestop['last_modified'] == 0 else datetime.datetime.fromtimestamp(pokestop['last_modified'] / 1000),
                location=(pokestop['latitude'], pokestop['longitude']),
                lure_expiration='', # TODO: Make this work, map may not be pulling this information
                id=pokestop['pokestop_id']
            ))

        gyms = []
        gyms_list = response.get('gyms', [])
        if isinstance(gyms_list, dict):
            gyms_list = gyms_list.values()
        for gym in gyms_list:
            if gym.get('raid_pokemon_id'):
                # An active raid: only a subset of Pokemon fields is reported.
                raid_pokemon = Pokemon(
                    id=int(gym['raid_pokemon_id']),
                    name=gym['raid_pokemon_name'],
                    cp=gym['raid_pokemon_cp'],
                    move_1=gym['raid_pokemon_move_1'],
                    move_2=gym['raid_pokemon_move_2'],
                    individual_attack=None,
                    individual_defense=None,
                    individual_stamina=None,
                    weight=None,
                    height=None,
                    cp_multiplier=None,
                    level=None,
                    gender=None,
                    form=None,
                    rarity=None,
                    types=None
                )
                raid_start = datetime.datetime.fromtimestamp(gym['raid_start'] / 1000)
                raid_end = datetime.datetime.fromtimestamp(gym['raid_end'] / 1000)
            else:
                raid_pokemon = None
                raid_start = None
                raid_end = None
            gyms.append(Gym(
                enabled=stv(gym['enabled']),
                guard_pokemon_id=gym['guard_pokemon_id'],
                id=gym['gym_id'],
                slots_available=int(gym['slots_available']),
                last_modified=None if gym['last_modified'] == 0 else datetime.datetime.fromtimestamp(gym['last_modified'] / 1000),
                location=(gym['latitude'], gym['longitude']),
                name=None, # KentPogoMap doesn't enter gyms for scanning currently
                team=int(gym['team_id']),
                team_name=TEAM_NAMES[int(gym['team_id'])],
                pokemon=[], # KentPogoMap doesn't enter gyms for scanning currently
                total_gym_cp=None,
                # Bug fix: int(gym.get('raid_level')) raised TypeError when the
                # key was absent; treat a missing raid level as 0.
                raid_level=int(gym.get('raid_level') or 0),
                raid_pokemon=raid_pokemon,
                raid_start=raid_start,
                raid_end=raid_end
            ))
        return spawns, pokestops, gyms
|
import uuid
from datetime import timedelta
from django.core.exceptions import ValidationError
from django.db import models
from accounts.models import UserModel
from ntnui.utils.send_email import send_email
from django.utils import timezone
from django.utils.translation import gettext as _
class ResetPassword(models.Model):
    """ Model containing token used to reset user password """
    # Token value embedded in the reset link; generated automatically.
    uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Set on save(): tokens are valid for 24 hours from creation.
    expiration_date = models.DateTimeField(null=False)
    # PROTECT: a user cannot be deleted while a reset token references them.
    user = models.ForeignKey(UserModel, on_delete=models.PROTECT, null=False)
    def save(self, *args, **kwargs):
        """ Deletes old token, and sends a reset password email to the user"""
        # Delete old tokens -- each user has at most one outstanding token.
        ResetPassword.objects.filter(user=self.user).delete()
        # Save the instance; the expiry is always reset to 24h from now.
        self.expiration_date = timezone.now() + timedelta(days=1)
        super().save(*args, **kwargs)
        # NOTE(review): every save() re-sends the email -- confirm this model
        # is only ever saved when a token is first created.
        send_email(
            data={"user": self.user, "token": self.uuid},
            subject=_("NTNUI - Reset password"),
            to=[self.user.email],
            path="emails/reset_password.html",
        )
    @classmethod
    def validate_token(cls, token):
        """ Ensure that the token exists and that it is valid"""
        # ValidationError covers malformed UUID strings passed as *token*.
        try:
            reset_password = cls.objects.get(uuid=token)
            return reset_password.expiration_date > timezone.now()
        except (cls.DoesNotExist, ValidationError):
            return False
|
#!/usr/local/bin/python
#-------------------------------------------------------------------------------
# Name: delete_layerInfo
# Purpose:
#
# Author: Gerald Perkins
#
# Created: 19/01/2016
# Copyright: (c) Entiro Systems Ltd. 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import sys, json
from classInfo import *
from gisUser import *
import logging, datetime
timeNow = str(datetime.datetime.now())[0:19]
logging.basicConfig(filename='logs/delete_layerInfo.log', level=logging.INFO)
logging.info("Start " + timeNow)
dataObj = json.load(sys.stdin)
logging.info(str(dataObj))
#dataObj = {'province': 'AB', 'city': 'Calgary', 'description': '', 'practiceType': 'DENTAL', 'authCode': 'glp1519165111.25', 'address': '1021 Maggie St SE', 'userID': '1', 'contactPerson': 'Gerald Perkins', 'saleOrLease': 'SALE', 'status': 'PENDING', 'FID': '2', 'country': 'Canada', 'pdf': '', 'size': '1111'}
print 'Content-Type: application/json\n\n'
# Ensure the user is logged in
userID = dataObj['userID']
authCode = dataObj['authCode']
logging.info("userID: " + userID + ", authCode: " + authCode)
u = GisUser()
u.filterAuthCode(authCode)
loginOK = False
if u.fetch(userID):
if u.isLoggedIn():
loginOK = True
if not loginOK:
logging.info("*** Error *** User not logged in, userID: " + str(userID))
print '{"ERROR":"Not logged in"}'
quit()
theID = dataObj['uniqueID']
logging.info("Layer to delete: " + str(theID))
msg = ""
o = LayerInfo()
o.uniqueID = theID
if o.delete():
msg = "Layer deleted"
else:
errMsg = o.errorMsg()
logging.info(errMsg)
if "not logged in" in errMsg:
print '{"ERROR":"Not logged in"}'
else:
print '{"ERROR":"' + errMsg + '"}';
logging.info(msg)
rtn = '{"status":"SUCCESS","message":"' + str(count) + msg + '"}'
print rtn |
import numpy as np
from pampy import match
class Horse:
    """A racing horse with randomly drawn speed, stability and death rate."""

    death_reason = ['996工作', '未知原因', '突发恶疾', '马腿抽筋', '精疲力尽']
    death_icon = '\N{skull and crossbones}'
    running_icon = '\N{horse}'

    @staticmethod
    def _limiter(data: float, lower: float, upper: float) -> float:
        """Clamp *data* into the closed interval [lower, upper]."""
        return max(lower, min(data, upper))

    def __init__(self, num: int):
        """Draw random attributes for horse number *num*."""
        self.speed = Horse._limiter(np.random.normal(5, 1.5), 0, 10)
        self.death_rate = Horse._limiter(np.random.normal(3, 3), 0, 30) / 100
        self.stability = Horse._limiter(np.random.normal(2.5, 2.5), 0, 6)
        # 80% of horses get the ordinary face, 20% get a unicorn.
        icon_draw = np.random.rand()
        self.stop_icon = '\N{horse face}' if icon_draw < 0.8 else '\N{unicorn face}'
        self.track_length = 30
        self.position = 0
        self.state = 'stop'
        self.number = num
        self.finish_distance = 0

    def get_current_track_str(self):
        """Render this horse's lane; the track runs right-to-left."""
        icon = match(self.state,
                     'stop', self.stop_icon,
                     'running', self.running_icon,
                     'dead', self.death_icon)
        lane = [' '] * self.track_length
        lane[self.position] = icon
        return ''.join(reversed(lane))

    def move(self):
        """Advance one step: the horse may die, otherwise it runs forward."""
        if self.state not in ('running', 'stop'):
            return
        self.state = 'running'
        if np.random.rand() < self.death_rate:
            self.state = 'dead'
            return
        step = Horse._limiter(
            round(self.speed + np.random.normal(0, self.stability)), 0, 15)
        self.position += step
        if self.position >= self.track_length - 1:
            self.finish_distance = self.position
            self.position = self.track_length - 1

    def get_property(self):
        """Human-readable summary of the horse's drawn attributes."""
        return f'{self.number}号马,速度{self.speed:.2f},不稳定性{self.stability:.4f},每次移动意外出局率{self.death_rate:.2%}'

    def __str__(self):
        return self.get_property()
def get_tracks_str(horse_list: list[Horse]) -> str:
    """Render the whole race: one numbered lane per horse, starting at 1."""
    lanes = []
    for rank, horse in enumerate(horse_list, 1):
        lanes.append(f'{rank} |{horse.get_current_track_str()}')
    return '\n'.join(lanes)
import base64
from io import BytesIO
from PIL import Image
def convert_and_save(b64_string, path="imageToSave.jpg"):
    """Decode a base64 image string and write the raw bytes to *path*.

    path: output filename; defaults to the original hard-coded
    "imageToSave.jpg" for backward compatibility.
    """
    with open(path, "wb") as fh:
        fh.write(base64.decodebytes(b64_string.encode()))
def save_captured_image(file, image_name):
    """Decode a captured data-URL image and save it under static/captured/.

    file: string like "data:image/jpeg;base64,<payload>" (payload only also works).
    image_name: basename (without extension) for the saved jpg.
    Returns the path the image was written to.
    """
    # Everything after the first comma is the base64 payload; if there is no
    # comma, find() returns -1 and the whole string is used, as before.
    comma_at = file.find(',')
    payload = bytes(file[comma_at + 1:], encoding="ascii")
    image = Image.open(BytesIO(base64.b64decode(payload)))
    cap_image_path = 'static/captured/' + image_name + '.jpg'
    image.save(cap_image_path)
    return cap_image_path
|
"""The main module for statistics sending package."""
import logging
from argparse import ArgumentParser
from datetime import datetime, timedelta
from json import dump, dumps, load
from os import getenv
try:
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
except ImportError:
from urllib2 import HTTPError, URLError, Request, urlopen
from time import sleep
from traceback import format_exc
from six.moves.configparser import ConfigParser
from vnc_api.vnc_api import VncApi
def parse_args():
    """Parse command-line arguments to start stats service."""
    parser = ArgumentParser()
    parser.add_argument("--config-file", required=True)
    return parser.parse_args()
def parse_config(args):
    """Read the stats-service configuration file and normalise its values.

    Returns a dict with the log file path, the numeric logging level mapped
    from the SYS_* name, the stats server URL and the state file path.
    """
    # Syslog-style severity names -> stdlib logging levels.
    level_by_name = {"SYS_EMERG": logging.CRITICAL,
                     "SYS_ALERT": logging.CRITICAL,
                     "SYS_CRIT": logging.CRITICAL,
                     "SYS_ERR": logging.ERROR,
                     "SYS_WARN": logging.WARNING,
                     "SYS_NOTICE": logging.INFO,
                     "SYS_INFO": logging.INFO,
                     "SYS_DEBUG": logging.DEBUG}
    config = ConfigParser()
    config.read(args.config_file)
    return {"log_file": config.get("DEFAULT", "log_file"),
            "log_level": level_by_name[config.get("DEFAULT", "log_level")],
            "stats_server": config.get("DEFAULT", "stats_server"),
            "state": config.get("DEFAULT", "state")}
def init_logger(log_level, log_file):
    """Create the "stats_client" logger writing to *log_file* at *log_level*."""
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler = logging.FileHandler(filename=log_file)
    handler.setLevel(level=log_level)
    handler.setFormatter(formatter)
    logger = logging.getLogger(name="stats_client")
    logger.setLevel(level=log_level)
    logger.addHandler(handler)
    return logger
class Stats(object):
    """Contrail Statistics class."""

    def __init__(self, client):
        """Collect resource counts from the VNC API *client*."""
        self.tf_id = client.get_default_project_id()
        # Each listing call returns {'<kind>': [...]}; only the counts matter.
        self.vmachines = len(
            client.virtual_machines_list().get('virtual-machines'))
        self.vnetworks = len(
            client.virtual_networks_list().get('virtual-networks'))
        self.vrouters = len(
            client.virtual_routers_list().get('virtual-routers'))
        self.vm_interfaces = len(
            client.virtual_machine_interfaces_list().get(
                'virtual-machine-interfaces'))

    def __str__(self):
        """Represent statistics object as string for logging."""
        summary = {"tf_id": self.tf_id,
                   "vr": self.vrouters,
                   "vm": self.vmachines,
                   "vn": self.vnetworks,
                   "vi": self.vm_interfaces}
        return str(summary)
class Scheduler(object):
    """Schedule job for statistics sending.

    Persists its schedule in a JSON state file. The scheduled timestamp is
    stored as a POSIX timestamp and the frequency as seconds -- bug fix: the
    original passed datetime/timedelta objects straight to ``json.dump``,
    which raised TypeError whenever a frequency tag was configured, so the
    state never survived a restart.
    """
    # Default frequency: None means statistics sending is switched off.
    DEF_SEND_FREQ = None

    def __init__(self, vnc_client, state):
        """Initialise Scheduler instance.

        vnc_client: VncApi client used to read the scheduling tags.
        state: path of the JSON file used to persist scheduling state.
        """
        self.state = state
        self._vnc_client = vnc_client
        self._state_data = self._get_state_data()
        self._send_freq = self._state_data.get(
            "send_freq",
            self._get_updated_send_freq())
        self._sched_job_ts = self._state_data.get(
            "sched_job_ts",
            self._init_first_job())
        self._save_state_data()

    @property
    def sched_job_ts(self):
        """Get scheduled job timestamp for statistics sending."""
        return self._sched_job_ts

    @sched_job_ts.setter
    def sched_job_ts(self, send_freq):
        """Schedule new job for statistics sending (now + *send_freq*)."""
        if (send_freq is not None):
            self._sched_job_ts = datetime.now() + send_freq
        else:
            self._sched_job_ts = None
        self._save_state_data()

    @property
    def send_freq(self):
        """Get sending frequency."""
        return self._send_freq

    @send_freq.setter
    def send_freq(self, updated_send_freq):
        """Set sending frequency."""
        self._send_freq = updated_send_freq
        self._save_state_data()

    def _get_state_data(self):
        """Load persisted state, converting JSON numbers back to objects."""
        try:
            with open(self.state) as json_file:
                raw = load(json_file)
        except (ValueError, IOError):
            return dict()
        state_data = dict()
        if raw.get("send_freq") is not None:
            state_data["send_freq"] = timedelta(seconds=raw["send_freq"])
        if raw.get("sched_job_ts") is not None:
            state_data["sched_job_ts"] = datetime.fromtimestamp(
                raw["sched_job_ts"])
        return state_data

    def _init_first_job(self):
        """First schedule after a fresh start: now + frequency (or None)."""
        if (self.send_freq is not None):
            sched_job_ts = datetime.now() + self.send_freq
        else:
            sched_job_ts = None
        return sched_job_ts

    def _save_state_data(self):
        """Persist the schedule as JSON-serializable numbers (see class doc)."""
        state_data = dict()
        state_data["sched_job_ts"] = (
            self.sched_job_ts.timestamp()
            if self.sched_job_ts is not None else None)
        state_data["send_freq"] = (
            self.send_freq.total_seconds()
            if self.send_freq is not None else None)
        with open(self.state, 'w') as state_file:
            dump(state_data, state_file)

    def _get_updated_send_freq(self):
        """Read the frequency tags from the VNC API; highest-period tag wins."""
        send_freq = Scheduler.DEF_SEND_FREQ
        freq_list = [{"label=stats_monthly": timedelta(days=30)},
                     {"label=stats_weekly": timedelta(days=7)},
                     {"label=stats_daily": timedelta(days=1)},
                     {"label=stats_every_minute": timedelta(minutes=1)}]
        for tag in self._vnc_client.tags_list()["tags"]:
            tag = tag["fq_name"][0]
            for index, freq_item in enumerate(freq_list):
                if tag in freq_item:
                    send_freq = freq_item[tag]
                    # Drop this and all higher-frequency entries so a later
                    # tag can only select a longer period.
                    del freq_list[index:]
                    if not freq_list:
                        return send_freq
        return send_freq

    def is_job(self):
        """Check if there is scheduled job which must be executed now."""
        # statistics will be sent if:
        # 1. frequency of sending was changed
        # and sending is not switched off.
        # 2. statistics sending was not scheduled yet
        # 3. scheduled stats sending timestamp was already passed
        newest_send_freq = self._get_updated_send_freq()
        if newest_send_freq is None:
            self.sched_job_ts = None
            self.send_freq = None
            return False
        elif self.sched_job_ts is None:
            self.send_freq = newest_send_freq
            self.sched_job_ts = self.send_freq
            return True
        elif self.send_freq != newest_send_freq:
            self.send_freq = newest_send_freq
            self.sched_job_ts = self.send_freq
            return True
        elif datetime.now() > self.sched_job_ts:
            self.sched_job_ts = self.send_freq
            return True
        return False
class Postman(object):
    """Send statistics and response from statistics server."""
    # Seconds between main-loop iterations (one hour).
    SLEEP_TIME = 3600
    def __init__(self, stats_server, vnc_client, logger):
        """Initialise Postman instance for statistics sending job.

        stats_server: URL the stats are POSTed to.
        vnc_client: VncApi client the Stats snapshot is collected from.
        logger: logger used for outcome reporting.
        """
        self._vnc_client = vnc_client
        self._stats_server = stats_server
        self.logger = logger
    def send_stats(self):
        """Send statistics to server."""
        self.logger.info("Statistics sending started..")
        # Known HTTP status codes mapped to log-friendly outcomes; 201 is the
        # only code the stats server returns on success.
        RESP = {
            201: {"success": True,
                  "message": ""},
            200: {"success": False,
                  "message": "The server response code is 200. \
Successfull stats server response code is 201."},
            404: {"success": False,
                  "message": "The server URI was not found."},
            400: {"success": False,
                  "message": "Malformed or resubmitted data."}
        }
        stats = Stats(client=self._vnc_client)
        try:
            # POST the stats snapshot as JSON; only the status code is used.
            resp_code = urlopen(
                url=Request(
                    url=self._stats_server,
                    data=dumps(stats.__dict__).encode('utf-8'),
                    headers={'Content-Type': 'application/json'})).code
            def_err = {"success": False,
                       "message": "Uknown error. HTTP code: %s." % resp_code}
        except HTTPError as e:
            resp_code = e.code
            def_err = {
                "success": False,
                "message": "Uknown error. HTTP error code: %s." % resp_code}
        except URLError as e:
            # NOTE(review): URLError.reason is usually an exception or a
            # string, not a sequence -- indexing with [1] may itself raise.
            # Confirm which URLError shapes are expected here.
            resp_code = e.reason[1]
            def_err = {"success": False,
                       "message": "Uknown error. URLError: %s." % resp_code}
        except Exception:
            resp_code = "unknown"
            def_err = {
                "success": False,
                "message": "Unknown error. Traceback: %s" % str(format_exc())}
        finally:
            # Log the mapped outcome (or the default error) in every case.
            self.logger.info(str(RESP.get(resp_code, def_err)))
            self.logger.debug("stats: %s" % (str(stats)))
def main():
    """Do the main logic of statistics service.

    Builds the VNC client from KEYSTONE_AUTH_* environment variables, then
    loops forever: send stats when the scheduler says a job is due, log the
    current schedule, and sleep for Postman.SLEEP_TIME seconds.
    """
    config = parse_config(args=parse_args())
    logger = init_logger(log_level=config["log_level"],
                         log_file=config["log_file"])
    vnc_client = VncApi(username=getenv("KEYSTONE_AUTH_ADMIN_USER"),
                        password=getenv("KEYSTONE_AUTH_ADMIN_PASSWORD"),
                        tenant_name=getenv("KEYSTONE_AUTH_ADMIN_TENANT"))
    scheduler = Scheduler(vnc_client=vnc_client, state=config["state"])
    postman = Postman(stats_server=config["stats_server"],
                      vnc_client=vnc_client,
                      logger=logger)
    while True:
        logger.info("TF usage report client started.")
        if scheduler.is_job():
            postman.send_stats()
        logger.info(
            "Frequency of statistics sending is %s" % str(scheduler.send_freq))
        logger.info(
            "Statistics sending is scheduled at %s" % str(
                scheduler.sched_job_ts))
        sleep(Postman.SLEEP_TIME)
|
# -*- coding: utf-8 -*-
import os
from pathlib import Path
import click
import nbformat
from . import __version__
@click.command(name="nbtouch")
@click.version_option(version=__version__)
@click.argument("file", nargs=-1)
@click.pass_context
def touch(ctx, *args, **kwargs):
    """Update the access and modification times of each Jupyter notebook to the current time.
    If FILE does not exist, it will be created as an empty notebook.
    """
    for target in kwargs["file"]:
        if os.path.exists(target):
            Path(target).touch()
        else:
            # Missing file: write a fresh, empty v4 notebook rather than an
            # empty file, so Jupyter can open it.
            notebook = nbformat.v4.new_notebook()
            with open(target, "w") as handle:
                nbformat.write(notebook, handle, version=4)
|
#!/usr/bin/env python3
import os
import subprocess
import sys
import threading
import time
# Directory containing this script; mcp-exe resolves ASP files relative to it.
loc = os.path.dirname(os.path.abspath(__file__))
asp_path = os.path.join(loc,'asp')
def handlestderr(inhandle, outfile):
    """Copy bytes lines from *inhandle* to *outfile*, timestamping each line."""
    with open(outfile, 'w') as sink:
        for raw in inhandle:
            # Lines already end in '\n', so suppress print's own newline.
            print(time.time(), ':', raw.decode(), file=sink, end='')
def main(args, outhandle=sys.stdout, errfile=None):
    """Run mcp-exe with *args*, echoing its stdout to *outhandle*.

    If *errfile* is given, stderr is captured on a background thread and
    written there with timestamps; otherwise stderr is inherited.
    """
    executable = loc + '/.stack-work/install/x86_64-linux/lts-12.5/8.4.3/bin/mcp-exe'
    proc = subprocess.Popen(
        [executable] + args,
        stdout=subprocess.PIPE,
        stderr=(errfile and subprocess.PIPE),
        env={'ASP_PATH' : asp_path},
    )
    try:
        if errfile is not None:
            threading.Thread(target=handlestderr, args=(proc.stderr, errfile)).start()
        print(proc.stdout.read().decode(), file=outhandle)
    finally:
        # Always terminate the child, even if reading stdout raised.
        proc.kill()
if __name__ == '__main__':
    # CLI entry point: forward all command-line arguments to mcp-exe.
    main(sys.argv[1:])
|
# Interactive anagram checker: optionally explains what an anagram is, then
# reads two lines and compares their case-insensitive letter counts.

def _letter_counts(text):
    """Per-letter counts of *text* for a-z, counting both cases together.

    Equivalent to the original 26 hand-written ``count('x')+count('X')``
    lines, deduplicated into one comprehension.
    """
    return [text.count(ch) + text.count(ch.upper())
            for ch in 'abcdefghijklmnopqrstuvwxyz']

print('This program will check if the two lines typed in are anagrams!\n')
print('Type "help" if you need more information about anagrams,\n\nand "No, thanks" to start!\n')
answer = input()
if answer == 'help':
    print('\nOk, let me tell you about what anagram is.\n')
    input()
    print('If you say the two lines are anagram,\nyou can make the first one by reassembling the second one. \n')
    input()
    print("For example, the word 'desserts' and 'stressed' are anagrams.\n")
    input()
    print("But it doesn't have to be the exact opposite.\n")
    input()
    print("The word 'Elvis' and 'lives' are also anagrams.\n")
    input()
    print('Did you get it? (Y/N)\n')
    answer = input()
    if answer == 'Y':
        print("\nOk, then. Let's start!")
    if answer == 'N':
        print("\nWell, you'll have to learn by doing it. Let's start anyway!")
if answer == 'No, thanks':
    print("\nOk, then. Let's start!")
print('\n')

p = input('Type in the first: ')
q = input('Type in the second: ')
a = _letter_counts(p)
b = _letter_counts(q)

if a.count(0) == 26:
    # The first line contains no English letters at all.
    if b.count(0) == 26:
        print('\nThey should be typed in english...')
    # NOTE(review): when only the first line has no letters, nothing is
    # printed -- preserved from the original control flow.
elif p == q:
    print('\nThey are the same one!')
else:
    if a == b:
        print('\nAnagram!')
    else:
        print('\nNot anagram...')
input('\nPress Enter to continue...')
exit()
|
# -*- coding: utf-8 -*-
class SynDictionary:
    """Maps synonym words to their canonical (root) words."""

    def __init__(self):
        # synonym -> root word; populated by load().
        self.map = {}
        self.load()

    def load(self):
        "builds reverse index syn_word -> root_word from table root_word: sw1, sw2, ... swN"
        # TODO: parse the table below into self.map (currently a stub).
        t = """POS пос
АД декларация, алкаш
обмен репликация
"""
        pass

    def get(self, word):
        """ returns root word or passed word if no root word found """
        # Bug fix: dict has no .exist() method -- the original raised
        # AttributeError on every call; use membership testing instead.
        if word in self.map:
            return self.map[word]
        else:
            return word
class StopWordFilter:
    """Stop-word filter; currently a pass-through stub."""
    def filter(self, word):
        """returns either passed word or empty"""
        return word
class Tokenizer:
    """Splits raw text into tokens (not implemented yet)."""
    pass
class MorphoFilter:
    """Morphological normalisation filter (stub: currently returns None)."""
    def filter(self, word):
        pass
class Index:
    """Stores indexed texts, deduplicating by content hash (TODO)."""
    def __init__(self):
        pass
    def add(self, text_path, text_body):
        # calc text hash
        # if such hash found - ignore the text - because it is processed already
        pass
class Program:
    """Command dispatcher for the indexing tool."""

    def __init__(self):
        # Bug fix: the attribute was named ``index``, which shadowed the
        # index() method on instances and made ``p.index(...)`` raise
        # TypeError ('Index' object is not callable). Renamed to _index.
        self._index = Index()

    def help(self):
        pass

    def create(self, db_path):
        """Create the database folder at *db_path*."""
        self.db_path = db_path
        self.create_folder(db_path)
        # TODO: create sqllite db

    def index(self, source_path):
        """Index the contents of every file under *source_path*."""
        # TODO: loop over files in source folder and index their content,
        # e.g.: self._index.add(text_name, text_body)
        # (the original body referenced undefined text_name/text_body).
        pass

    def source(self, top_word_count):
        # get top words and outputs relevant sources
        pass

    def list(self):
        pass

    def create_folder(self, path):
        pass
# ENTRY
p = Program()
# parse cmd line
# execute command
p.create("./db")
# NOTE(review): on the original Program class this call raised TypeError --
# __init__ assigns self.index = Index(), shadowing the index() method.
p.index("./source")
p.list() #
p.source(10) # source top ten words
# Generated by Django 3.2.3 on 2021-06-19 23:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: creates the servicio, usuario and usuariomoroso tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='servicio',
            fields=[
                ('idservicio', models.IntegerField(primary_key=True, serialize=False, verbose_name='Id de servicio')),
                ('nomservicio', models.CharField(max_length=100, verbose_name='Nombre de servicio')),
                ('valorservicio', models.CharField(max_length=100, verbose_name='valor servicio por hora')),
            ],
        ),
        migrations.CreateModel(
            name='usuario',
            fields=[
                ('idusuario', models.IntegerField(primary_key=True, serialize=False, verbose_name='Id de usuario')),
                ('nombreusuario', models.CharField(max_length=16, verbose_name='Nombre de usuario')),
                # NOTE(review): this looks like a password stored in a plain
                # CharField -- confirm hashing happens elsewhere.
                ('contrausuario', models.CharField(max_length=12, verbose_name='Password')),
            ],
        ),
        migrations.CreateModel(
            name='usuariomoroso',
            fields=[
                ('idmoroso', models.IntegerField(primary_key=True, serialize=False, verbose_name='Id de moroso')),
                ('nombremoroso', models.CharField(max_length=16, verbose_name='Nombre de moroso')),
                ('mesesmoroso', models.CharField(max_length=100, verbose_name='meses de morosidad')),
            ],
        ),
    ]
|
# Copyright 2021 Tampere University and VTT Technical Research Centre of Finland
# This software was developed as a part of the ProCemPlus project: https://www.senecc.fi/projects/procemplus
# This source code is licensed under the MIT license. See LICENSE in the repository root directory.
# Author(s): Amir Safdarian <amir.safdarian@vtt.fi>
# Kalle Ruuth (TAU) <kalle.ruuth@tuni.fi>
# Keski-Koukkari Antti <antti.keski-koukkari@vtt.fi>
# Md Tanjimuddin <md.tanjimuddin@tuni.fi>
# Olli Suominen <olli.suominen@tuni.fi>
# Otto Hylli <otto.hylli@tuni.fi>
# Tanjim <tanjim0023@gmail.com>
# Ville Heikkilä <ville.heikkila@tuni.fi>
# Ville Mörsky (TAU) <ville.morsky@tuni.fi>
"""Tests that take the domain message examples from the wiki pages and create message objects."""
import unittest
from tools.messages import MessageGenerator, QuantityBlock, TimeSeriesBlock
from tools.message.block import ValueArrayBlock
from tools.tools import FullLogger
from domain_messages.ControlState import ControlStatePowerSetpointMessage
from domain_messages.dispatch import DispatchBlock, ResourceForecastStateDispatchMessage
from domain_messages.InitCISCustomerInfo import InitCISCustomerInfoMessage
from domain_messages.LFMMarketResult import LFMMarketResultMessage
from domain_messages.Offer import OfferMessage
from domain_messages.price_forecaster import PriceForecastStateMessage
from domain_messages.Request import RequestMessage
from domain_messages.resource import ResourceStateMessage
from domain_messages.resource_forecast import ResourceForecastPowerMessage
LOGGER = FullLogger(__name__)
# Shared fixtures: one simulation/source identity plus epoch metadata reused
# by every MessageGenerator call in the test case below.
SIMULATION_ID = "2020-01-01T00:00:00.000Z"
SOURCE_PROCESS_ID = "test-component"
EPOCH_NUMBER = 10
TRIGGERING_MESSAGE_IDS = ["component1-10", "component2-10"]
class TestValidMessages(unittest.TestCase):
    """Unit tests for testing some creating message objects using valid parameters.

    Each test builds one domain message through the shared MessageGenerator
    and then asserts only the message type and the type-specific attributes
    (the AbstractResultMessage fields are covered elsewhere).

    NOTE(review): some message classes appear to expose attributes without
    underscores (customerids, marketid, resourceid, pricingtype) while others
    use snake_case (customer_ids, resource_id) — presumably mirroring the
    attribute names in domain_messages; verify against those class definitions.
    """
    # One generator shared by all tests; stamps every message with the same
    # simulation id and source process id.
    generator = MessageGenerator(SIMULATION_ID, SOURCE_PROCESS_ID)
    def test_valid_init_cis_customer_info_message(self):
        """Test creation of InitCISCustomerInfoMessage with valid parameters."""
        resource_id = ["load1", "load2", "load3"]
        customer_id = ["GridA-1", "GridA-1", "GridA-1"]
        bus_name = ["2", "1", ""]
        message = self.generator.get_message(
            InitCISCustomerInfoMessage,
            EpochNumber=EPOCH_NUMBER,
            TriggeringMessageIds=TRIGGERING_MESSAGE_IDS,
            ResourceId=resource_id,
            CustomerId=customer_id,
            BusName=bus_name
        )
        # only test the message type and the attributes that are not included in AbstractResultMessage
        self.assertIsInstance(message, InitCISCustomerInfoMessage)
        if isinstance(message, InitCISCustomerInfoMessage):
            self.assertEqual(message.message_type, "Init.CIS.CustomerInfo")
            self.assertEqual(message.resource_id, resource_id)
            self.assertEqual(message.customer_id, customer_id)
            self.assertEqual(message.bus_name, bus_name)
    def test_valid_lfm_marker_result_message(self):
        """Test creation of LFMMarketResultMessage with valid parameters."""
        activation_time = "2020-06-03T04:00:00.000Z"
        duration = QuantityBlock(Value=60, UnitOfMeasure="Minute")
        direction = "upregulation"
        real_power = TimeSeriesBlock(
            TimeIndex=[
                "2020-06-03T04:00:00.000Z",
                "2020-06-03T04:15:00.000Z",
                "2020-06-03T04:30:00.000Z",
                "2020-06-03T04:45:00.000Z"
            ],
            Series={
                "Regulation": ValueArrayBlock(
                    UnitOfMeasure="kW",
                    Values=[200, 300, 150, 210]
                )
            }
        )
        price = QuantityBlock(Value=50, UnitOfMeasure="EUR")
        customer_ids = ["CustomerIdA", "CustomerIdB"]
        congestion_id = "XYZ"
        offer_id = "Elenia-2020-06-03T04:15:07.456Z"
        result_count = 3
        message = self.generator.get_message(
            LFMMarketResultMessage,
            EpochNumber=EPOCH_NUMBER,
            TriggeringMessageIds=TRIGGERING_MESSAGE_IDS,
            ActivationTime=activation_time,
            Duration=duration,
            Direction=direction,
            RealPower=real_power,
            Price=price,
            CongestionId=congestion_id,
            OfferId=offer_id,
            ResultCount=result_count,
            CustomerIds=customer_ids
        )
        # only test the message type and the attributes that are not included in AbstractResultMessage
        self.assertIsInstance(message, LFMMarketResultMessage)
        if isinstance(message, LFMMarketResultMessage):
            self.assertEqual(message.message_type, "LFMMarketResult")
            self.assertEqual(message.activation_time, activation_time)
            self.assertEqual(message.duration, duration)
            self.assertEqual(message.direction, direction)
            self.assertEqual(message.real_power, real_power)
            self.assertEqual(message.price, price)
            self.assertEqual(message.congestion_id, congestion_id)
            self.assertEqual(message.offer_id, offer_id)
            self.assertEqual(message.result_count, result_count)
            # NOTE(review): attribute is 'customerids' here (no underscore),
            # unlike RequestMessage.customer_ids below — confirm intentional.
            self.assertEqual(message.customerids, customer_ids)
    def test_valid_offer_message(self):
        """Test creation of OfferMessage with valid parameters."""
        activation_time = "2020-06-03T04:00:00.000Z"
        duration = QuantityBlock(Value=60, UnitOfMeasure="Minute")
        direction = "upregulation"
        real_power = TimeSeriesBlock(
            TimeIndex=[
                "2020-06-03T04:00:00.000Z",
                "2020-06-03T04:15:00.000Z",
                "2020-06-03T04:30:00.000Z",
                "2020-06-03T04:45:00.000Z"
            ],
            Series={
                "Regulation": ValueArrayBlock(
                    UnitOfMeasure="kW",
                    Values=[200, 300, 150, 210]
                )
            }
        )
        price = QuantityBlock(Value=50, UnitOfMeasure="EUR")
        customer_ids = ["CustomerIdA", "CustomerIdB"]
        congestion_id = "XYZ"
        offer_id = "Elenia-2020-06-03T04:15:07.456Z"
        offer_count = 1
        message = self.generator.get_message(
            OfferMessage,
            EpochNumber=EPOCH_NUMBER,
            TriggeringMessageIds=TRIGGERING_MESSAGE_IDS,
            ActivationTime=activation_time,
            Duration=duration,
            Direction=direction,
            RealPower=real_power,
            Price=price,
            CongestionId=congestion_id,
            CustomerIds=customer_ids,
            OfferId=offer_id,
            OfferCount=offer_count
        )
        # only test the message type and the attributes that are not included in AbstractResultMessage
        self.assertIsInstance(message, OfferMessage)
        if isinstance(message, OfferMessage):
            self.assertEqual(message.message_type, "Offer")
            self.assertEqual(message.activation_time, activation_time)
            self.assertEqual(message.duration, duration)
            self.assertEqual(message.direction, direction)
            self.assertEqual(message.real_power, real_power)
            self.assertEqual(message.price, price)
            self.assertEqual(message.congestion_id, congestion_id)
            self.assertEqual(message.customerids, customer_ids)
            self.assertEqual(message.offer_id, offer_id)
            self.assertEqual(message.offer_count, offer_count)
    def test_valid_request_message(self):
        """Test creation of RequestMessage with valid parameters."""
        activation_time = "2020-06-03T04:00:00.000Z"
        duration = QuantityBlock(Value=30, UnitOfMeasure="Minute")
        direction = "upregulation"
        real_power_min = QuantityBlock(Value=100.0, UnitOfMeasure="kW")
        real_power_request = QuantityBlock(Value=700.0, UnitOfMeasure="kW")
        customer_ids = ["Elenia10", "Elenia170"]
        congestion_id = "XYZ"
        message = self.generator.get_message(
            RequestMessage,
            EpochNumber=EPOCH_NUMBER,
            TriggeringMessageIds=TRIGGERING_MESSAGE_IDS,
            ActivationTime=activation_time,
            Duration=duration,
            Direction=direction,
            RealPowerMin=real_power_min,
            RealPowerRequest=real_power_request,
            CustomerIds=customer_ids,
            CongestionId=congestion_id
        )
        # only test the message type and the attributes that are not included in AbstractResultMessage
        self.assertIsInstance(message, RequestMessage)
        if isinstance(message, RequestMessage):
            self.assertEqual(message.message_type, "Request")
            self.assertEqual(message.activation_time, activation_time)
            self.assertEqual(message.duration, duration)
            self.assertEqual(message.direction, direction)
            self.assertEqual(message.real_power_min, real_power_min)
            self.assertEqual(message.real_power_request, real_power_request)
            self.assertEqual(message.customer_ids, customer_ids)
            self.assertEqual(message.congestion_id, congestion_id)
            # BidResolution was not supplied, so it must default to None.
            self.assertIsNone(message.bid_resolution)
    def test_control_state_power_setpoint_message(self):
        """Test creation of ControlStatePowerSetpointMessage with valid parameters."""
        real_power = QuantityBlock(Value=100.0, UnitOfMeasure="kW")
        reactive_power = QuantityBlock(Value=0.0, UnitOfMeasure="kV.A{r}")
        message = self.generator.get_message(
            ControlStatePowerSetpointMessage,
            EpochNumber=EPOCH_NUMBER,
            TriggeringMessageIds=TRIGGERING_MESSAGE_IDS,
            RealPower=real_power,
            ReactivePower=reactive_power
        )
        # only test the message type and the attributes that are not included in AbstractResultMessage
        self.assertIsInstance(message, ControlStatePowerSetpointMessage)
        if isinstance(message, ControlStatePowerSetpointMessage):
            self.assertEqual(message.message_type, "ControlState.PowerSetpoint")
            self.assertEqual(message.real_power, real_power)
            self.assertEqual(message.reactive_power, reactive_power)
    def test_resource_forecast_state_dispatch_message(self):
        """Test creation of ResourceForecastStateDispatchMessage with valid parameters."""
        dispatch = DispatchBlock(
            ResourceA=TimeSeriesBlock(
                TimeIndex=["2020-06-25T00:00:00Z", "2020-06-25T01:00:00Z"],
                Series={
                    "RealPower": ValueArrayBlock(
                        UnitOfMeasure="kW",
                        Values=[0.2, 0.27]
                    )
                }
            ),
            ResourceB=TimeSeriesBlock(
                TimeIndex=["2020-06-25T00:00:00Z", "2020-06-25T01:00:00Z"],
                Series={
                    "RealPower": ValueArrayBlock(
                        UnitOfMeasure="kW",
                        Values=[0.27, 0.2]
                    )
                }
            )
        )
        message = self.generator.get_message(
            ResourceForecastStateDispatchMessage,
            EpochNumber=EPOCH_NUMBER,
            TriggeringMessageIds=TRIGGERING_MESSAGE_IDS,
            Dispatch=dispatch
        )
        # only test the message type and the attributes that are not included in AbstractResultMessage
        self.assertIsInstance(message, ResourceForecastStateDispatchMessage)
        if isinstance(message, ResourceForecastStateDispatchMessage):
            self.assertEqual(message.message_type, "ResourceForecastState.Dispatch")
            self.assertEqual(message.dispatch, dispatch)
    def test_price_forecast_state_message(self):
        """Test creation of PriceForecastStateMessage with valid parameters."""
        market_id = "assign-market-here"
        resource_id = "assign-resource-here"
        prices = TimeSeriesBlock(
            TimeIndex=[
                "2020-02-17T10:00:00Z",
                "2020-02-17T11:00:00Z",
                "2020-02-17T12:00:00Z"
            ],
            Series={
                "Price": ValueArrayBlock(
                    UnitOfMeasure="{EUR}/(kW.h)",
                    Values=[0.041, 0.042, 0.043]
                )
            }
        )
        message = self.generator.get_message(
            PriceForecastStateMessage,
            EpochNumber=EPOCH_NUMBER,
            TriggeringMessageIds=TRIGGERING_MESSAGE_IDS,
            MarketId=market_id,
            ResourceId=resource_id,
            Prices=prices
        )
        # only test the message type and the attributes that are not included in AbstractResultMessage
        self.assertIsInstance(message, PriceForecastStateMessage)
        if isinstance(message, PriceForecastStateMessage):
            self.assertEqual(message.message_type, "PriceForecastState")
            self.assertEqual(message.marketid, market_id)
            self.assertEqual(message.resourceid, resource_id)
            self.assertEqual(message.prices, prices)
            # PricingType was not supplied, so it must default to None.
            self.assertIsNone(message.pricingtype)
    def test_resource_state_message(self):
        """Test creation of ResourceStateMessage with valid parameters."""
        customerid = "customer1"
        real_power = QuantityBlock(Value=100.0, UnitOfMeasure="kW")
        reactive_power = QuantityBlock(Value=0.0, UnitOfMeasure="kV.A{r}")
        node = 2
        state_of_charge = QuantityBlock(Value=68.1, UnitOfMeasure="%")
        message = self.generator.get_message(
            ResourceStateMessage,
            EpochNumber=EPOCH_NUMBER,
            TriggeringMessageIds=TRIGGERING_MESSAGE_IDS,
            CustomerId=customerid,
            RealPower=real_power,
            ReactivePower=reactive_power,
            Node=node,
            StateOfCharge=state_of_charge
        )
        # only test the message type and the attributes that are not included in AbstractResultMessage
        self.assertIsInstance(message, ResourceStateMessage)
        if isinstance(message, ResourceStateMessage):
            self.assertEqual(message.message_type, "ResourceState")
            self.assertEqual(message.customerid, customerid)
            self.assertEqual(message.real_power, real_power)
            self.assertEqual(message.reactive_power, reactive_power)
            self.assertEqual(message.node, node)
            self.assertEqual(message.state_of_charge, state_of_charge)
    def test_resource_forecast_power_message(self):
        """Test creation of ResourceForecastPowerMessage with valid parameters."""
        resource_id = "load1"
        forecast = TimeSeriesBlock(
            TimeIndex=[
                "2020-06-25T00:00:00Z",
                "2020-06-25T01:00:00Z",
                "2020-06-25T02:00:00Z",
                "2020-06-25T03:00:00Z"
            ],
            Series={
                "RealPower": ValueArrayBlock(
                    UnitOfMeasure="kW",
                    Values=[-0.2, -0.27, -0.15, -0.21]
                )
            }
        )
        message = self.generator.get_message(
            ResourceForecastPowerMessage,
            EpochNumber=EPOCH_NUMBER,
            TriggeringMessageIds=TRIGGERING_MESSAGE_IDS,
            ResourceId=resource_id,
            Forecast=forecast
        )
        # only test the message type and the attributes that are not included in AbstractResultMessage
        self.assertIsInstance(message, ResourceForecastPowerMessage)
        if isinstance(message, ResourceForecastPowerMessage):
            self.assertEqual(message.message_type, "ResourceForecastState.Power")
            self.assertEqual(message.resource_id, resource_id)
            self.assertEqual(message.forecast, forecast)
if __name__ == "__main__":
    # Allow running this test module directly without an external test runner.
    unittest.main()
|
"""Utility functions for interacting with the Relay service"""
from __future__ import annotations
import asyncio
import base64
import dataclasses
import datetime
import functools
import inspect
import json
import signal
import weakref
from typing import (Any, Awaitable, Callable, Iterable, Mapping, Optional,
Protocol, Union)
def json_object_hook(dct: Mapping[str, Any]) -> Any:
    """JSON object hook that decodes objects tagged with an ``$encoding`` key.

    Objects shaped like ``{"$encoding": "base64", "data": ...}`` decode back
    to bytes; an empty encoding returns the payload untouched.  Anything else
    (unknown encoding, missing ``data``) is handed back as the plain mapping.
    """
    if '$encoding' not in dct:
        return dct
    decoders: Mapping[str, Callable[[str], Any]] = {
        'base64': base64.standard_b64decode,
        '': lambda payload: payload,
    }
    try:
        decode = decoders[dct['$encoding']]
        payload = dct['data']
    except KeyError:
        # Unknown encoding or no payload: return the raw mapping unchanged.
        return dct
    return decode(payload)
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder that additionally handles dataclasses, arbitrary
    iterables, datetimes and bytes.

    Bytes that decode as UTF-8 become plain strings; other bytes become a
    ``{"$encoding": "base64", "data": <ascii str>}`` object, the inverse of
    json_object_hook.
    """

    @functools.singledispatchmethod
    def default(self, obj: Any) -> Any:
        # Fallback for unregistered types: dataclasses become dicts,
        # iterables become lists, everything else raises via the base class.
        if dataclasses.is_dataclass(obj):
            return dataclasses.asdict(obj)
        try:
            it = iter(obj)
        except TypeError:
            pass
        else:
            return list(it)
        return super().default(obj)

    @default.register
    def _datetime(self, obj: datetime.datetime) -> str:
        return obj.isoformat()

    @default.register
    def _bytes(self, obj: bytes) -> Union[str, Mapping[str, Any]]:
        try:
            return obj.decode('utf-8')
        except UnicodeDecodeError:
            # Fix: emit the base64 payload as a str directly.  The original
            # returned raw bytes here, relying on a second recursive
            # default() pass to decode them — and handing bytes to callers
            # that expect the str payload json_object_hook produces.
            return {
                '$encoding': 'base64',
                'data': base64.standard_b64encode(obj).decode('ascii'),
            }
def is_async_callable(obj: Any) -> bool:
    """Return True when *obj* is an async callable: either a coroutine
    function itself, or an object whose ``__call__`` is one."""
    if not callable(obj):
        return False
    if inspect.iscoroutinefunction(obj):
        return True
    return inspect.iscoroutinefunction(obj.__call__)
# Awaitable handle returned by TerminationPolicy.attach(); awaiting it
# completes when the attached task has been asked to shut down.
TerminationEvent = Callable[[], Awaitable[None]]
class TerminationPolicy(Protocol):
    """Structural interface for policies that decide how tasks are terminated."""
    async def attach(self) -> Optional[TerminationEvent]: ...
class NoTerminationPolicy(TerminationPolicy):
    """Policy that never requests termination: attach yields no event."""
    async def attach(self) -> Optional[TerminationEvent]:
        return None
class SoftTerminationPolicy(TerminationPolicy):
    """Cooperative termination policy.

    Each attached task gets an asyncio.Event that is set when the task
    should stop; if ``timeout_sec`` is given, a hard ``task.cancel()`` is
    scheduled that long after the soft request.  Tasks are tracked in a
    WeakKeyDictionary so completed tasks drop out automatically.
    """
    _tasks: weakref.WeakKeyDictionary[asyncio.Task[Any], asyncio.Event]
    _timeout_sec: Optional[float]
    def __init__(self, *, timeout_sec: Optional[float] = None):
        self._tasks = weakref.WeakKeyDictionary()
        self._timeout_sec = timeout_sec
    async def _terminate_task(self, task: asyncio.Task[Any]) -> None:
        # Signal the task's soft-termination event, if the task ever attached.
        event = self._tasks.get(task)
        if event is not None:
            event.set()
        if task.done():
            return
        if self._timeout_sec is not None:
            # Escalate to a hard cancel if the task ignores the event.
            loop = asyncio.get_running_loop()
            loop.call_later(self._timeout_sec, task.cancel)
    def terminate_task(self, task: asyncio.Task[Any]) -> None:
        # Thread-safe entry point: run the termination coroutine on the
        # task's own event loop regardless of the calling thread.
        asyncio.run_coroutine_threadsafe(
            self._terminate_task(task),
            task.get_loop(),
        )
    def terminate_all(self) -> None:
        """Request termination of every currently tracked task."""
        for task in self._tasks:
            self.terminate_task(task)
    async def attach(self) -> Optional[TerminationEvent]:
        """Register the current task and return an awaitable wait() handle."""
        task = asyncio.current_task()
        assert task is not None
        try:
            event = self._tasks[task]
        except KeyError:
            # First attach for this task: create and remember its event.
            event = asyncio.Event()
            self._tasks[task] = event
        async def wait() -> None:
            await event.wait()
        return wait
class SignalTerminationPolicy(TerminationPolicy):
    """Termination policy driven by POSIX signals (default SIGINT/SIGTERM).

    Delegates the per-task bookkeeping to SoftTerminationPolicy; attaching
    installs a loop signal handler per configured signal that soft-terminates
    the attaching task (with the delegate's optional cancel timeout).
    """
    _signals: Iterable[signal.Signals]
    _delegate: SoftTerminationPolicy
    def __init__(self, *,
                 signals: Optional[Iterable[signal.Signals]] = None,
                 timeout_sec: Optional[float] = None):
        if signals is None:
            signals = [signal.SIGINT, signal.SIGTERM]
        self._signals = signals
        self._delegate = SoftTerminationPolicy(timeout_sec=timeout_sec)
    async def attach(self) -> Optional[TerminationEvent]:
        loop = asyncio.get_running_loop()
        task = asyncio.current_task()
        assert task is not None
        # Fix: the original stored the un-awaited delegate coroutine and only
        # ran it via ``return await event`` after installing the handlers, so
        # a signal arriving in between targeted a not-yet-tracked task (and
        # ``event`` confusingly held a coroutine, not the event callable).
        # Await the delegate first, then install the signal handlers.
        event = await self._delegate.attach()
        for sig in self._signals:
            loop.add_signal_handler(sig, self._delegate.terminate_task, task)
        return event
|
import pytest
from sack import sackint
def test_gcd():
    """gcd handles zeros, coprime pairs, composites, big ints and negatives."""
    cases = [
        ((0, 0), 0), ((0, 1), 1), ((1, 0), 1),
        ((1, 2), 1), ((4, 5), 1), ((4, 6), 2),
        ((1296, 1728), 432),
        ((18446744073709551615, 13715753599), 6700417),
        ((13715753599, 18446744073709551615), 6700417),
        ((0, -1), 1), ((-1, 0), 1), ((1, -2), 1), ((-1, 2), 1),
        ((-4, 6), 2), ((4, -6), 2), ((-4, -6), 2),
    ]
    for (a, b), expected in cases:
        assert sackint.gcd(a, b) == expected
def test_extgcd():
    """Extended gcd returns [g, s, t]; order of arguments swaps s and t."""
    cases = [
        ((0, 1), [1, 0, 1]),
        ((1, 1), [1, 0, 1]),
        ((1, 2), [1, 1, 0]),
        ((4, 6), [2, -1, 1]),
        ((1296, 1728), [432, -1, 1]),
        ((1728, 1296), [432, 1, -1]),
        ((18446744073709551615, 13715753599), [6700417, -99, 133148182498]),
        ((13715753599, 18446744073709551615), [6700417, 133148182498, -99]),
    ]
    for (a, b), expected in cases:
        assert sackint.extgcd(a, b) == expected
def test_eulerphi():
    """Euler totient values, including the 0/1 degenerate cases."""
    expected = {
        0: 0, 1: 0, 2: 1, 3: 2, 4: 2, 5: 4, 6: 2, 8: 4, 10: 4,
        100: 40, 101: 100, 1000: 400, 1001: 720,
    }
    for value, phi in expected.items():
        assert sackint.eulerphi(value) == phi
def test_intexp():
    """Integer exponentiation, including the 0**0 == 1 convention."""
    cases = [
        ((0, 0), 1), ((0, 1), 0), ((0, 3), 0),
        ((1, 0), 1), ((2, 0), 1), ((3, 0), 1),
        ((7, 5), 16807),
    ]
    for (base, exponent), expected in cases:
        assert sackint.intexp(base, exponent) == expected
def test_intmodexp():
    """Modular exponentiation, including negative exponents (modular inverses)."""
    cases = [
        (1, 0, 11, 1), (1, 1, 11, 1), (1, -2, 11, 1),
        (2, 0, 11, 1), (2, 1, 11, 2), (2, -1, 11, 6), (2, -2, 11, 3),
        (2, 5, 11, 10), (2, 9, 11, 6), (2, 10, 11, 1),
        (2, 0, 12, 1), (2, 1, 12, 2), (2, 10, 12, 4),
        (3, 0, 11, 1), (3, 1, 11, 3), (3, -1, 11, 4), (3, -2, 11, 5),
        (3, 5, 11, 1), (3, 9, 11, 4), (3, 10, 11, 1),
    ]
    for base, exponent, modulus, expected in cases:
        assert sackint.intmodexp(base, exponent, modulus) == expected
def test_intmodrecip():
    """Modular reciprocals mod 11; non-invertible elements raise ValueError."""
    for value, reciprocal in ((1, 1), (2, 6), (7, 8)):
        assert sackint.intmodrecip(value, 11) == reciprocal
    with pytest.raises(ValueError):
        sackint.intmodrecip(2, 4)
def test_factorial():
    """Small factorials, one large value, and rejection of bad arguments."""
    for value, expected in enumerate([1, 1, 2, 6, 24, 120]):
        assert sackint.factorial(value) == expected
    assert sackint.factorial(37) == 13763753091226345046315979581580902400000000
    with pytest.raises(ValueError):
        sackint.factorial(-1)
    with pytest.raises(TypeError):
        sackint.factorial(8.5)
def test_num_ptns():
    """Partition counts: negatives give 0, non-ints raise, positives count."""
    for negative in (-2, -1):
        assert sackint.num_ptns(negative) == 0
    assert sackint.num_ptns(0) == 1
    with pytest.raises(TypeError):
        sackint.num_ptns(1.5)
    for value, count in ((1, 1), (2, 2), (3, 3), (4, 5), (5, 7), (30, 5604)):
        assert sackint.num_ptns(value) == count
def test_num_ptnsm():
    """Partition counts of 30 with parts bounded by m."""
    bounded_counts = (
        (30, 5604), (29, 5603), (28, 5602), (15, 5096),
        (5, 674), (2, 16), (1, 1), (0, 0),
    )
    for bound, count in bounded_counts:
        assert sackint.num_ptnsm(30, bound) == count
def test_ptns():
    """Explicit partition lists for the first few integers."""
    expected = {
        0: [[]],
        1: [[1]],
        2: [[1, 1], [2]],
        3: [[1, 1, 1], [2, 1], [3]],
        4: [[1, 1, 1, 1], [2, 1, 1], [2, 2], [3, 1], [4]],
    }
    for total, partitions in expected.items():
        assert sackint.ptns(total) == partitions
def test_ptnsm():
    """Partitions of 4 with a maximum-part bound; bound >= 4 gives them all."""
    all_partitions = [[1, 1, 1, 1], [2, 1, 1], [2, 2], [3, 1], [4]]
    assert sackint.ptnsm(4, 0) == []
    assert sackint.ptnsm(4, 1) == all_partitions[:1]
    assert sackint.ptnsm(4, 2) == all_partitions[:3]
    assert sackint.ptnsm(4, 3) == all_partitions[:4]
    assert sackint.ptnsm(4, 4) == all_partitions
    assert sackint.ptnsm(4, 5) == all_partitions
|
import pandas as ps
import json
def readFiles(player_csv='D:/Semester1/VisualAnalytics/Project/indian-premier-league-csv-dataset/Player.csv'):
    """Load the IPL player table as a DataFrame.

    player_csv: any path or buffer accepted by pandas.read_csv.  Defaults to
    the original hard-coded dataset location, so existing no-argument callers
    keep working; the parameter makes the loader reusable and testable.
    """
    player_details = ps.read_csv(filepath_or_buffer=player_csv, sep=',')
    return player_details
def getDetails(player_details, player_id):
    """Return a dict with name, country and age (as of 2017) for *player_id*.

    Fix: the original repeated the boolean-mask row lookup for every field;
    resolve the row label once and reuse it.

    NOTE(review): the age math prefixes "19" to the two-digit DOB year, so it
    assumes every DOB is 'DD-MM-YY' within 1900-1999 — confirm against the
    dataset.
    """
    # Label of the first row matching the player id (raises IndexError if absent).
    row = player_details[player_details['Player_Id'] == player_id].index[0]
    detailshash = {}
    detailshash['name'] = player_details.at[row, 'Player_Name']
    detailshash['country'] = player_details.at[row, 'Country']
    detailshash['age'] = 2017 - int("19" + player_details.at[row, 'DOB'].split('-')[2])
    return detailshash
def getPlayerDetails(player_id):
    """Print the JSON-encoded details for *player_id* (reloads the CSV each call)."""
    player_details = readFiles()
    print(json.dumps(getDetails(player_details, player_id)))
# NOTE(review): module-level side effect — this runs on import; consider an
# ``if __name__ == "__main__":`` guard if the module is ever imported.
getPlayerDetails(4)
from django.apps import AppConfig
class MydbDataLayerConfig(AppConfig):
    """Django application configuration for the mydb_data_layer app."""
    name = 'mydb_data_layer'
|
from flask import render_template, Blueprint, jsonify
pages = Blueprint('pages', __name__)


@pages.route('/', methods=['GET'])
def pre_ship():
    """Serve the index page; on a rendering failure return a JSON HTTP 500."""
    try:
        return render_template('index.html')
    except Exception:
        # Fix: the original error path referenced the undefined name
        # ErrorCode_ServerError, so the handler itself raised NameError;
        # return an explicit 500 status instead.  Also narrowed the bare
        # except so KeyboardInterrupt/SystemExit are not swallowed.
        return jsonify(**{'message': 'Unexpected Error'}), 500
# import os
class Foo:
    """Simple value holder for a (bar, baz) pair."""

    def __init__(self, bar, baz=None):
        # Fix: the original signature omitted ``baz`` yet assigned the
        # undefined name, so every construction raised (TypeError for the
        # two-argument call in main(), NameError otherwise).  Accept it as a
        # parameter; the None default keeps one-argument calls working too.
        self.bar = bar
        self.baz = baz
def main():
    """Demo entry point: construct a Foo and print a greeting."""
    # NOTE(review): ``foo`` is never used afterwards; with the original Foo
    # signature this call raised TypeError — confirm the intended arguments.
    foo = Foo('qux', 1)
    someFunction('Matt')
def someFunction(name):
    """Print a greeting addressed to *name*."""
    greeting = 'Hello, ' + name
    print(greeting)
if __name__ == "__main__":
    # Run the demo only when executed as a script, not on import.
    main()
|
"""
1012. 数字分类 (20)
时间限制
100 ms
内存限制
65536 kB
代码长度限制
8000 B
判题程序
Standard
作者
CHEN, Yue
给定一系列正整数,请按要求对数字进行分类,并输出以下5个数字:
A1 = 能被5整除的数字中所有偶数的和;
A2 = 将被5除后余1的数字按给出顺序进行交错求和,即计算n1-n2+n3-n4...;
A3 = 被5除后余2的数字的个数;
A4 = 被5除后余3的数字的平均数,精确到小数点后1位;
A5 = 被5除后余4的数字中最大数字。
输入格式:
每个输入包含1个测试用例。每个测试用例先给出一个不超过1000的正整数N,随后给出N个不超过1000的待分类的正整数。
数字间以空格分隔。
输出格式:
对给定的N个正整数,按题目要求计算A1~A5并在一行中顺序输出。数字间以空格分隔,但行末不得有多余空格。
若其中某一类数字不存在,则在相应位置输出“N”。
输入样例1:
13 1 2 3 4 5 6 7 8 9 10 20 16 18
输出样例1:
30 11 2 9.7 9
输入样例2:
8 1 2 4 5 6 7 9 16
输出样例2:
N 11 2 N 9
"""
# -*-encoding:utf-8-*-
if __name__ == "__main__":
    # PAT 1012: read N followed by N positive integers, classify by value % 5
    # and report sum / alternating sum / count / mean / max per class, or "N"
    # when a class is empty.
    tokens = [int(token) for token in input().split()]
    count = tokens[0]
    even_mult5 = []    # A1: even multiples of 5 -> summed
    alternating = []   # A2: remainder 1 -> signed for alternating sum n1-n2+n3-...
    remainder2 = []    # A3: remainder 2 -> counted
    remainder3 = []    # A4: remainder 3 -> averaged to one decimal
    remainder4 = []    # A5: remainder 4 -> maximum
    for position in range(1, count + 1):
        value = tokens[position]
        if value % 5 == 0 and value % 2 == 0:
            even_mult5.append(value)
        elif value % 5 == 1:
            # Store with alternating sign so a plain sum() yields n1-n2+n3-...
            alternating.append(value if len(alternating) % 2 == 0 else -value)
        elif value % 5 == 2:
            remainder2.append(value)
        elif value % 5 == 3:
            remainder3.append(value)
        elif value % 5 == 4:
            remainder4.append(value)
    fields = [
        str(sum(even_mult5)) if even_mult5 else "N",
        str(sum(alternating)) if alternating else "N",
        str(len(remainder2)) if remainder2 else "N",
        "%.1f" % (sum(remainder3) / len(remainder3)) if remainder3 else "N",
        str(max(remainder4)) if remainder4 else "N",
    ]
    # Space-separated, no trailing space — identical to the per-field prints.
    print(" ".join(fields))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class DatadigitalFincloudGeneralsaasFaceVerificationInitializeResponse(AlipayResponse):
    """Response model for datadigital.fincloud.generalsaas.face.verification.initialize.

    Auto-generated Alipay SDK style: private fields exposed via property
    getters/setters and populated from the parsed response payload.
    """
    def __init__(self):
        super(DatadigitalFincloudGeneralsaasFaceVerificationInitializeResponse, self).__init__()
        self._certify_id = None  # identifier of the face-verification session
        self._page_url = None  # URL of the hosted verification page
    @property
    def certify_id(self):
        return self._certify_id
    @certify_id.setter
    def certify_id(self, value):
        self._certify_id = value
    @property
    def page_url(self):
        return self._page_url
    @page_url.setter
    def page_url(self, value):
        self._page_url = value
    def parse_response_content(self, response_content):
        """Populate fields from the raw response; absent keys leave None defaults."""
        response = super(DatadigitalFincloudGeneralsaasFaceVerificationInitializeResponse, self).parse_response_content(response_content)
        if 'certify_id' in response:
            self.certify_id = response['certify_id']
        if 'page_url' in response:
            self.page_url = response['page_url']
|
from django.db import models
from django.core.validators import URLValidator
from redactor.fields import RedactorField
# Create your models here.
class Linea_de_servicio(models.Model):
    """A service line: display name, hero images and rich-text motto/description."""
    nombre = models.CharField(max_length=80)
    imagen = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_interna = models.ImageField(upload_to='uploads', default='imagen/default.png')
    texto_lema = RedactorField(verbose_name=u'Lema', default=' ')
    texto_descripcion = RedactorField(verbose_name=u'Descripcion', default=' ')
    def __unicode__(self):
        # Python 2 style representation (pre-Django 2 codebase).
        return self.nombre
class Linea_individual(models.Model):
    """A single service item belonging to a Linea_de_servicio, with
    rich-text content and optional English/French translations."""
    nombre = models.CharField(max_length=100)
    # on_delete=models.CASCADE is Django's historical implicit default;
    # stating it explicitly keeps behavior identical and is mandatory from
    # Django 2.0 onward.
    linea = models.ForeignKey(Linea_de_servicio, blank=True, default='', on_delete=models.CASCADE)
    contenido = RedactorField(verbose_name=u'Descripcion', default=' ')
    texto_descripcion_en = RedactorField(verbose_name=u'Descripcion Ingles', default=' ', blank=True)
    texto_descripcion_fr = RedactorField(verbose_name=u'Descripcion Frances', default=' ', blank=True)
    def __unicode__(self):
        # Python 2 style representation (pre-Django 2 codebase).
        return self.nombre + u" --- " + self.linea.nombre
class Linea_general(models.Model):
    """Holds the four section images (international, property, business,
    litigation) in the default language plus English (_en) and French (_fr)
    variants."""
    nombre = models.CharField(max_length=100)
    imagen_internacional = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_propiedad = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_negocios = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_litigios = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_internacional_en = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_propiedad_en = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_negocios_en = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_litigios_en = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_internacional_fr = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_propiedad_fr = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_negocios_fr = models.ImageField(upload_to='uploads', default='imagen/default.png')
    imagen_litigios_fr = models.ImageField(upload_to='uploads', default='imagen/default.png')
    def __unicode__(self):
        # Python 2 style representation (pre-Django 2 codebase).
        return self.nombre
from dsg.Configuration import Configuration, FilterType
import numpy as np
from typing import List, Tuple
# Module-wide RNG shared by all stochastic helpers below (unseeded, so runs
# are not reproducible).
generator = np.random.default_rng()
def coefficient(z: np.ndarray, refractive_indices: np.ndarray, wavelength) -> float:
    """Transfer-matrix transmission coefficient of a layer stack.

    z: cumulative boundary depths; refractive_indices: index per medium,
    starting with the ambient (air) entry; wavelength: in the same length
    units as z.  Returns the power transmission |1/M00|^2 scaled by the
    outgoing/incoming wavenumber ratio.
    """
    k_air = refractive_indices[0] * 2.0 * np.pi / wavelength
    k_2 = k_air
    m = np.identity(2, dtype="complex128")
    for boundary, depth in enumerate(z):
        # Wavenumbers on either side of this boundary.
        k_1 = k_2
        k_2 = refractive_indices[boundary + 1] * 2.0 * np.pi / wavelength
        t_11 = 0.5 * (1.0 + k_2 / k_1) * np.exp(1j * (k_2 - k_1) * depth)
        t_12 = 0.5 * (1.0 - k_2 / k_1) * np.exp(-1j * (k_2 + k_1) * depth)
        t_21 = 0.5 * (1.0 - k_2 / k_1) * np.exp(1j * (k_2 + k_1) * depth)
        t_22 = 0.5 * (1.0 + k_2 / k_1) * np.exp(-1j * (k_2 - k_1) * depth)
        m = np.matmul(m, np.array([[t_11, t_12], [t_21, t_22]]))
    return k_2 / k_air * np.square(np.abs(1 / m[0][0]))
def coefficient_params(substrate: List[float]) -> Tuple[np.ndarray, np.ndarray]:
    """Derive coefficient() inputs from a list of layer thicknesses.

    Returns (boundary depths, refractive-index profile): depths are the
    cumulative thicknesses, and the profile is air (1) followed by the
    alternating 2.2 / 4.2 pair pattern matching the layer count.
    """
    boundaries = np.cumsum(substrate)
    pair_count = int(len(boundaries) / 2)
    profile = np.concatenate((np.array([1]), np.tile([2.2, 4.2], pair_count)))
    return boundaries, profile
# individuals must be ascending sorted (0.1, 0.2, 0.3, ...)
def cull_individuals(config: Configuration, individuals):
    """Randomly remove members until at most
    population_size - reproduction_size remain.

    NOTE: mutates (and returns) *individuals* in place.  The last element —
    presumably the best under the ascending ordering; confirm — is never
    eligible to die.  Each pass gives every other member a 1/len chance and
    removes at most one member per pass.
    """
    culled = individuals
    while len(culled) > config.population_size - config.reproduction_size:
        death_chance = 1 / len(culled)
        for i in range(0, len(culled) - 1): # Don't kill the best member
            die = generator.uniform(0, 1)
            if die < death_chance:
                culled.remove(culled[i])
                break
    return culled
def exclusion_wavelength(config: Configuration, wavelength: float) -> bool:
    """Return True when *wavelength* lies outside the configured inclusion
    band [inclusion_low, inclusion_high] (band edges count as included)."""
    return wavelength < config.inclusion_low or wavelength > config.inclusion_high
def generate_child(config: Configuration, parent_1: List[float], parent_2: List[float]):
    """Crossover two substrates and mutate the result.

    For each shared gene a fair coin picks the parent; the longer parent
    contributes its uncrossed tail; the child is then passed through
    mutate_substrate.

    Fix: ``generator.integers(0, 1)`` always returns 0 because the upper
    bound is exclusive, so the original always copied parent_1's shared
    genes; ``integers(0, 2)`` restores the intended fair coin.
    """
    child = []
    shared = min(len(parent_1), len(parent_2))
    for i in range(shared):
        if generator.integers(0, 2) == 0:
            child.append(parent_1[i])
        else:
            child.append(parent_2[i])
    # The longer parent contributes its remaining genes unchanged.
    if len(parent_1) > len(parent_2):
        child += parent_1[len(parent_2):]
    else:
        child += parent_2[len(parent_1):]
    return mutate_substrate(config, child)
def generate_substrate(config: Configuration) -> List[float]:
    """Draw a random substrate: a random number of thickness pairs, each
    thickness triangular-distributed on [z_min, z_max] with the mode at the
    midpoint.  (pairs_max is exclusive — presumably intended; confirm.)"""
    n_pairs = generator.integers(config.pairs_min, config.pairs_max)
    mode = (config.z_min + config.z_max) / 2.0
    substrate: List[float] = []
    for _ in range(2 * n_pairs):
        substrate.append(generator.triangular(left=config.z_min, mode=mode, right=config.z_max))
    return substrate
def mutate_substrate(config: Configuration, substrate: List[float]) -> List[float]:
    """Mutate a substrate: optionally add/remove one thickness pair, then
    jitter each thickness with probability mutation_z_chance.

    NOTE: aliases and mutates the input list; callers see the changes.

    Fix: ``generator.integers(0, 1)`` always returns 0 (exclusive upper
    bound), so the original never added or removed pairs and always shrank
    thicknesses on a z-mutation; ``integers(0, 2)`` restores the intended
    fair coin flips.
    """
    mutated = substrate
    # Coin flip: possibly change the number of layer pairs.
    if generator.integers(0, 2) == 1:
        # Second flip: add a fresh pair, or remove one (keeping at least one pair).
        if generator.integers(0, 2) == 1:
            p_1 = generator.triangular(left=config.z_min, mode=(config.z_min + config.z_max) / 2.0, right=config.z_max)
            p_2 = generator.triangular(left=config.z_min, mode=(config.z_min + config.z_max) / 2.0, right=config.z_max)
            mutated.append(p_1)
            mutated.append(p_2)
        elif len(mutated) > 2:
            index = generator.integers(0, int(len(mutated) / 2))
            # NOTE(review): as in the original, this indexes into the first
            # half of the flat list and removes by value — verify the
            # intended pair-removal semantics (index * 2 would address whole
            # pairs, and .remove() deletes the first equal value).
            p_1 = mutated[index]
            p_2 = mutated[index + 1]
            mutated.remove(p_1)
            mutated.remove(p_2)
    # Per-thickness jitter: scale up or down by a uniform factor in
    # [0, mutation_z_factor) with probability mutation_z_chance.
    for i in range(0, len(mutated)):
        die = generator.uniform(0, 1)
        if die < config.mutation_z_chance:
            direction = generator.integers(0, 2)
            amount = generator.uniform(0, config.mutation_z_factor)
            if direction == 1:
                new_z = mutated[i] * (1 + amount)
            else:
                new_z = mutated[i] * (1 - amount)
            mutated[i] = new_z
    return mutated
def select_parent(individuals):
    """Select one individual by repeated uniform trials.

    Walks the population round-robin, accepting each candidate with
    probability 1/len(individuals) per trial, wrapping around until someone
    is accepted.
    """
    reproduction_chance = 1 / len(individuals)
    parent = None
    index = 0
    while parent is None:
        die = generator.uniform(0, 1)
        if die < reproduction_chance:
            parent = individuals[index]
        else:
            index += 1
            if index >= len(individuals):
                index = 0
    return parent
def select_parents(individuals):
    """Select two distinct parents via select_parent.

    NOTE(review): distinctness is checked with value inequality (!=), so two
    different members holding equal values can never be paired, and a
    population of all-equal values loops forever — confirm intent.
    """
    parent_1 = select_parent(individuals)
    parent_2 = None
    while parent_2 is None:
        candidate = select_parent(individuals)
        if parent_1 != candidate:
            parent_2 = candidate
    return parent_1, parent_2
def wavelengths(config: Configuration) -> np.ndarray:
    """Return the sample grid of wavelengths for the configured filter type.

    NOTE(review): falls through and implicitly returns None for any other
    FilterType value — callers must guarantee one of the three handled types.
    """
    if config.filter_type == FilterType.LOW_PASS:
        return np.linspace(config.inclusion_low, config.exclusion_high, config.samples)
    elif config.filter_type == FilterType.HIGH_PASS:
        return np.linspace(config.exclusion_low, config.inclusion_high, config.samples)
    elif config.filter_type == FilterType.FULL:
        return np.linspace(config.exclusion_low, config.exclusion_high, config.samples)
|
import sys
import os
import hashlib
import json
import time
# NOTE(review): Python 2 source (print statements).  time.clock() was removed
# in Python 3.8 — use time.perf_counter() if this script is ever ported.
init_time = time.clock()
try:
    # Output file for the digest report; opened eagerly at import time.
    fd = open('output.json', 'w')
except Exception as E:
    print 'generic exception raised'
    print str(E)
def hash_md5(file):
m = hashlib.md5()
try:
fd = open(file, 'rb').read()
except Exception as E:
print 'generic exception raised'
print str(E)
m.update('')
else:
m.update(open(file, 'rb').read())
return m.hexdigest()
def hash_sha256(file):
    """Return the SHA-256 hex digest of *file*'s contents.

    Fix: close the file deterministically via a context manager instead of
    leaking the handle until garbage collection.
    """
    with open(file, 'rb') as handle:
        m = hashlib.sha256(handle.read())
    return m.hexdigest()
# Walk the directory containing the path given on the command line and dump a
# JSON report of per-file MD5/SHA-256 digests into output.json.
if len(sys.argv) > 1:
    #print os.path.realpath(sys.argv[1])
    dir = os.path.dirname(sys.argv[1])
    big_json = {}
    for root, dirs, files in os.walk(dir):
        print 'root: ', root, type(root)
        print 'dirs: ', dirs, type(dirs)
        for file in files:
            small_json = {}
            print file
            # NOTE(review): each digest is computed twice (once printed, once
            # stored), and big_json is keyed by bare filename, so duplicate
            # basenames in different subdirectories overwrite each other.
            print '\t', hash_md5(os.path.join(root, file))
            print '\t', hash_sha256(os.path.join(root, file))
            small_json['md5'] = hash_md5(os.path.join(root, file))
            small_json['sha256'] = hash_sha256(os.path.join(root, file))
            small_json['nume_fisier'] = file
            big_json[file] = small_json
    fd.write( json.dumps(big_json, sort_keys=True, indent=4, separators=(',', ':')) )
    fd.close()
print 'time elapsed: ', time.clock() - init_time
import tensorflow as tf
def generator(z, feature_depth, hidden_sizes=[128, 256, 256]):
    """Map latent vectors *z* through a leaky-ReLU MLP to tanh-bounded
    features of width *feature_depth* (TF1 layers API)."""
    hidden = z
    for layer_index, layer_width in enumerate(hidden_sizes):
        hidden = tf.layers.dense(
            inputs=hidden,
            units=layer_width,
            activation=tf.nn.leaky_relu,
            name="dense_%i" % layer_index
        )
    return tf.layers.dense(
        inputs=hidden,
        units=feature_depth,
        activation=tf.nn.tanh,
        name="dense_output"
    )
def discriminator(x, hidden_sizes=[256, 256, 128]):
    """Score feature batch *x* with a ReLU MLP; returns raw logits
    (no sigmoid applied) of shape [batch, 1]."""
    hidden = x
    for layer_index, layer_width in enumerate(hidden_sizes):
        hidden = tf.layers.dense(
            inputs=hidden,
            units=layer_width,
            activation=tf.nn.relu,
            name="dense_%i" % layer_index
        )
    return tf.layers.dense(
        inputs=hidden,
        units=1,
        name="dense_output"
    )
class VanillaGAN(object):
    """Minimal GAN wiring in TF1 graph mode: builds the generator and
    discriminator under reusable variable scopes and exposes the minimax
    losses plus the per-network trainable variables."""
    def __init__(self, x, z, feature_depth):
        # x: real data batch; z: latent noise batch; feature_depth: width of
        # the generated feature vectors.
        self.x = x
        self.z = z
        self.feature_depth = feature_depth
        with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
            self.g = generator(self.z, self.feature_depth)
        with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
            # AUTO_REUSE lets the same discriminator weights score both the
            # real batch and the generated batch.
            self.x_score_logits = discriminator(self.x)
            self.g_score_logits = discriminator(self.g)
    def get_minimax_losses(self):
        """Return (loss_g, loss_d, vars_g, vars_d) for the minimax objective.

        loss_g pushes generated samples toward a 'real' (1) label; loss_d
        pushes real toward 1 and generated toward 0.
        """
        loss_g = \
            tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(self.g_score_logits),
                logits=self.g_score_logits
            ))
        loss_d = \
            tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(self.x_score_logits),
                logits=self.x_score_logits
            )) + \
            tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(self.g_score_logits),
                logits=self.g_score_logits
            ))
        vars_g = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope="generator")
        vars_d = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope="discriminator")
        return loss_g, loss_d, vars_g, vars_d
import cv2
import time
from threading import *
class BuildinCamera(Thread):
    """Capture thread around a cv2.VideoCapture (webcam or video file).

    Frames are written to ``self.stream`` and handed to an optional
    ``consumer`` callback; every callable in ``plugins`` runs once per frame.
    """

    def __init__(self):
        Thread.__init__(self)
        self.camera = None        # cv2.VideoCapture, created in init_camera()
        self.conf = None
        self.stream = None        # file-like sink for captured frames
        self.consumer = None      # callback(self, length, frame)
        self.log = None
        self.plugins = []         # per-frame callables, called with self
        self.socket = None
        self.setfname = None
        self.state = 'init'       # init -> idle -> streaming -> closing/pause -> closed
        self.video_port = False
        self.video_path = None    # None => capture device 0, else a video file
        self.frame_no = 0
        self.streamLength = 0
        self.stm = time.time()    # timestamp of the last captured frame
        self.conf = {
            'resolution' : (1024, 768),
            'framerate' : 5,
            'video_port' : False,
            'exposure_mode': 'auto',
            'awb_mode' : 'auto',
            'iso' : 0,
            'shutter_speed': 0,
            'zoom' : (0.0, 0.0, 1.0, 1.0)
        }

    def init_socket(self, video_path=None):
        """Initialise the camera and start the capture thread; returns the state."""
        self.video_path = video_path
        # BUG FIX: the original called self.init_camera(self), passing the
        # thread object itself as `conf`, which clobbered self.conf.
        ret = self.init_camera()
        self.start()
        return ret

    def init_camera(self, conf=None, stream=None, fname="undefined.json"):
        """Open the capture device once, store conf/stream; returns the state."""
        if self.state == 'init':
            if conf:
                self.conf = conf
            # BUG FIX: the original evaluated `'file_name' in conf` even when
            # conf was None, raising TypeError.
            self.setfname = self.conf['file_name'] if (conf and 'file_name' in conf) else fname
            if self.video_path is None:
                self.camera = cv2.VideoCapture(0)
            else:
                self.camera = cv2.VideoCapture(self.video_path)
            # set default camera properties
            # if 'video_port' in conf:
            #     self.video_port = self.conf['video_port']
            self.stream = stream
            self.state = "idle"
        else:
            print("Buildin camera already initialized. Current state is '{}'".format(self.state))
        # self.log = get_camera_settings(self)
        # print(self.log)
        return self.state

    def close(self):
        """Public close: alias for close_camera()."""
        self.close_camera()

    def close_camera(self):
        """Ask the capture loop to stop, wait for it, then release the device."""
        if self.state == 'init':
            return
        if self.state == 'streaming':
            self.state = 'closing'
            i = 0
            # Busy-wait (1 s poll) until run() observes 'closing' and exits.
            while self.state != 'closed':
                i += 1
                print("Finishing current capture{}".format('.' * i), end='\r')
                time.sleep(1)
            self.stream.flush()
        self.camera.release()
        # self.stream = None

    def set_consumer(self, handler):
        # handler(camera, length, frame) is invoked after each captured frame.
        self.consumer = handler

    def set_stream(self, stream):
        self.stream = stream

    def run(self):
        """Thread body: read frames until paused, closed, or the source ends."""
        self.state = "streaming"
        self.stm = time.time()
        self.frame_no = 0
        while True:
            if self.state == 'streaming':
                ret, frame = self.camera.read()
                # BUG FIX: the original ignored `ret`; at end of a video file
                # (or on camera failure) frame is None and len(frame) raised.
                if not ret or frame is None:
                    break
                self.frame_no += 1
                self.streamLength = len(frame)
                self.stream.write(frame)
                self.stream.flush()
                # persistent processors
                for plugin in self.plugins:
                    plugin(self)
                if self.streamLength > 0 and self.consumer is not None:
                    self.consumer(self, self.streamLength, frame)
                self.stm = time.time()
            if self.state in ['pause', 'closing']:
                break
        self.state = "closed"
        print("PiCamera thread stopped!")
|
#!/usr/bin/env python
# coding: utf-8
# In[142]:
import numpy as np
import os
import pandas as pd
import time
from datetime import datetime
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as stats
# Notebook export: the next line only works inside IPython/Jupyter.
get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('seaborn-whitegrid')
# In[119]:
# Load the historical price data (CSV expected next to this script).
df = pd.read_csv("historical_dataset.csv")
# In[120]:
print (df)
# In[121]:
df.head()
# In[122]:
df.info()
# In[123]:
# Sort chronologically in place.
df.sort_values(['date'], ascending = [True], inplace = True)
df.head()
# In[124]:
# Column 2 is presumably the opening price — verify against the CSV header.
print (df.iloc[:,2])
# In[125]:
# Target: column 2; features: columns 4-5 (presumably day_low / close).
target = df.iloc[:,2].values
data = df.iloc[:,4:6]
print(data.head())
# In[126]:
# Fit a two-feature linear regression and predict two sample points.
regression = linear_model.LinearRegression()
regression.fit(data, target)
X = [
[0,0],
[10,10],
]
results = regression.predict(X)
print(results)
# In[127]:
# Scatter: open price vs. day low.
N= 300
open_price=df.iloc[:,2]
day_low=df.iloc[:,4]
plt.scatter(day_low, open_price, color='g')
plt.xlabel('day_low')
plt.ylabel('open_price')
plt.show()
# In[128]:
# Scatter: open price vs. close price.
N=300
open_price=df.iloc[:,2]
close_price=df.iloc[:,5]
plt.scatter(close_price, open_price, color='r')
plt.xlabel('close_price')
plt.ylabel('open_price')
plt.show()
# In[151]:
# Name the positional columns for the OLS fits below.
df['close']=df.iloc[:,5]
df['open']=df.iloc[:,2]
# In[148]:
df['close']=df.iloc[:,5]
print (df['close'])
# In[159]:
# OLS without an intercept (regression through the origin).
data=df['close']
target=df['open']
model=sm.OLS(target,data).fit()
predictions=model.predict(data)
model.summary()
# In[158]:
# Same fit but with an intercept term added via add_constant.
data=df['close']
target=df['open']
data = sm.add_constant(data)
model=sm.OLS(target, data).fit()
predictions=model.predict(data)
model.summary()
# In[160]:
df['close']= df.iloc[:,5]
df['day_high']=df.iloc[:,3]
# In[161]:
# OLS with intercept: day_high ~ close.
data=df['close']
target=df['day_high']
data = sm.add_constant(data)
model=sm.OLS(target, data).fit()
predictions=model.predict(data)
model.summary()
# In[163]:
df['close']= df.iloc[:,5]
df['day_low']=df.iloc[:,4]
# In[164]:
# OLS with intercept: day_low ~ close.
data=df['close']
target=df['day_low']
data = sm.add_constant(data)
model=sm.OLS(target, data).fit()
predictions=model.predict(data)
model.summary()
# In[ ]:
# In[ ]:
|
import sys
import math


def solve_quadratic(a, b, c):
    """Return the list of real roots of a*x^2 + b*x + c = 0.

    BUG FIX: the original computed ``-b/2*a``, which by operator precedence
    is ``(-b/2)*a`` instead of ``-b/(2*a)``.
    """
    delta = b * b - 4 * a * c
    if delta == 0:
        return [-b / (2 * a)]
    if delta > 0:
        sqrt_delta = math.sqrt(delta)
        return [(-b + sqrt_delta) / (2 * a), (-b - sqrt_delta) / (2 * a)]
    return []  # no real roots


if __name__ == "__main__":
    a = int(sys.argv[1])
    b = int(sys.argv[2])
    c = int(sys.argv[3])
    roots = solve_quadratic(a, b, c)
    # Same output format as the original script: root count, then the roots.
    print(len(roots) if roots else 0)
    if roots:
        print(" ".join(format(x) for x in roots))
|
# Generated by Django 2.1.7 on 2019-03-12 14:55
import ckeditor.fields
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: retype/relax several blog model fields."""
    dependencies = [
        ('blog', '0014_auto_20190312_1707'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='category',
            field=models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.SET_NULL, to='blog.Category'),
        ),
        migrations.AlterField(
            model_name='post',
            name='logo',
            field=models.ImageField(blank=True, default=0, storage=django.core.files.storage.FileSystemStorage(location='blog/static/media'), upload_to='logos/'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='post',
            name='more',
            field=ckeditor.fields.RichTextField(blank=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='tags',
            # NOTE(review): null=True has no effect on ManyToManyField and
            # triggers Django system-check warning fields.W340.
            field=models.ManyToManyField(blank=True, null=True, to='blog.Tag'),
        ),
        migrations.AlterField(
            model_name='presentation',
            name='Section',
            field=models.IntegerField(choices=[(0, 'Proclamation'), (2, 'Partners'), (1, 'Advantages')]),
        ),
    ]
|
# Exercicio 18


def valida_data(dia, mes, ano):
    """Return True when dia/mes/ano is a valid calendar date.

    BUG FIX: the original marked EVERY day of February invalid in non-leap
    years (it only set validacao for the 29-day case); days 1-28 are valid.
    """
    if mes in (1, 3, 5, 7, 8, 10, 12):
        return 1 <= dia <= 31
    if mes in (4, 6, 9, 11):
        return 1 <= dia <= 30
    if mes == 2:
        # Gregorian leap-year rule.
        bissexto = (ano % 4 == 0 and ano % 100 != 0) or ano % 400 == 0
        return 1 <= dia <= (29 if bissexto else 28)
    return False  # month outside 1..12


if __name__ == "__main__":
    print("Verificação de uma data")
    dia = int(input("Digite o dia no formato dd: "))
    mes = int(input("Digite o mês no formato mm: "))
    ano = int(input("Digite o dia no formato aaaa: "))
    if valida_data(dia, mes, ano):
        print("Data é valida")
    else:
        print("Data invalida")
|
# encoding:utf-8
import xlrd
class GetRowAndColNumber():
    def getRowAndColNumber(self, excel_path, sheet_name, key):
        """Scan the named sheet of the workbook and return a flat list
        [row, col, row, col, ...] of every cell whose value equals *key*."""
        workbook = xlrd.open_workbook(excel_path)
        sheet = workbook.sheet_by_name(sheet_name)
        matches = []
        for row_idx in range(sheet.nrows):
            for col_idx in range(sheet.ncols):
                if sheet.cell_value(row_idx, col_idx) == key:
                    # Append row then column, exactly like the original.
                    matches.extend((row_idx, col_idx))
        return matches
# pp=GetRowAndColNumber()
# pp.getRowAndColNumber(r"G:\HuaYing\HuaYingData\login_data.xls","username_password_data","case_0004") |
import math  # kept: earlier revisions of this script used math.isnan()

# Read lista.txt, collapse the whitespace on multi-token lines, and append
# the normalised lines to lis.txt.  The original leaked both file handles
# and carried REPL leftovers (`x = float('nan')`, a bare `True`) plus a
# large commented-out string block; those are removed.
normalised = []
with open("lista.txt", "r") as src:
    for line in src:
        tokens = line.split(" ")
        if len(tokens) > 1:
            joined = ''
            for token in tokens:
                # Each token keeps a trailing space, matching the original output.
                joined += token.strip() + " "
            normalised.append(joined + "\n")

print(normalised)

with open("lis.txt", "a") as dst:
    for entry in normalised:
        dst.write(entry)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes for making multiple regex replacements."""
import sys
import regex as re
__author__ = 'Victoria Morris'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'
class MultiRegex(object):
    """Apply a family of alternated named-group regexes in one substitution pass.

    Subclasses define ``regexes``, a tuple of patterns each wrapped in a named
    group.  When a group matches, it is replaced by the class attribute of the
    same name (called with the match object if callable, else used verbatim).
    Group names containing ``UUU`` encode a ``\\u`` escape (group names cannot
    contain backslashes); a group named ``AllElse`` deletes the match.
    """
    simple = False
    regexes = ()

    def __init__(self):
        try:
            self._rx = re.compile('|'.join(self.regexes), flags=re.IGNORECASE)
        except Exception:
            # The combined pattern failed: compile each one individually so
            # the broken pattern can be reported.  (Was a bare except, which
            # also swallowed KeyboardInterrupt/SystemExit.)
            for r in self.regexes:
                try:
                    re.compile(r)
                except Exception:
                    print('Error in regex: {}'.format(str(r)))

    def sub(self, s):
        """Return *s* with all substitutions applied; '' for falsy input."""
        # `not s` already covers None, so the original `or s is None` was redundant.
        if not s:
            return ''
        return self._rx.sub(self._sub, s)

    def _sub(self, mo):
        """Replacement callback: map the matched group name to its substitution."""
        try:
            for k, v in mo.groupdict().items():
                if v:
                    if k == 'AllElse':
                        return ''
                    if 'UUU' in str(k):
                        # Decode the UUU-encoded unicode escape in the group name.
                        return bytes(str(k).replace('UUU', '\\' + 'u'), 'ascii').decode('unicode-escape')
                    try:
                        sub = getattr(self, k)
                        if callable(sub):
                            return sub(mo)
                        else:
                            return sub
                    except Exception:
                        # Missing attribute (or a failing callable): fall back
                        # to the group name itself, as before.
                        return str(k)
        except Exception:
            print('\nError MR: {0}\n'.format(str(sys.exc_info())))
class Abbreviations(MultiRegex):
    """Month, weekday and bibliographic abbreviation expander.

    Each pattern's group name IS the replacement text (see MultiRegex._sub).
    Names containing 'UUU' + 4 hex digits encode a unicode escape, since
    regex group names cannot contain non-ASCII characters or backslashes.
    """
    simple = True
    regexes = (
        r'(?P<January>^jan(uary)?\.*$)',
        r'(?P<February>^feb(ruary)?\.*$)',
        r'(?P<March>^m(ar|rz)(ch)?\.*$)',
        r'(?P<April>^apr(il)?\.*$)',
        r'(?P<June>^june?\.*$)',
        r'(?P<July>^july?\.*$)',
        r'(?P<August>^aug(ust)?\.*$)',
        r'(?P<September>^sept?(ember)?\.*$)',
        r'(?P<October>^o[ck]t(ober)?\.*$)',
        r'(?P<November>^nov(ember)?\.*$)',
        r'(?P<December>^de[cz](ember)?\.*$)',
        r'(?P<Monday>^mon(day)?s?\.*$)',
        r'(?P<Tuesday>^tues?(day)?s?\.*$)',
        r'(?P<Wednesday>^wed(ne)?s?(day)?s?\.*$)',
        r'(?P<Thursday>^thur?s?(day)?s?\.*$)',
        r'(?P<Friday>^fri(day)?s?\.*$)',
        r'(?P<Saturday>^sat(urday)?s?\.*$)',
        r'(?P<Sunday>^sun(day)?s?\.*$)',
        r'(?P<Abbildung>^abb(ildung)?\.*$)',  # German, illustration, figure
        r'(?P<Abdruck>^abdr(uck)?\.*$)',  # German, impression, print, reproduction
        r'(?P<Abhandlung>^abh(andlung)?\.*$)',  # German, treatise
        r'(?P<AbkUUU00FCrzung>^abk(.rzung)?\.*$)',  # German, abbreviation
        r'(?P<Abschrift>^abschr(ift)?\.*$)',  # German, reprint, copy
        r'(?P<Abteilung>^abt(eilung)?\.*$)',  # German
        r'(?P<approximately>^(ca|approx)\.*$)',
        r'(?P<Auflage>^aufl(age)?\.*$)',  # German, edition
        r'(?P<Ausgabe>^ausg(abe)?\.*$)',  # German, edition
        r'(?P<augmented>^aug(mented)\.*$)',
        r'(?P<BUUU00E4ndchen>^b(aen)?dche?n\.*$)',  # German
        r'(?P<BUUU00E4nde>^b(ae?n)?de\.*$)',  # German
        r'(?P<Band>^b(an)?d\.*$)',  # German, volume
        r'(?P<Bearbeitung>^bearb(eitung)?\.*$)',  # German, arrangement
        r'(?P<Beiheft>^beih(eft)?\.*$)',  # German, supplement
        r'(?P<Beispiel>^beisp(iel)?\.*$)',  # German, example
        r'(?P<beziehungsweise>^be?z(iehungs)?w(eise)?\.*$)',  # German, respectively; or, or else; more specifically
        r'(?P<bibliography>^bibl(iog)?(raphy)?\.*$)',
        r'(?P<books>^bo*ks\.*$)',
        r'(?P<book>^bo*k\.*$)',
        r'(?P<Buchhandler>^buchh(andler)?\.*$)',  # German, bookseller
        r'(?P<CDs>^cd-?(rom)?s\.*$)',
        r'(?P<CD>^cd-?(rom)?\.*$)',
        r'(?P<chiefly>^chiefle*y\.*$)',
        r'(?P<cm>^cm\.*$)',
        r'(?P<coloured>^colo+u?red\.*$)',
        r'(?P<colour>^col(o+u?r|eur)?\.*$)',
        r'(?P<columns>^col(umn)?s\.*$)',
        r'(?P<corrected>^corr(ected)?\.*$)',
        r'(?P<cover>^couv(erture)?\.*$)',
        r'(?P<deel>^de*l\.*$)',  # Dutch
        r'(?P<Department>^dept\.*$)',
        r'(?P<diagrams>^diagra?m?s*\.*$)',
        r'(?P<dopolnennoe>^dop(ol)?(nennoe)?\.*$)',  # Russian
        r'(?P<DVDs>^dvd-?(rom)?s\.*$)',
        r'(?P<DVD>^dvd-?(rom)?\.*$)',
        r'(?P<UUU00E9dition>^[\u00e9\u00C9]d(ition)?\.*$)',  # édition
        r'(?P<edition>^ed(itio)?n?\.*$)',
        r'(?P<Einleitung>^einl(eitung)?\.*$)',  # German, introduction
        r'(?P<ekdosi>^ekd(osi)?\.*$)',  # Greek
        r'(?P<engraved>^engr(aved)?\.*$)',
        r'(?P<enlarged>^enl(arged)?\.*$)',
        r'(?P<erweiterte>^erw(eit)?(erte)?\.*$)',  # German
        r'(?P<fascicule>^fasc(icule)?\.*$)',  # French
        r'(?P<facsimiles>^fa(cs|sc)(im)?(ile)?s\.*$)',
        r'(?P<facsimile>^fa(cs|sc)(im)?(ile)?\.*$)',
        r'(?P<feet>^f[e]*t\.*$)',
        r'(?P<figures>^fig(ures)?s*\.*$)',
        r'(?P<folded>^(ofld|fold(ed)?)\.*$)',
        r'(?P<folio>^fol[io.]*\.*$)',
        r'(?P<folios>^fol[io.]*s\.*$)',
        r'(?P<frames>^fr(ame)?s*\.*$)',
        r'(?P<frontispiece>^front(\.|is)(piece)?\.*$)',
        r'(?P<gedruckt>^gedr(uckt)?\.*$)',  # German, printed
        r'(?P<Gegenwart>^gegenw(art)?\.*$)',  # German, present time
        r'(?P<genealogical>^geneal(ogical)?\.*$)',
        r'(?P<geological>^geol(og)?(ical)?\.*$)',
        r'(?P<garren>^g(arre)?n\.*$)',  # Basque, nth
        r'(?P<Handbuch>^h(an)?db(uch)?\.*$)',  # German, handbook, manual
        r'(?P<hardback>^h(ard)?b(ac)?k\.*$)',
        r'(?P<Hefte>^he*fte\.*$)',  # German
        r'(?P<Heft>^he*ft\.*$)',  # German
        r'(?P<Herausgeber>^he?r(au)?sg(eber)?\.*$)',  # German, editor
        r'(?P<illustrations>^a?il+u?s?(tration.*)?s?\.*$)',
        r'(?P<impression>^impr(ession)?\.*$)',
        r'(?P<including>^incl?(uding)?\.*$)',
        r'(?P<introduction>^introd(uction)?\.*$)',
        r'(?P<ispravlennoe>^ispr(avl)?(ennoe)?\.*$)',  # Russian
        r'(?P<izdaniye>^izd(aniye)?\.*$)',  # Russian
        r'(?P<Jahreszahl>^j(ahres)?z(ah)?l\.*$)',  # German, date, year
        r'(?P<jaargang>^jaarg(ang)?\.*$)',  # Dutch
        r'(?P<Jahrgang>^jahrg(ang)?\.*$)',  # German
        r'(?P<Jahrhundert>^j(ahr)?h(undert)?\.*$)',  # German, century
        r'(?P<knjiga>^knj(iga)?\.*$)',  # Croatian
        r'(?P<mahadurah>^mahad(urah)?\.*$)',  # Hebrew
        r'(?P<manuscript>^m(ss*|anuscripts?)\.*$)',
        r'(?P<microfiche>^micr[io]-*fiches*\.*$)',
        r'(?P<microfilm>^micr[io]-*film*\.*$)',
        r'(?P<minutes>^min(ute)?s\.*$)',
        r'(?P<Mitarbeiter>^mitarb(eiter)?\.*$)',  # German, collaborator
        r'(?P<Mitwirkung>^mitw(irkung)?\.*$)',  # German, cooperation
        r'(?P<mm>^mm\.*$)',
        r'(?P<music>^mus(ic)?\.*$)',
        r'(?P<Nachricht>^nachr(icht)?\.*$)',  # German, communication, report, notice
        r'(?P<Nachwort>^nachw(ort)?\.*$)',  # German, concluding remarks, epilogue
        r'(?P<nakladateUUU0142stvUUU00ed>^nakl(ad)?(ate)?\.*$)',  # Czech, nakladatełství
        r'(?P<Neudruck>^neudr(uck)?\.*$)',  # German, reprint
        r'(?P<nouvelle>^nouv(elle)?\.*$)',  # French
        r'(?P<numbers>^n-*(o|ro?|um+b?ero?)s*\.*$)',
        r'(?P<oblong>^obl(ong)?\.*$)',
        r'(?P<Originalausgabe>^Originalausg(abe)?\.*$)',  # German
        r'(?P<pages>^pp+(age)?s*\.*$)',
        r'(?P<paperback>^p(aper)?b(ac)?k\.*$)',
        r'(?P<parts>^p(ar)?t\.*$)',
        r'(?P<patippu>^pat(ippu)?\.*$)',  # Russian
        r'(?P<plates>^pl(at)?e?s*\.*$)',
        r'(?P<poprawione>^popr(awione)?\.*$)',  # Polish, corrected
        r'(?P<portraits>^portr?(ait)?s*\.*$)',
        r'(?P<reprinted>^re-*pr(int)?(ed)?\.*$)',
        r'(?P<revised>^rev(ised)?\.*$)',
        r'(?P<Sammelwerk>^s(ammel)?w(er)?k\.*$)',  # German, collected works
        r'(?P<Sammlung>^samml(ung)?\.*$)',  # German, collection, compilation, set
        r'(?P<Schriftleiter>^schriftl(eiter)?\.*$)',  # German, editor
        r'(?P<selfUUU002Dportraits>^self-?portr?(ait)?s*\.*$)',
        r'(?P<series>^ser(ies)?\.*$)',
        r'(?P<sheet>^sh\.*$)',
        r'(?P<stereograph>^stereo-?graph\.*$)',
        r'(?P<sound>^s(oun)?d\.*$)',
        r'(?P<Stimmbuch>^st(imm)?b(uch)?\.*$)',  # German, part book
        r'(?P<supplement>^suppl?(ement)?\.*$)',
        r'(?P<svazek>^sv(azek)?\.*$)',  # Czech
        r'(?P<tomes>^tome?s*\.*$)',
        r'(?P<undUUU0020soUUU0020weiter>^u(nd)?\s*so?\s*w(eiter)?\.*$)',  # German, and so forth, etc.
        r'(?P<unnumbered>^un-?numbered\.*$)',
        r'(?P<updated>^upd(ated)?\.*$)',
        r'(?P<uzupeUUU0142nione>^uzup(elnione)?\.*$)',  # Polish, uzupełnione
        r'(?P<Verfasser>^verf(asser)?\.*$)',  # German, composer, writer
        r'(?P<vergleich>^vergl(eich)?\.*$)',  # German, compare
        r'(?P<Verzeichnis>^verz(eichnis)?\.*$)',  # German, catalogue
        r'(?P<videodisc>^video-*disc\.*$)',
        r'(?P<volumes>^vol?(ume)?s*\.*$)',
        r'(?P<Vorwort>^vorw(ort)?\.*$)',  # German, foreword
        r'(?P<vydUUU00E1nUUU00ED>^vyd(ani)?\.*$)',  # Czech, vydání
        r'(?P<vypusk>^vyp(usk)?\.*$)',  # Russian
        r'(?P<wydanie>^wyd(anie)?\.*$)',  # Polish
        r'(?P<years>^y(ea)?rs\.*$)',
        r'(?P<year>^y(ea)?r\.*$)',
        r'(?P<Zeitschrift>^z(ei)?tschr(ift)?\.*$)',  # German, periodical
        r'(?P<Zeitung>^z(ei)?t(un)?g\.*$)',  # German, newspaper
        r'(?P<zeszyt>^zesz(yt)?\.*$)',  # Polish
        r'(?P<zvezek>^zv(ezek)?\.*$)',  # Slovenian, volumes
    )
|
class Student(object):
    """Demo of __getattr__: 'score' is served as a computed attribute."""

    def __init__(self):
        self.name = 'xiaozhi'

    def __getattr__(self, attr):
        # Called only when normal attribute lookup fails.
        if attr == 'score':
            return 95
        # BUG FIX: the original fell through and implicitly returned None,
        # making every attribute appear to exist (hasattr always True).
        raise AttributeError(attr)
# Exercise both the stored attribute and the __getattr__-computed one.
stu = Student()
print(stu.name)
print(stu.score)
|
import pymysql
def get_connection():
    """Open a new PyMySQL connection to the local flaskdb1 database.

    NOTE(review): credentials are hard-coded; move them to config/env vars.
    """
    conn = pymysql.connect(host='127.0.0.1', user='root', password='1234', db='flaskdb1', charset='utf8')
    return conn
# Function that stores one animal record.
def add_animal_info(animal_type, animal_name, animal_age, animal_weight):
    """Insert a row into animal_table; commits on success, always closes the connection."""
    conn = get_connection()
    sql = '''
    insert into animal_table(animal_type, animal_name, animal_age, animal_weight)
    values(%s, %s, %s, %s)
    '''
    try:
        cursor = conn.cursor()
        cursor.execute(sql, (animal_type, animal_name, animal_age, animal_weight))
        conn.commit()
    finally:
        # The original leaked the connection whenever execute() raised.
        conn.close()
# Function that searches animals by name.
def get_search_info(animal_name):
    """Return a list of dicts for every row whose animal_name matches."""
    conn = get_connection()
    sql = '''
    select animal_type, animal_name, animal_age, animal_weight
    from animal_table
    where animal_name = %s
    '''
    try:
        cursor = conn.cursor()
        # Pass parameters as a tuple — a bare string relies on PyMySQL's
        # lenient single-argument handling.
        cursor.execute(sql, (animal_name,))
        result = cursor.fetchall()
    finally:
        # The original leaked the connection whenever execute() raised.
        conn.close()
    return [
        {
            'animal_type': row[0],
            'animal_name': row[1],
            'animal_age': row[2],
            'animal_weight': row[3],
        }
        for row in result
    ]
# Function that aggregates the per-type animal counts.
def get_total_info():
    """Return ([{'animal_type': ..., 'animal_count': ...}, ...], grand_total)."""
    conn = get_connection()
    sql = '''
    SELECT animal_type, count(*) FROM flaskdb1.animal_table GROUP BY animal_type;
    '''
    try:
        cursor = conn.cursor()
        cursor.execute(sql)
        result = cursor.fetchall()
    finally:
        # The original leaked the connection whenever execute() raised.
        conn.close()
    animal_list = []
    total = 0  # renamed from `sum`, which shadowed the builtin
    for row in result:
        total += row[1]
        animal_list.append({
            'animal_type': row[0],
            'animal_count': row[1],
        })
    return (animal_list, total)
# Function that lists every animal row.
def all_animal_list():
    """Return all rows of animal_table as a list of dicts."""
    conn = get_connection()
    # NOTE(review): SELECT * depends on column order matching the row[0..4]
    # indexing below — presumably (pk, type, name, age, weight); verify schema.
    sql = '''
    SELECT * FROM flaskdb1.animal_table;
    '''
    try:
        cursor = conn.cursor()
        cursor.execute(sql)
        result = cursor.fetchall()
    finally:
        # The original leaked the connection whenever execute() raised.
        conn.close()
    return [
        {
            'animal_pk': row[0],
            'animal_type': row[1],
            'animal_name': row[2],
            'animal_age': row[3],
            'animal_weight': row[4]
        }
        for row in result
    ]
|
# Generated by Django 3.2 on 2021-04-15 16:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the Members model."""
    dependencies = [
        ('home', '0003_car'),
    ]
    operations = [
        migrations.CreateModel(
            name='Members',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Name', models.CharField(max_length=255)),
                ('Roll_Number', models.CharField(max_length=9)),
                ('FB_Handle', models.CharField(max_length=100)),
                ('image', models.ImageField(upload_to='MembersPhoto')),
            ],
        ),
    ]
|
# Convert an Earth weight into equivalents on other bodies.
# The multipliers are presumably surface-gravity ratios relative to Earth
# — verify the constants before relying on them.  NOTE(review): Neptune
# is missing from the list.
weight = float(input("How much do you weigh?"))
Mercury = weight * 0.38
Venus = weight * 0.91
Mars = weight * 0.39
Jupiter = weight * 2.34
Saturn = weight * 1.06
Uranus = weight * 0.92
Pluto = weight *0.06
print(f"On Earth, you weigh {weight}. On other planets, you would weigh..."
, "\n Mercury: ",Mercury, "\n Venus: ",Venus, "\n Mars :",Mars,"\n Jupiter: ",Jupiter,"\n Saturn: ",Saturn, "\n Uranus: ",Uranus,"\n Pluto: ",Pluto,)
|
from __future__ import print_function
import sys
import os
import shelve
import datetime
import time
import bs4
# Seed URL used only to scrape the station <select> list (the from/to ids are arbitrary).
nir_stations_url = "http://www.journeycheck.com/nirailways/route?from=GVA&to=CLA&action=search&savedRoute="
# Template for the live-departures page of a given src/dst station-id pair.
nir_departures_url_template = "http://www.journeycheck.com/nirailways/route?from=%(src)s&to=%(dst)s&action=search&savedRoute="
# NIR station ids are exactly three characters long.
MAX_STATION_ID_LEN = 3
MIN_STATION_ID_LEN = 3
# Cached pages older than this are re-fetched.
MAX_CACHE_TIME = datetime.timedelta(hours=12)
class InvalidStationExcept(KeyError):
    """Base class for unknown-station errors."""
    pass
class InvalidStationIdExcept(InvalidStationExcept):
    """Raised for a malformed or unknown station id."""
    pass
class InvalidStationNameExcept(InvalidStationExcept):
    """Raised for an unknown station name."""
    pass
def datetime_from_nir_time(nir_time):
    """Parse an 'HH:MM' string into a datetime (date defaults to 1900-01-01)."""
    return datetime.datetime.strptime(nir_time, "%H:%M")
class StationMapper(object):
    """Bidirectional mapping between 3-letter station ids and station names.

    Fixes over the original: `except KeyError, e` was Python-2-only syntax
    (a SyntaxError on Python 3); `raise InvalidStationId` referenced an
    undefined name (the class is InvalidStationIdExcept); `unicode` does not
    exist on Python 3, so the isinstance checks now use str.
    """

    def __init__(self):
        self._ids_to_names = {}
        self._names_to_ids = {}

    def add_mapping(self, station_id, station_name):
        """Register a new id<->name pair; both directions must be unused."""
        assert station_name != "All Stations"
        assert isinstance(station_id, str)
        assert isinstance(station_name, str)
        assert len(station_id) <= MAX_STATION_ID_LEN  # if this happens, you may have given a name instead of an id
        assert len(station_id) >= MIN_STATION_ID_LEN
        assert station_id not in self._ids_to_names
        assert station_name not in self._names_to_ids
        self._ids_to_names[station_id] = station_name
        self._names_to_ids[station_name] = station_id
        # Post-conditions: both directions agree.
        assert station_name in self._names_to_ids
        assert station_id in self._ids_to_names
        assert self._ids_to_names[station_id] == station_name
        assert self._names_to_ids[station_name] == station_id

    def remove_mapping(self, station_id, station_name):
        """Remove an existing id<->name pair from both directions."""
        assert len(station_id) <= MAX_STATION_ID_LEN  # if this happens, you may have given a name instead of an id
        assert len(station_id) >= MIN_STATION_ID_LEN
        assert station_id in self._ids_to_names
        assert station_name in self._names_to_ids
        del self._ids_to_names[station_id]
        del self._names_to_ids[station_name]

    def name_for_id(self, station_id):
        """Return the name for a station id; raises InvalidStationIdExcept."""
        if len(station_id) > MAX_STATION_ID_LEN or len(station_id) < MIN_STATION_ID_LEN:
            raise InvalidStationIdExcept(station_id)
        try:
            return self._ids_to_names[station_id]
        except KeyError as e:
            raise InvalidStationIdExcept(station_id) from e

    def id_for_name(self, station_name):
        """Return the id for a station name; raises InvalidStationNameExcept."""
        assert isinstance(station_name, str)
        try:
            return self._names_to_ids[station_name]
        except KeyError as e:
            raise InvalidStationNameExcept(station_name) from e

    def all_ids(self):
        for station_id in self._ids_to_names.keys():
            yield station_id

    def all_names(self):
        for station_name in self._names_to_ids.keys():
            yield station_name

    def id_is_valid(self, station_id):
        assert isinstance(station_id, str)
        if len(station_id) < MIN_STATION_ID_LEN or len(station_id) > MAX_STATION_ID_LEN:
            return False
        return station_id in self._ids_to_names

    def name_is_valid(self, station_name):
        assert isinstance(station_name, str)
        return station_name in self._names_to_ids

    def ids_and_names(self):
        for k, v in self._ids_to_names.items():
            yield (k, v)
def build_browser():
    """Build a mechanize.Browser with a caching page getter attached.

    The returned browser gains ``cached_get_page(url)``, which caches page
    bodies in per-URL shelve files under /tmp for MAX_CACHE_TIME and returns
    a BeautifulSoup of the body.
    """
    import mechanize
    from functools import partial
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.set_handle_refresh(False)
    br.addheaders = [ ('User-Agent', 'Firefox') ]
    def get_raw_page(br, url):
        # Fetch the URL and return the raw response body.
        br.open(url)
        res = br.response()
        return res.read()
    def cached_get_page(br, url):
        escaped_url = escape_url(url)
        cache_path = os.path.join("/tmp", escaped_url)
        now = datetime.datetime.now()
        page_dat = None
        need_new_page_dat = True
        if os.path.exists(cache_path):
            page_shelf = shelve.open(cache_path)
            try:
                cache_date = page_shelf["page_retrieval_date"]
                assert page_shelf["page_url"] == url  # TODO: treat this as a runtime error, not an assertion
                # BUG FIX: the original compared `cache_date - now`, which is
                # always negative, so the cache never expired.
                if now - cache_date > MAX_CACHE_TIME:
                    need_new_page_dat = True
                else:
                    page_dat = page_shelf["page_dat"]
                    need_new_page_dat = False
            finally:
                page_shelf.close()  # the original never closed the shelf
        if need_new_page_dat:
            page_dat = get_raw_page(br, url)
            shelf = shelve.open(cache_path)
            try:
                shelf["page_url"] = url
                shelf["page_dat"] = page_dat
                shelf["page_retrieval_date"] = now
            finally:
                shelf.close()
        # NOTE(review): no parser is given; bs4 picks a default and warns.
        return bs4.BeautifulSoup(page_dat)
    br.cached_get_page = partial(cached_get_page, br)
    return br
def escape_url(url):
    """Return the url percent-encoded with spaces as '+' (filesystem-safe cache key)."""
    # BUG FIX: urllib.quote_plus moved to urllib.parse in Python 3.
    from urllib.parse import quote_plus
    return quote_plus(url)
def unescape_url(url):
    """Inverse of escape_url: decode '+' and percent escapes."""
    # BUG FIX: urllib.unquote_plus moved to urllib.parse in Python 3.
    from urllib.parse import unquote_plus
    return unquote_plus(url)
def get_station_mapper():
    """
    Returns a StationMapper, which provides a mapping of all station ids and names
    """
    br = build_browser()
    page = br.cached_get_page(nir_stations_url)
    stationSelect = page.find('select', id='fromSelectBox')
    station_mapper = StationMapper()
    for station in stationSelect.findChildren():
        # BUG FIX: `unicode` does not exist on Python 3; bs4 already
        # yields str, so str() keeps the original coercion intent.
        station_name = str(station.contents[0].strip())
        if station_name == "All Stations":
            # Skip the synthetic "all" entry in the <select>.
            continue
        station_id = str(station.attrs["value"]).strip()
        station_mapper.add_mapping(station_id, station_name)
    return station_mapper
def get_departures_by_station_ids(src_station_id, dst_station_id, station_mapper):
    """
    Yields upcoming departures from the given src_station_id, to the given dst_station_id.

    Each yielded item is (train, waypoints): train is
    (src_id, dst_id, departure_time, departure_status) and each waypoint is
    (time, station_id, status).
    """
    br = build_browser()
    station_departures_url = nir_departures_url_template % { 'src': src_station_id, 'dst': dst_station_id }
    page = br.cached_get_page(station_departures_url)
    departuresList = page.find('div', id='portletDivBodyliveDepartures')
    assert departuresList is not None
    table_rows = departuresList.find('tbody').findAll('tr')
    waypoints = []
    train = None
    for row in table_rows:
        if 'onclick' in row.attrs:
            # new train
            # A row with an onclick attribute starts a new train; emit the
            # previous train (with its accumulated waypoints) first.
            if train != None:
                yield (train, waypoints)
            train = row.findAll('td')
            train_departure_time = train[1].contents[0].strip()
            train_departure_time = datetime_from_nir_time(train_departure_time)
            # TODO: parse this into the actual time, or the adjusted time [don't know if these actually have adjusted times, but the waypoint status does]
            train_departure_status = "".join(train[2].contents).strip()
            train_dst_name = "".join(train[3].contents).strip()
            train_dst_id = station_mapper.id_for_name(train_dst_name)
            train = (src_station_id, train_dst_id, train_departure_time, train_departure_status)
            waypoints = []
        else:
            # Non-train rows carry the calling pattern of the current train.
            for subrow in row.findAll('tr', attrs={'class': 'callingPatternRow'}):
                subrow_els = list(subrow.children)
                waypoint_time = subrow_els[1].contents[2]
                # NOTE(review): keeps the first len(u" Dep.") == 5 characters
                # of the cell — presumably the "HH:MM" prefix; confirm markup.
                waypoint_time = waypoint_time[:len(u" Dep.")]
                waypoint_time = datetime_from_nir_time(waypoint_time)
                # TODO: parse this into the actual time or the adjusted time
                waypoint_status = subrow_els[3].string
                waypoint_station_name = subrow_els[5].contents[0].replace(u"\xa0", u" ").strip()
                waypoint_station_id = station_mapper.id_for_name(waypoint_station_name)
                waypoints.append( (waypoint_time, waypoint_station_id, waypoint_status) )
    # Emit the final train once the table is exhausted.
    if train is not None:
        yield (train, waypoints)
def get_departures_by_station_names(src_station_name, dst_station_name, station_mapper):
    """
    Yields upcoming departures from the given src_station_name, to the given dst_station_name
    """
    assert station_mapper.name_is_valid(src_station_name)
    assert station_mapper.name_is_valid(dst_station_name)
    # BUG FIX: the original recursed into *itself* (infinite recursion) and
    # referenced undefined names src_station_id/dst_station_id; delegate to
    # the id-based lookup with the mapped ids instead.
    return get_departures_by_station_ids(
        station_mapper.id_for_name(src_station_name),
        station_mapper.id_for_name(dst_station_name),
        station_mapper)
def pretty_print_departure(dep, station_mapper):
    """Print one departure and its calling points in a banner layout.

    `dep` is (train, waypoints) as yielded by get_departures_by_station_ids.
    """
    (src_id, dst_id, departure_time, departure_status), calling_points = dep
    src_name = station_mapper.name_for_id(src_id)
    dst_name = station_mapper.name_for_id(dst_id)
    # Banner header.
    print("")
    print("#" * 78)
    print("\n")
    print("%s: %s to %s" % (departure_time.strftime("%H:%M"), src_name, dst_name))
    print("=" * 78)
    print("")
    print(" Departure status: %s" % (departure_status,))
    print("")
    # Calling-point table.
    print(" Time Station Status")
    print(" " + "-" * 67)
    for point_time, point_station_id, point_status in calling_points:
        print(" %-7s %-42s %s" % (point_time.strftime("%H:%M"), station_mapper.name_for_id(point_station_id), point_status))
    print("")
|
import proj1_helpers as utils
import numpy as np
import algorithms as ML_alg
import preprocessing_functions as prf
from joblib import Parallel, delayed
def main():
    """Load the training data, preprocess it, and grid-search degree/lambda
    hyper-parameters with cross-validated regularised logistic regression."""
    (y, x, event_ids) = utils.load_csv_data("../data/train.csv")
    # Replace NaNs with means and map labels into {0, 1}.
    x_nan_to_mean = prf.put_nan_to_mean(x, y)
    y_bin = prf.pass_data_to_zero_one(y).reshape([y.shape[0],1])
    std_x = prf.standardize(x_nan_to_mean)
    # One min/max polynomial degree per feature (presumably 30 features — verify).
    degree_min = 30 * [1]
    degree_max = 30 * [1]
    # Run the tuner in parallel for two fixed gamma values.
    Parallel(n_jobs = 2)(delayed(ML_alg.tuner_degree_lambda)(y = y_bin, x = std_x,\
    degree_min = degree_min, degree_max = degree_max, lambda_min = -4,\
    lambda_max = 0, nb_lambda = 30, k_fold=4, seed=1,\
    max_iters=10000, gamma=gam, cost="reg_logistic", tol=1e-3,\
    thresh_test_div=10, update_gamma=False) for gam in [1e-3,1e-4])
    # A final sequential run with a smaller gamma.
    best_degree, best_lambda, min_rmse_te, min_rmse_tr = ML_alg.tuner_degree_lambda(y=y_bin, x=std_x,\
    degree_min = degree_min, degree_max = degree_max, lambda_min = -4,\
    lambda_max = 0, nb_lambda = 30, k_fold=4, seed=1,\
    max_iters=10000, gamma=0.00001, cost="reg_logistic", tol=1e-3,\
    thresh_test_div=10, update_gamma=False)
    # NOTE(review): the results file is opened and immediately closed without
    # writing best_degree/best_lambda — presumably unfinished.
    result_file = open("results_{deg}.txt".format(deg = 1), 'w')
    result_file.close()
if (__name__ == "__main__"):
    main()
|
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import tkinter as tk
from PIL import Image
def onclick(event):
    """Print the data coordinates of a mouse event inside the axes.

    xdata/ydata are None when the event happens outside the axes, so both
    are checked; `is not None` replaces the non-idiomatic `!= None`.
    """
    if event.xdata is not None and event.ydata is not None:
        print(event.xdata, event.ydata)
def clickfun(img):
    """Show the image and print the data coordinates of mouse clicks on it."""
    im = Image.open(img)
    ax = plt.gca()
    fig = plt.gcf()
    implot = ax.imshow(im)
    # BUG FIX: the handler reads event.xdata/ydata (mouse coordinates), so it
    # must be bound to mouse clicks, not 'key_press_event'.
    cid = fig.canvas.mpl_connect('button_press_event', onclick)
    plt.show()
if __name__=='__main__':
    clickfun('/Users/pengbohao/Downloads/2019summer/IMG_2881.JPG')
def max_pairwise_and(values):
    """Return the maximum bitwise AND over all unordered pairs of *values*.

    Returns 0 when there are fewer than two values (matching the original's
    initial maximum of 0).
    """
    best = 0
    for i in range(len(values)):
        for j in range(i + 1, len(values)):
            best = max(best, values[i] & values[j])
    return best


if __name__ == "__main__":
    # Parse each token to int once, instead of re-parsing inside the loops
    # as the original did.
    nums = [int(tok) for tok in input().split(" ")]
    print(max_pairwise_and(nums))
def Healt_calc(age, apples, cig):
    """Print and return a toy 'health score' from age, apples/day and cigarettes/day.

    The original only printed the score, making the value unusable by
    callers; returning it is backward compatible (callers ignored the
    previous None return).  Name kept as-is ('Healt') for the existing calls.
    """
    health = (100-age) + apples*2 - (cig*2.8)
    print(health)
    return health
# Three equivalent calls: positional literals, explicit indexing, and
# * unpacking of the same list.
Healt_calc(22,5,7)
venkat_data = [22,5,7]
Healt_calc(venkat_data[0],venkat_data[1],venkat_data[2])
Healt_calc(*venkat_data) # UNPACKING ARGUMENT
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cloned from https://github.com/YieldNull/freeproxy.git
"""
from gevent import monkey
monkey.patch_socket()
import gevent
import re
import requests
import random
from time import time, sleep, localtime, strftime
from gevent.pool import Pool
from util.spider.freeproxy.proxy import fetch_proxies
from util.spider.freeproxy.commons import _headers, _user_agents, _sleep_time
from util.spider.freeproxy.db_utils import _clawer_db, create_connection, upsert_proxy, reload_activate_proxies, all_proxy
def validate_proxies(proxies: list, timeout: int = 40) -> list:
    """
    Test proxies, or process html source using callback in the meantime.
    :type proxies: list
    :param proxies: proxies
    :param timeout: response timeout
    :return: [(proxy1, response_time1, start_time1, xp1), ... ,(proxyn, response_timen, start_timen, xpn)]
    """
    test_url = 'http://www.imeidb.com/#lookup'
    result = []
    # De-duplicate the candidates; `errors` collects failed/timed-out proxies.
    proxies = set(proxies)
    errors = set()
    pool = Pool(50)
    def load(proxy):
        # Worker: fetch test_url through `proxy`; on success record the
        # response time and the page's hidden "xp" token, otherwise mark
        # the proxy as failed.
        code = None
        xp = ''
        is_success = False
        #sleep(random.uniform(0, 0.1))
        start_time = time()
        str_time = strftime('%Y-%m-%d %H:%M:%S', localtime(start_time))
        print(str_time + '开始测试代理' + proxy)
        try:
            with gevent.Timeout(seconds=timeout, exception=Exception('[Connection Timeout]')):
                _headers['User-Agent'] = random.choice(_user_agents)
                # NOTE: this local dict shadows the outer `proxies` set.
                proxies = {'http': 'http://{}'.format(proxy.strip()),
                'https': 'https://{}'.format(proxy.strip())}
                res = requests.get(test_url, proxies=proxies, headers=_headers)
                code = res.status_code
                source = res.text
                #welcome = re.findall('(<h1>欢迎来到IMEIdb</h1>)', source)
                xp_list = re.findall('input type="hidden" name="xp" value="(.*?)"', source)
                if len(xp_list) > 0:
                    xp = xp_list[0]
                    is_success = True
            if code != 200:
                errors.add(proxy)
        except Exception as e:
            # log(e.args)
            errors.add(proxy)
        end_time = time()
        if code == 200 and is_success:
            escaped = end_time - start_time
            result.append((proxy, round(escaped, 2), str(start_time), xp))
    index = 0
    # Spawn workers in batches of 50 with a random pause between batches.
    for proxy in proxies:
        pool.spawn(load, proxy)
        index += 1
        if index % 50 == 0:
            sleep(random.uniform(int(_sleep_time / 4), int(_sleep_time / 2)))
    pool.join()
    proxies = proxies - errors
    print('[HTTP Proxies] Available:{:d} Deprecated:{:d}'.format(len(proxies), len(errors)))
    return result
def load_from_web():
    """Fetch public proxy lists, validate them, and persist the results."""
    web_proxies = fetch_proxies()
    proxies = validate_proxies(web_proxies)
    # NOTE(review): l[0:1] keeps each entry as a 1-tuple (proxy,); if
    # upsert_proxy expects bare addresses this should be l[0] — verify.
    proxy_list = [l[0:1] for l in proxies]
    # Write the proxies into the database.
    conn = create_connection(_clawer_db)
    with conn:
        try:
            # Merge the valid proxies into the proxy table.
            print('有效代理并入数据库')
            upsert_proxy(conn, proxy_list)
            # Rebuild the active-proxy table.
            print('重新载入有效代理')
            reload_activate_proxies(conn, proxies)
        except Exception as e:
            print(e)
def load_from_db():
    """Re-validate every stored proxy and rebuild the active-proxy table."""
    # Load all candidate proxies from the database and test them.
    db_proxies = []
    conn = create_connection(_clawer_db)
    with conn:
        try:
            all_proxies = all_proxy(conn)
            if all_proxies is not None:
                for db_proxy in all_proxies:
                    # Each row is a tuple; += flattens its fields into the list.
                    db_proxies += db_proxy
        except Exception as e:
            print(e)
    proxies = validate_proxies(db_proxies)
    # Write the surviving proxies back into the database.
    conn = create_connection(_clawer_db)
    with conn:
        try:
            # Rebuild the active-proxy table.
            print('重新载入有效代理')
            reload_activate_proxies(conn, proxies)
        except Exception as e:
            print(e)
if __name__ == '__main__':
    #test_url = 'http://icanhazip.com/'
    # Refresh the proxy pool from public sources and persist valid entries.
    load_from_web()
|
#-*-coding:utf-8*-
from lxml import etree
import sys
import pytz
import datetime
#评论页解析json数据
class reviews_analysis():
    """Flattens a Taobao review JSON payload (rateList/rateCount) into dicts.

    Fix: the original used Python-2 `print x` statements, which are syntax
    errors on Python 3; all prints are parenthesised (valid on both 2 and 3).
    """

    def process(self, text, url):
        """Extract one dict per review from *text*.

        text -- decoded JSON payload with 'rateList' and 'rateCount' keys
                (assumed dict — TODO confirm against the caller)
        url  -- source URL of the review JSON, stored on every item
        Returns a list of item dicts, or None when nothing was extracted.
        Exits the process when *text* is empty.
        """
        tz = pytz.timezone('Asia/Shanghai')
        last_update_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        if text == '' or text == 'None':
            print("Can't get the html from taobao_review")
            sys.exit()
        data = []
        for i in range(len(text['rateList'])):
            item = {}
            item['review_json_link'] = url
            item['last_update_time'] = last_update_time
            # Total review count
            try:
                item['review_count'] = text['rateCount']['total']
            except Exception as err:
                print(err)
            # Number of reviews that include pictures
            try:
                item['image_count'] = text['rateCount']['picNum']
            except Exception as err:
                print(err)
            try:
                # Review body text
                item['review_content'] = text['rateList'][i]['rateContent']
            except Exception as err:
                print(err)
            try:
                # Review timestamp
                item['review_time'] = text['rateList'][i]['rateDate']
            except Exception as err:
                print(err)
            try:
                # Purchased SKU / product variant info
                item['product_information'] = text['rateList'][i]['auctionSku']
            except Exception as err:
                print(err)
            try:
                # Reviewer display name
                item['reviewer_name'] = text['rateList'][i]['displayUserNick']
            except Exception as err:
                print(err)
            try:
                # Reviewer id
                item['reviewer_id'] = text['rateList'][i]['id']
            except Exception as err:
                print(err)
            try:
                # Image URLs attached to the review
                item['image_url'] = text['rateList'][i]['pics']
            except Exception as err:
                print(err)
            try:
                # Seller id
                item['seller_id'] = text['rateList'][i]['sellerId']
            except Exception as err:
                print(err)
            if (len(item) > 0):
                data.append(item)
        if (len(data) > 0):
            return data
|
import os
from flask import Flask, redirect, url_for, request, render_template, jsonify
from pymongo import MongoClient
app = Flask(__name__)
# Mongo address comes from a Docker-style linked-container env var; fails fast
# with KeyError when the variable is absent.
client = MongoClient(os.environ['DB_PORT_27017_TCP_ADDR'], 27017)
db = client.telemetrydb
@app.route('/api/v1.0/telemetry', methods=['GET'])
def get_all_telemetry():
    """Return every stored telemetry record as {'result': [...]}."""
    collection = db.telemetrydb
    output = [
        {
            'altitude': record['altitude'],
            'longitude': record['longitude'],
            'latitude': record['latitude'],
            'time': record['time'],
            'velocity': record['velocity'],
        }
        for record in collection.find()
    ]
    return jsonify({'result': output})
@app.route('/api/v1.0/telemetry', methods=['POST'])
def post_telemetry():
    """Insert the posted JSON document verbatim into the telemetry collection."""
    _telemetry = request.json
    # NOTE(review): no schema validation — any JSON shape is stored; confirm intended.
    db.telemetrydb.insert_one(_telemetry)
    return "Added."
if __name__ == "__main__":
    # debug=True enables the reloader/debugger — not suitable for production.
    app.run(host='0.0.0.0', debug=True)
|
from .models import Resource
from django import forms
class NewResourceForm(forms.ModelForm):
    """Form for creating a Resource, including subject and the uploaded PDF."""
    class Meta:
        model = Resource
        fields = ['title', 'description', 'cost', 'subject', 'pdf_file']
class UpdateResourceForm(forms.ModelForm):
    """Form for editing a Resource; only title, description and cost are exposed."""
    class Meta:
        model = Resource
        fields = ['title', 'description', 'cost']
# ua = "Mozilla%252F5.0%2B%28iPhone%253B%2BCPU%2BiPhone%2BOS%2B12_2%2Blike%2BMac%2BOS%2BX%29%2BAppleWebKit%252F605.1.15%2B%28KHTML%2C%2Blike%2BGecko%29%2BVersion%252F13.0%2BMobile%252F15E148%2BSafari%252F604.1"
# browser(url = "test", ua = ua)
import asyncio
import pyppeteer
import random
import time
import json
import string
import atexit
import requests
import logging
from pyppeteer.page import Page
import re
async def chrome_runtime(page: Page) -> None:
    """Evasion: define a stub window.chrome.runtime so the page looks like full Chrome."""
    await page.evaluateOnNewDocument("""
() => {
window.chrome = {
runtime: {}
}
}
""")


async def console_debug(page: Page) -> None:
    """Evasion: replace console.debug with a no-op returning null."""
    await page.evaluateOnNewDocument("""
() => {
window.console.debug = () => {
return null
}
}
""")
async def iframe_content_window(page: Page) -> None:
    """Evasion: give srcdoc iframes a proxied contentWindow (patches a headless
    Chromium quirk that fingerprinting scripts test for)."""
    await page.evaluateOnNewDocument("""
() => {
try {
// Adds a contentWindow proxy to the provided iframe element
const addContentWindowProxy = iframe => {
const contentWindowProxy = {
get(target, key) {
// Now to the interesting part:
// We actually make this thing behave like a regular iframe window,
// by intercepting calls to e.g. `.self` and redirect it to the correct thing. :)
// That makes it possible for these assertions to be correct:
// iframe.contentWindow.self === window.top // must be false
if (key === 'self') {
return this
}
// iframe.contentWindow.frameElement === iframe // must be true
if (key === 'frameElement') {
return iframe
}
return Reflect.get(target, key)
}
}
if (!iframe.contentWindow) {
const proxy = new Proxy(window, contentWindowProxy)
Object.defineProperty(iframe, 'contentWindow', {
get() {
return proxy
},
set(newValue) {
return newValue // contentWindow is immutable
},
enumerable: true,
configurable: false
})
}
}
// Handles iframe element creation, augments `srcdoc` property so we can intercept further
const handleIframeCreation = (target, thisArg, args) => {
const iframe = target.apply(thisArg, args)
// We need to keep the originals around
const _iframe = iframe
const _srcdoc = _iframe.srcdoc
// Add hook for the srcdoc property
// We need to be very surgical here to not break other iframes by accident
Object.defineProperty(iframe, 'srcdoc', {
configurable: true, // Important, so we can reset this later
get: function() {
return _iframe.srcdoc
},
set: function(newValue) {
addContentWindowProxy(this)
// Reset property, the hook is only needed once
Object.defineProperty(iframe, 'srcdoc', {
configurable: false,
writable: false,
value: _srcdoc
})
_iframe.srcdoc = newValue
}
})
return iframe
}
// Adds a hook to intercept iframe creation events
const addIframeCreationSniffer = () => {
/* global document */
const createElement = {
// Make toString() native
get(target, key) {
return Reflect.get(target, key)
},
apply: function(target, thisArg, args) {
const isIframe =
args && args.length && `${args[0]}`.toLowerCase() === 'iframe'
if (!isIframe) {
// Everything as usual
return target.apply(thisArg, args)
} else {
return handleIframeCreation(target, thisArg, args)
}
}
}
// All this just due to iframes with srcdoc bug
document.createElement = new Proxy(
document.createElement,
createElement
)
}
// Let's go
addIframeCreationSniffer()
} catch (err) {
// console.warn(err)
}
}
""")
async def media_codecs(page: Page) -> None:
    """Evasion: patch HTMLMediaElement.canPlayType to report the codec answers
    a branded Chrome gives (some are missing from stock Chromium)."""
    await page.evaluateOnNewDocument("""
() => {
try {
/**
* Input might look funky, we need to normalize it so e.g. whitespace isn't an issue for our spoofing.
*
* @example
* video/webm; codecs="vp8, vorbis"
* video/mp4; codecs="avc1.42E01E"
* audio/x-m4a;
* audio/ogg; codecs="vorbis"
* @param {String} arg
*/
const parseInput = arg => {
const [mime, codecStr] = arg.trim().split(';')
let codecs = []
if (codecStr && codecStr.includes('codecs="')) {
codecs = codecStr
.trim()
.replace(`codecs="`, '')
.replace(`"`, '')
.trim()
.split(',')
.filter(x => !!x)
.map(x => x.trim())
}
return { mime, codecStr, codecs }
}
/* global HTMLMediaElement */
const canPlayType = {
// Make toString() native
get(target, key) {
// Mitigate Chromium bug (#130)
if (typeof target[key] === 'function') {
return target[key].bind(target)
}
return Reflect.get(target, key)
},
// Intercept certain requests
apply: function(target, ctx, args) {
if (!args || !args.length) {
return target.apply(ctx, args)
}
const { mime, codecs } = parseInput(args[0])
// This specific mp4 codec is missing in Chromium
if (mime === 'video/mp4') {
if (codecs.includes('avc1.42E01E')) {
return 'probably'
}
}
// This mimetype is only supported if no codecs are specified
if (mime === 'audio/x-m4a' && !codecs.length) {
return 'maybe'
}
// This mimetype is only supported if no codecs are specified
if (mime === 'audio/aac' && !codecs.length) {
return 'probably'
}
// Everything else as usual
return target.apply(ctx, args)
}
}
HTMLMediaElement.prototype.canPlayType = new Proxy(
HTMLMediaElement.prototype.canPlayType,
canPlayType
)
} catch (err) {}
}
""")
async def navigator_languages(page: Page) -> None:
    """Evasion: report a fixed ['en-US', 'en'] for navigator.languages."""
    await page.evaluateOnNewDocument("""
() => {
Object.defineProperty(navigator, 'languages', {
get: () => ['en-US', 'en']
})
}
""")


async def navigator_permissions(page: Page) -> None:
    """Evasion: make permissions.query('notifications') resolve to the real
    Notification.permission, and disguise the patched functions so their
    toString() looks like native code."""
    await page.evaluateOnNewDocument("""
() => {
const originalQuery = window.navigator.permissions.query
window.navigator.permissions.__proto__.query = parameters =>
parameters.name === 'notifications'
? Promise.resolve({ state: Notification.permission })
: originalQuery(parameters)
const oldCall = Function.prototype.call
function call () {
return oldCall.apply(this, arguments)
}
Function.prototype.call = call
const nativeToStringFunctionString = Error.toString().replace(
/Error/g,
'toString'
)
const oldToString = Function.prototype.toString
function functionToString () {
if (this === window.navigator.permissions.query) {
return 'function query() { [native code] }'
}
if (this === functionToString) {
return nativeToStringFunctionString
}
return oldCall.call(oldToString, this)
}
Function.prototype.toString = functionToString
}
""")
async def navigator_plugins(page: Page) -> None:
    """Evasion: fabricate the standard Chrome plugin/mimetype arrays when the
    browser exposes none (headless), with native-looking function toString()."""
    await page.evaluateOnNewDocument("""
() => {
function mockPluginsAndMimeTypes() {
const makeFnsNative = (fns = []) => {
const oldCall = Function.prototype.call
function call() {
return oldCall.apply(this, arguments)
}
Function.prototype.call = call
const nativeToStringFunctionString = Error.toString().replace(
/Error/g,
'toString'
)
const oldToString = Function.prototype.toString
function functionToString() {
for (const fn of fns) {
if (this === fn.ref) {
return `function ${fn.name}() { [native code] }`
}
}
if (this === functionToString) {
return nativeToStringFunctionString
}
return oldCall.call(oldToString, this)
}
Function.prototype.toString = functionToString
}
const mockedFns = []
const fakeData = {
mimeTypes: [
{
type: 'application/pdf',
suffixes: 'pdf',
description: '',
__pluginName: 'Chrome PDF Viewer'
},
{
type: 'application/x-google-chrome-pdf',
suffixes: 'pdf',
description: 'Portable Document Format',
__pluginName: 'Chrome PDF Plugin'
},
{
type: 'application/x-nacl',
suffixes: '',
description: 'Native Client Executable',
enabledPlugin: Plugin,
__pluginName: 'Native Client'
},
{
type: 'application/x-pnacl',
suffixes: '',
description: 'Portable Native Client Executable',
__pluginName: 'Native Client'
}
],
plugins: [
{
name: 'Chrome PDF Plugin',
filename: 'internal-pdf-viewer',
description: 'Portable Document Format'
},
{
name: 'Chrome PDF Viewer',
filename: 'mhjfbmdgcfjbbpaeojofohoefgiehjai',
description: ''
},
{
name: 'Native Client',
filename: 'internal-nacl-plugin',
description: ''
}
],
fns: {
namedItem: instanceName => {
const fn = function (name) {
if (!arguments.length) {
throw new TypeError(
`Failed to execute 'namedItem' on '${instanceName}': 1 argument required, but only 0 present.`
)
}
return this[name] || null
}
mockedFns.push({ ref: fn, name: 'namedItem' })
return fn
},
item: instanceName => {
const fn = function (index) {
if (!arguments.length) {
throw new TypeError(
`Failed to execute 'namedItem' on '${instanceName}': 1 argument required, but only 0 present.`
)
}
return this[index] || null
}
mockedFns.push({ ref: fn, name: 'item' })
return fn
},
refresh: instanceName => {
const fn = function () {
return undefined
}
mockedFns.push({ ref: fn, name: 'refresh' })
return fn
}
}
}
const getSubset = (keys, obj) =>
keys.reduce((a, c) => ({ ...a, [c]: obj[c] }), {})
function generateMimeTypeArray() {
const arr = fakeData.mimeTypes
.map(obj => getSubset(['type', 'suffixes', 'description'], obj))
.map(obj => Object.setPrototypeOf(obj, MimeType.prototype))
arr.forEach(obj => {
arr[obj.type] = obj
})
arr.namedItem = fakeData.fns.namedItem('MimeTypeArray')
arr.item = fakeData.fns.item('MimeTypeArray')
return Object.setPrototypeOf(arr, MimeTypeArray.prototype)
}
const mimeTypeArray = generateMimeTypeArray()
Object.defineProperty(navigator, 'mimeTypes', {
get: () => mimeTypeArray
})
function generatePluginArray() {
const arr = fakeData.plugins
.map(obj => getSubset(['name', 'filename', 'description'], obj))
.map(obj => {
const mimes = fakeData.mimeTypes.filter(
m => m.__pluginName === obj.name
)
mimes.forEach((mime, index) => {
navigator.mimeTypes[mime.type].enabledPlugin = obj
obj[mime.type] = navigator.mimeTypes[mime.type]
obj[index] = navigator.mimeTypes[mime.type]
})
obj.length = mimes.length
return obj
})
.map(obj => {
obj.namedItem = fakeData.fns.namedItem('Plugin')
obj.item = fakeData.fns.item('Plugin')
return obj
})
.map(obj => Object.setPrototypeOf(obj, Plugin.prototype))
arr.forEach(obj => {
arr[obj.name] = obj
})
arr.namedItem = fakeData.fns.namedItem('PluginArray')
arr.item = fakeData.fns.item('PluginArray')
arr.refresh = fakeData.fns.refresh('PluginArray')
return Object.setPrototypeOf(arr, PluginArray.prototype)
}
const pluginArray = generatePluginArray()
Object.defineProperty(navigator, 'plugins', {
get: () => pluginArray
})
makeFnsNative(mockedFns)
}
try {
const isPluginArray = navigator.plugins instanceof PluginArray
const hasPlugins = isPluginArray && navigator.plugins.length > 0
if (isPluginArray && hasPlugins) {
return
}
mockPluginsAndMimeTypes()
} catch (err) { }
}
""")
async def navigator_webdriver(page: Page) -> None:
    """Evasion: proxy `navigator` so 'webdriver' neither exists (`in` check)
    nor resolves (property read), while other members behave normally."""
    await page.evaluateOnNewDocument(
        """
() => {
Object.defineProperty(window, 'navigator', {
value: new Proxy(navigator, {
has: (target, key) => (key === 'webdriver' ? false : key in target),
get: (target, key) =>
key === 'webdriver'
? undefined
: typeof target[key] === 'function'
? target[key].bind(target)
: target[key]
})
})
}
""")
async def user_agent(page: Page) -> None:
    """Normalise the UA string: hide the headless marker and claim Windows 10."""
    raw = await page.browser.userAgent()
    # "HeadlessChrome" is a dead giveaway for automation.
    patched = raw.replace("HeadlessChrome", "Chrome")
    # Swap only the first parenthesised platform segment for a Windows one.
    patched = re.sub(r'\(([^)]+)\)', '(Windows NT 10.0; Win64; x64)', patched, 1)
    await page.setUserAgent(patched)
async def webgl_vendor(page: Page) -> None:
    """Evasion: answer the UNMASKED_VENDOR/RENDERER WebGL queries (0x9245/0x9246)
    with Intel strings instead of the headless defaults."""
    await page.evaluateOnNewDocument("""
() => {
try {
const getParameter = WebGLRenderingContext.prototype.getParameter
WebGLRenderingContext.prototype.getParameter = function (parameter) {
if (parameter === 37445) {
return 'Intel Inc.'
}
if (parameter === 37446) {
return 'Intel Iris OpenGL Engine'
}
return getParameter.apply(this, [parameter])
}
} catch (err) {}
}
""")


async def window_outerdimensions(page: Page) -> None:
    """Evasion: synthesise window.outerWidth/outerHeight when unset, adding a
    plausible 85px chrome-frame height."""
    await page.evaluateOnNewDocument("""
() => {
try {
if (window.outerWidth && window.outerHeight) {
return
}
const windowFrame = 85
window.outerWidth = window.innerWidth
window.outerHeight = window.innerHeight + windowFrame
} catch (err) { }
}
""")
async def stealth(page: Page) -> None:
    """Apply all enabled anti-bot-detection evasions to *page*.

    Raises:
        ValueError: if *page* is not a pyppeteer Page instance.
    """
    if not isinstance(page, Page):
        # Fix: the original message read "page must is pyppeteer.page.Page".
        raise ValueError("page must be an instance of pyppeteer.page.Page")
    # Disabled evasions are kept as comments for reference.
    #await chrome_runtime(page)
    await console_debug(page)
    await iframe_content_window(page)
    #await navigator_languages(page)
    await navigator_permissions(page)
    await navigator_plugins(page)
    await navigator_webdriver(page)
    #await navigator_vendor(page)
    await user_agent(page)
    await webgl_vendor(page)
    await window_outerdimensions(page)
    await media_codecs(page)
class browser:
    """Drives a headless Chromium (pyppeteer) to run ByteDance's acrawler.js
    and compute a URL signature.

    After construction, ``self.signature`` holds the signed token list and
    ``self.data`` the final page HTML.
    """

    def __init__(self, url, ua):
        self.url = url
        self.userAgent = ua  # e.g. "Mozilla/5.0 (Windows NT 10.0; Win64; x64) ... Chrome/83 ..."
        self.args = [
            "--no-sandbox",
            "--disable-setuid-sandbox",
            "--disable-infobars",
            "--window-position=0,0",
            # Fix: flags were misspelled "certifcate", so Chromium ignored them.
            "--ignore-certificate-errors",
            "--ignore-certificate-errors-spki-list",
            "--user-agent=" + self.userAgent
        ]
        self.options = {
            'args': self.args,
            'headless': True,
            'ignoreHTTPSErrors': True,
            'userDataDir': "./tmp",
            'handleSIGINT': False,
            'handleSIGTERM': False,
            'handleSIGHUP': False
        }
        # Run the whole async workflow to completion on a private event loop.
        loop = asyncio.new_event_loop()
        loop.run_until_complete(self.start())

    async def start(self):
        """Launch the browser, apply stealth patches, inject acrawler.js and sign self.url."""
        self.browser = await pyppeteer.launch(self.options)
        self.page = await self.browser.newPage()
        # Remove the webdriver flag before any page script can observe it.
        await self.page.evaluateOnNewDocument("""() => {delete navigator.__proto__.webdriver;}""")
        await stealth(self.page)
        await self.page.goto("about:blank", {
            'waitUntil': "load"
        })
        await self.page.setUserAgent(self.userAgent)
        # Evaluate the remote signing script in the blank page's context.
        await self.page.evaluate("() => { " + self.__get_js() + " }")
        self.signature = await self.page.evaluate('''() => {
var urls = ["''' + self.url + '''"]
var token = urls.map(x => window.byted_acrawler.sign({ url: x}))
// var t = {}
// webpackJsonp.filter(x => typeof x[1]['duD4'] === "function")[0][1].duD4(null, t)
// var token = urls.map(x => t.sign({ url: x}))
return token;
}''')
        self.data = await self.page.content()
        # Fix: the original awaited close() twice; closing an already-closed
        # browser can raise. Close once, then reap the child process.
        await self.browser.close()
        self.browser.process.communicate()

    def __get_js(self):
        # Fetch ByteDance's signing script; downloaded fresh for every instance.
        return requests.get("https://sf16-muse-va.ibytedtos.com/obj/rc-web-sdk-gcs/acrawler.js").text
|
# https://www.hackerrank.com/challenges/s10-standard-deviation/problem
"""
Objective
In this challenge, we practice calculating standard deviation.
Task
Given an array, X, of N integers, calculate and print the standard deviation. Your answer should be in decimal form, rounded to a scale of 1 decimal place.
An error margin of +-0.1 will be tolerated for the standard deviation.
"""
# challenge can easily be solved using statistics.pstdev()
import math
# function to calculate the mean/average
def avg(ls_val):
    """Arithmetic mean of *ls_val*, rounded to one decimal place."""
    total = sum(ls_val)
    count = len(ls_val)
    return round(total / count, 1)


# Population variance: mean squared distance from the (rounded) mean.
def variance(ls_val):
    """Return the average squared distance of each value from avg(ls_val)."""
    mean = avg(ls_val)
    squared_distances = [(value - mean) ** 2 for value in ls_val]
    # Divide the summed squared distances by the element count.
    return sum(squared_distances) / len(squared_distances)
# number of elements in list computed using len()
# following variable only needed for the challenge to work but not for the script
n = input()  # element count line; consumed but unused
ls_val = list(map(float, input().strip().split()))
ls_val_var = variance(ls_val)
# standard deviation = sqrt(variance), rounded to 1 decimal place
print(round(math.sqrt(ls_val_var),1))
|
import collections
from typing import Tuple
import jwt
from django.conf import settings
from rest_framework import status, serializers
from rest_framework.views import APIView
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from django.views.decorators.cache import cache_page
from apps.internal_users import models
from apps.apartments_analyzer import entities
from apps.apartments_analyzer.utils import construct_onliner_user_url
from apps.apartments_analyzer.models import (
RentApartment,
AreaOfInterest,
PrecalculatedApartmentStats,
)
from apps.apartments_analyzer.permissions import TelegramAuthAccess
from apps.apartments_analyzer.api.serializers import (
LatestStatsSerializer,
StatsHistorySerializer,
RentApartmentSerializer,
)
from apps.apartments_analyzer.services.telegram_auth import TelegramUserService
from apps.apartments_analyzer.services.stats_aggregator import (
ApartmentsStatisticsAggregator,
)
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
# More than this many distinct posts marks a user as a likely agent.
AGENT_COUNT_THRESHOLD = 2


def data_representation_as_tuple(date_representation: str) -> Tuple:
    """Convert a "YYYY-MM-DD"-style string into a tuple of ints (sortable)."""
    return tuple(map(int, date_representation.split("-")))
class ApartmentsListAPIView(ListAPIView):
    """Public, searchable/orderable list of rent apartments (priciest first)."""
    serializer_class = RentApartmentSerializer
    filter_backends = [SearchFilter, OrderingFilter]
    permission_classes = (AllowAny,)
    ordering = "-price"
    search_fields = ["price"]

    def get_queryset(self):
        # Prefetch images to avoid N+1 queries when the serializer renders them.
        qs = RentApartment.objects.prefetch_related("images")
        return qs
class ApartmentsStatsAPIView(APIView):
    """Aggregated apartment statistics; the response is cached for 10 hours."""
    permission_classes = (AllowAny,)

    @method_decorator(cache_page(60 * 60 * 10))
    def get(self, *args, **kwargs):
        stats = {}
        stats["by_hour"] = ApartmentsStatisticsAggregator.get_hour_aggregated_stats()
        stats[
            "average_square_meter_price"
        ] = ApartmentsStatisticsAggregator.get_average_square_meter_price_in_usd()
        stats[
            "by_weekday"
        ] = ApartmentsStatisticsAggregator.get_weekday_aggregated_stats()
        return Response(stats)
class ApartmentsLatestStatsAPIView(RetrieveAPIView):
    """Returns the most recent precalculated apartment stats snapshot."""
    permission_classes = (AllowAny,)
    serializer_class = LatestStatsSerializer

    def get_object(self):
        return PrecalculatedApartmentStats.objects.fetch_latest()
class ApartmentsStatsProgressAPIView(ListAPIView):
    """Unpaginated history of stats snapshots for the last 60 days (one per day)."""
    permission_classes = (AllowAny,)
    pagination_class = None
    serializer_class = StatsHistorySerializer

    def get_queryset(self):
        return PrecalculatedApartmentStats.objects.latest_per_day(days_before=60)
class SearchAreaListSerializer(serializers.ModelSerializer):
    """Serialises a search area (uuid, polygon, name)."""
    class Meta:
        model = AreaOfInterest
        fields = (
            "uuid",
            "poly",
            "name",
        )
class SearchAreasView(ListAPIView):
    """Lists the areas of interest attached to the Telegram user's search."""
    permission_classes = (TelegramAuthAccess,)
    serializer_class = SearchAreaListSerializer
    # NOTE(review): instantiated once at import time — settings changes after
    # startup are not picked up; confirm this is intentional.
    service = TelegramUserService.from_settings(settings)

    def get_queryset(self):
        # request.telegram_user is presumably set by TelegramAuthAccess — verify.
        search = self.service.get_search(self.request.telegram_user)
        return search.areas_of_interest.all()
class PriceFluctuationsAPIView(APIView):
    """Monthly average-price fluctuations, grouped by number of rooms."""
    permission_classes = (AllowAny,)

    def as_response(self, fluctuation_data):
        """
        The stats are also subdivided for the number of rooms
        as the metric that has the most effect on the price.
        Sample response:
        [
            ['2019-08', {'rooms': {'1': 100.0}}],
            ...
        ]
        """
        data = collections.defaultdict(lambda: {"rooms": {}})
        for item in fluctuation_data:
            data[item["import_month"]]["rooms"][item["total_rooms"]] = item[
                "average_price"
            ]
        return [[item, value] for item, value in data.items()]

    @method_decorator(cache_page(60 * 60 * 10))
    def get(self, *args, **kwargs):
        # Heavy aggregation — cached for 10 hours.
        return Response(
            self.as_response(
                ApartmentsStatisticsAggregator.prices_fluctuation_per_month()
            )
        )
class DailyPriceFluctuationsAPIView(APIView):
    """Daily average-price fluctuations, grouped by number of rooms,
    sorted chronologically by the parsed date tuple."""
    permission_classes = (AllowAny,)

    def as_response(self, fluctuation_data):
        """
        The stats are also subdivided for the number of rooms
        as the metric that has the most effect on the price.
        Sample response:
        [
            ['2019-08-01', {'rooms': {'1': 100.0}}],
            ...
        ]
        """
        data = collections.defaultdict(lambda: {"rooms": {}})
        for item in fluctuation_data:
            data[item["import_day"]]["rooms"][item["total_rooms"]] = item[
                "average_price"
            ]
        # Sort keys numerically ("2019-9" before "2019-10"), not lexically.
        return [
            [item, value]
            for item, value in sorted(
                data.items(), key=lambda item: data_representation_as_tuple(item[0])
            )
        ]

    @method_decorator(cache_page(60 * 60 * 10))
    def get(self, *args, **kwargs):
        # Heavy aggregation — cached for 10 hours.
        return Response(
            self.as_response(
                ApartmentsStatisticsAggregator.prices_fluctuation_per_day()
            )
        )
class AgentCheckView(APIView):
    def get(self, request, *args, **kwargs):
        """
        Checks whether the given user id belongs
        to the agent and also returns a list
        of bullettins
        """
        user_id = kwargs["user_id"]
        user_url = construct_onliner_user_url(user_id)
        bullettin_urls = RentApartment.objects.filter(author_url=user_url).values_list(
            "bullettin_url", flat=True
        )
        # Deduplicate: one user may own several listings pointing at one post.
        unique_urls = set(bullettin_urls)
        # Binary heuristic: few posts -> 0%, many posts -> 100%.
        probability = 0 if len(unique_urls) <= AGENT_COUNT_THRESHOLD else 100
        payload = {
            "is_agent_probability": probability,
            "posts": unique_urls,
        }
        return Response(payload, status=status.HTTP_200_OK)
class TelegramAuthUserView(APIView):
    """Exchanges a signed Telegram login payload for an internal user and a JWT."""
    permission_classes = (AllowAny,)

    @cached_property
    def service(self):
        # One service instance per view instance.
        return TelegramUserService(settings.TELEGRAM_ACCESS_TOKEN)

    def authenticate_user(self, user_data: dict) -> models.TelegramUser:
        """Create or fetch the internal user matching the Telegram payload."""
        telegram_id = int(user_data["id"])
        get = user_data.get
        first_name, last_name, username = (
            get("first_name"),
            get("last_name"),
            get("username"),
        )
        internal_user = self.service.get_or_create_internal_user(
            entities.TelegramUserData(
                id=telegram_id,
                first_name=first_name,
                last_name=last_name,
                username=username,
            )
        )
        return internal_user

    def auth_token_for_user(self, user: models.TelegramUser) -> str:
        """Issue an HS256 JWT carrying the telegram and internal user ids."""
        # NOTE(review): PyJWT < 2 returns bytes here, not str — confirm the
        # installed version matches the annotation.
        return jwt.encode(
            {"telegram_id": user.telegram_id, "user_id": user.pk,},
            settings.SECRET_KEY,
            algorithm="HS256",
        )

    def post(self, request, *args, **kwargs):
        """Verify the Telegram payload signature, then return id/token/username."""
        user_data = request.data.copy()
        # Drop DRF's 'format' query artifact before signature verification.
        user_data.pop("format", "")
        self.service.verify_telegram_payload(user_data)
        user = self.authenticate_user(user_data=user_data)
        return Response(
            {
                "id": user.pk,
                "token": self.auth_token_for_user(user=user),
                "username": user.username,
            },
            status=status.HTTP_200_OK,
        )
class TelegramTokenVerifyView(APIView):
    """Confirms a Telegram auth token is valid and echoes the user's identity."""
    permission_classes = (
        AllowAny,
        TelegramAuthAccess,
    )

    def get(self, request, *args, **kwargs):
        # request.telegram_user is presumably set by TelegramAuthAccess — verify.
        telegram_user = request.telegram_user
        return Response(
            {"id": telegram_user.pk, "username": telegram_user.username,},
            status=status.HTTP_200_OK,
        )
|
# Jeopardy!
# Ranges $100
print("Ranges $100")
for i in range(4):
    print(i)  # prints 0 1 2 3
# Ranges $200
print("Ranges $200")
for j in range(0,4):
    print(j)  # explicit start: same as range(4) -> 0 1 2 3
# Ranges $300
print("Ranges $300")
for ji in range(1,3):
    print(ji)  # stop is exclusive -> 1 2
# Ranges $400
print("Ranges $400")
for var in range(2,3):
    print(var)  # single value -> 2
# Ranges $500
print("Ranges $500")
for num in range(5,5):
    print(num)  # range(5,5) is empty — the body never runs
|
#!/usr/bin/env python3
from tkinter import *
from tkinter import filedialog
from bs4 import BeautifulSoup
import requests
import re
from fpdf import FPDF
from PIL import Image
# HTML link to save images from
html_link = "https://manganelo.com/chapter/ranma_12/chapter_1"
# Select where to save images
# root = Tk()
# root.withdraw()
# savedir = filedialog.askdirectory(initialdir="/Users/ivancito", title="Select folder to save images")
#
# Request website and parse it
r = requests.get(html_link)
soup = BeautifulSoup(r.content, "lxml")
# Loop through all images in the HTML
# TODO: Convert this into a function
img_links = []
for image in soup.find_all('img'):
    #Find the /##.jpg pattern and append to the list
    # NOTE(review): image.get('src') can be None for lazy-loaded images, which
    # would make re.search raise TypeError — confirm against the live page.
    pattern = r"/\d+\.jpg"
    if re.search(pattern, image.get('src')):
        img_links.append(image.get('src'))
# Create fileName list
# TODO: Convert this into a function
fileNames = []
for ii in range(len(img_links)):
    # Take the "NN.jpg" tail of each URL as the local file name.
    fileNames.append(re.findall(r"\d+.jpg", img_links[ii])[0])
# Request images from website and save them
for ii in range(len(img_links)):
    image_request = requests.get(img_links[ii])
    print(img_links[ii])
    # Files are written into the current working directory.
    with open(fileNames[ii], 'wb') as f:
        f.write(image_request.content)
# # Convert Images to pdf
# pdf = FPDF()
#
# for image in fileNames[0:9]:
#     pdf.add_page()
#     pdf.image(image, x=0, y=0)
#
# pdf.output("yourfile.pdf", "F")
#!/usr/bin/env python
# --!-- coding: utf8 --!--
import re
import noteflow.functions as F
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class TagCollector(QObject):
    """Holds the set of known tags and emits tagsChanged on every mutation."""
    tagsChanged = pyqtSignal()

    def __init__(self):
        QObject.__init__(self)
        self._tags = []

    def addTag(self, text, color=None, background=None, border=None):
        """Create a Tag, wire its change signal through, and announce the change."""
        tag = Tag(text, color=color, background=background, border=border)
        self._tags.append(tag)
        tag.changed.connect(self.tagsChanged)
        self.tagsChanged.emit()

    def toListLower(self):
        """All tag texts, lower-cased."""
        return [tag.text.lower() for tag in self._tags]

    def find(self, text):
        """Return the tag whose text matches *text* case-insensitively, else None."""
        needle = text.lower()
        for tag in self._tags:
            if tag.text.lower() == needle:
                return tag
        return None

    def contains(self, tag):
        return tag.lower() in self.toListLower()

    def match(self, note):
        """First tag that matches *note*, or False when none does."""
        for tag in self._tags:
            if tag.match(note):
                return tag
        return False

    def removeTag(self, text):
        tag = self.find(text)
        if tag is None:
            return
        self._tags.remove(tag)
        self.tagsChanged.emit()

    def __iter__(self):
        return iter(self._tags)
class Tag(QObject):
    """A single '#tag' with optional display colours."""
    changed = pyqtSignal()

    def __init__(self, text, color=None, background=None, border=None):
        QObject.__init__(self)
        # Normalise to a leading '#'. Fix: the original indexed text[0],
        # which raised IndexError for an empty string; startswith is safe
        # ("" becomes "#").
        self.text = text if text.startswith("#") else "#" + text
        self.color = QColor(color) if color else None
        self.background = QColor(background) if background else None
        self.border = QColor(border) if border else None

    def match(self, note):
        # Case-insensitive substring search in the note body.
        return self.text.lower() in note.text.lower()

    def toString(self):
        # Serialise as "text,color,background,border"; unset colours are empty.
        return "{},{},{},{}".format(
            self.text,
            self.color.name() if self.color else "",
            self.background.name() if self.background else "",
            self.border.name() if self.border else "",
        )
|
# -*- coding: utf-8 -*-
# coding = utf-8
import datetime
from django.contrib import admin
from django.db.models.aggregates import Sum
from django.utils.translation import ugettext_lazy as _
from basedata.models import ExtraParam
from common import generic
from selfhelp.models import WorkOrder, WOExtraValue, WOItem, Reimbursement, ReimbursementItem, Loan, Enroll, Feedback, \
Activity
class ParamValueInline(admin.TabularInline):
    """Inline editor for a work order's extra parameter values."""
    model = WOExtraValue
    fields = ('param_name', 'param_value')
    readonly_fields = ['param_name']

    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        # Limit selectable params to the ones defined for the order's service.
        if db_field.name == 'param_name':
            app_info = generic.get_app_model_info_from_request(request)
            instance = app_info['obj']
            if instance:
                kwargs['queryset'] = ExtraParam.objects.filter(material=instance.service)
        return super(ParamValueInline, self).formfield_for_foreignkey(db_field, request, **kwargs)

    def get_extra(self, request, obj=None, **kwargs):
        # One blank row when creating, none when editing an existing order.
        if obj:
            return 0
        else:
            return 1
class ItemInline(admin.TabularInline):
    """Inline editor for a work order's material items."""
    model = WOItem
    raw_id_fields = ['material']

    def get_extra(self, request, obj=None, **kwargs):
        # One blank row when creating, none when editing an existing order.
        if obj:
            return 0
        else:
            return 1
class WorkOrderAdmin(generic.BOAdmin):
    """Admin for work orders: auto-generated 'WO' codes, inline items and
    service-specific extra parameters."""
    CODE_PREFIX = 'WO'
    CODE_NUMBER_WIDTH = 5
    list_display = ['code', 'begin', 'title', 'classification', 'business_domain', 'status']
    list_display_links = ['code', 'title']
    exclude = ['creator', 'modifier', 'creation', 'modification']
    search_fields = ['code', 'title']
    list_filter = ['classification', 'service', 'status']
    fields = (
        ('begin', 'end',),
        ('code', 'refer',), ('classification', 'business_domain',),
        ('service', 'project',),
        ('title', 'status',), ('description',), ('attach',), ('detail',)
    )
    readonly_fields = ['status']
    raw_id_fields = ['service', 'project', 'refer']
    inlines = [ItemInline, ParamValueInline]
    date_hierarchy = 'begin'

    def save_model(self, request, obj, form, change):
        # Default the owner to the acting admin user on first save.
        if obj.user is None:
            obj.user = request.user
        super(WorkOrderAdmin, self).save_model(request, obj, form, change)

    def get_changeform_initial_data(self, request):
        """Default period: today through today + 30 days.

        Fix: use the module-level datetime import (the local re-import shadowed
        it needlessly) and return the computed date for 'begin' — the original
        returned the `datetime.date.today` callable for 'begin' but a concrete
        value for 'end'.
        """
        td = datetime.date.today()
        end = td + datetime.timedelta(30)
        return {'begin': td, 'end': end}

    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        # A work order may refer to another order, but never to itself.
        if db_field.name == 'refer':
            app_info = generic.get_app_model_info_from_request(request)
            if app_info and app_info['obj']:
                kwargs['queryset'] = WorkOrder.objects.exclude(id=app_info['id'])
        return super(WorkOrderAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class LoanAdmin(generic.BOAdmin):
    """Admin for employee loans ('JK' codes) with a custom 'pay' action."""
    CODE_PREFIX = 'JK'
    CODE_NUMBER_WIDTH = 5
    list_display = ['code', 'title', 'project', 'loan_amount', 'applier', 'status']
    list_display_links = ['code', 'title']
    readonly_fields = ['status', 'logout_time', 'logout_amount']
    raw_id_fields = ['project', 'user']
    # NOTE(review): ('project') is a plain string, not a 1-tuple — Django
    # accepts a bare field name here, but the parentheses are misleading.
    fields = (
        ('code',), ('title', 'loan_amount',), ('description',), ('project'), ('user', 'status'),
        ('logout_time', 'logout_amount',),
    )
    extra_buttons = [{'href': 'pay', 'title': _('pay')}]
    search_fields = ['code', 'title', 'user__username']
    date_hierarchy = 'begin'

    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        # Render paid ('P') loans read-only.
        if object_id:
            try:
                obj = Loan.objects.get(id=object_id)
                if obj and obj.status == 'P':
                    extra_context = extra_context or {}
                    extra_context.update(dict(readonly=True))
            except Exception as e:
                # Missing/garbled id: fall through to the default rendering.
                pass
        return super(LoanAdmin, self).changeform_view(request, object_id, form_url, extra_context)

    def save_model(self, request, obj, form, change):
        # Default the owner to the acting admin user on first save.
        if obj and obj.user is None:
            obj.user = request.user
        super(LoanAdmin, self).save_model(request, obj, form, change)
class ReimbursementItemInline(admin.TabularInline):
    """Inline editor for a reimbursement's expense items."""
    model = ReimbursementItem
    raw_id_fields = ['expense_account']

    def get_extra(self, request, obj=None, **kwargs):
        # One blank row when creating, none when editing an existing record.
        if obj:
            return 0
        else:
            return 1
class ReimbursementAdmin(generic.BOAdmin):
    """Admin for expense reimbursements ('BX' codes) with loan settlement."""
    CODE_PREFIX = 'BX'
    CODE_NUMBER_WIDTH = 5
    list_display = ['code', 'title', 'project', 'amount', 'applier', 'status']
    list_display_links = ['code', 'title']
    inlines = [ReimbursementItemInline]
    raw_id_fields = ['project', 'wo', 'user', 'org']
    readonly_fields = ['loan_amount', 'pay_time', 'status', 'amount']
    fieldsets = [
        (None, {'fields': [('code', 'user'), ('title', 'amount', 'status',), ('description',), ('project', 'wo',)]}),
        (_('fico'), {'fields': [('org',), ('loan',), ('logout_amount', 'pay_amount',)], 'classes': ['collapse']})
    ]
    extra_buttons = [{'href': 'pay', 'title': _('pay')}]
    search_fields = ['code', 'title', 'project__code', 'project__name', 'user__username']
    date_hierarchy = 'begin'

    def get_changeform_initial_data(self, request):
        """Preload the owner's total outstanding loan amount.

        Fix: the app-info helper returns a dict, so the original
        getattr(apps, 'obj', None) always yielded None and the loan total was
        computed for the acting admin instead of the reimbursement's owner.
        """
        apps = generic.get_app_model_info_from_request(request)
        obj = apps.get('obj') if apps else None
        current = obj.user if obj else request.user
        sm = Loan.objects.filter(user=current).aggregate(Sum('loan_amount')).get('loan_amount__sum') or 0.00
        return {'loan_amount': sm}

    def save_model(self, request, obj, form, change):
        # Default the owner to the acting admin user on first save.
        if obj and obj.user is None:
            obj.user = request.user
        super(ReimbursementAdmin, self).save_model(request, obj, form, change)

    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        # Restrict the selectable loan: a paid ('P') reimbursement keeps its
        # linked loan; otherwise offer the owner's unsettled loans.
        if db_field.name == 'loan':
            apps = generic.get_app_model_info_from_request(request)
            current = request.user
            # Fix: guard against a missing 'obj' — the original dereferenced
            # obj.user whenever apps was truthy, crashing on the add form.
            obj = apps.get('obj') if apps else None
            if obj is not None:
                current = obj.user
                if obj.status == 'P':
                    kwargs['queryset'] = Loan.objects.filter(id=obj.loan.id)
                else:
                    kwargs['queryset'] = Loan.objects.filter(user=current, is_clear=0)
            else:
                kwargs['queryset'] = Loan.objects.filter(user=current, is_clear=0)
        return super(ReimbursementAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)

    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        # Render paid ('P') reimbursements read-only.
        if object_id:
            try:
                obj = Reimbursement.objects.get(id=object_id)
                if obj and obj.status == 'P':
                    extra_context = extra_context or {}
                    extra_context.update(dict(readonly=True))
            except Exception as e:
                # Missing/garbled id: fall through to the default rendering.
                pass
        return super(ReimbursementAdmin, self).changeform_view(request, object_id, form_url, extra_context)
class EnrollInline(admin.TabularInline):
    """Inline table of Enroll records shown on a parent change form."""
    model = Enroll
class FeedbackInline(admin.TabularInline):
    """Inline table of Feedback records shown on a parent change form."""
    model = Feedback
class ActivityAdmin(generic.BOAdmin):
    """Admin for Activity records with AC-prefixed auto-generated codes."""
    CODE_PREFIX = 'AC'
    CODE_NUMBER_WIDTH = 5
    list_display = ['code', 'begin_time', 'end_time', 'title', 'classification', 'room']
    list_display_links = ['code', 'title']
    raw_id_fields = ['room', 'parent']
    fieldsets = [
        (None, {'fields': [('begin_time', 'end_time',), ('title', 'classification',), ('description',),
                           ('host', 'speaker',), ('room', 'location',), ('attach',)]}),
        (_('other info'),
         {'fields': [('mail_list',), ('parent',), ('mail_notice', 'short_message_notice', 'weixin_notice',)],
          'classes': ['collapse']})
    ]

    def get_changeform_initial_data(self, request):
        """Suggest a slot beginning 12 hours from now and lasting 6 hours."""
        start = datetime.datetime.now() + datetime.timedelta(hours=12)
        return {'begin_time': start, 'end_time': start + datetime.timedelta(hours=6)}
# Register the business-object admins with the default admin site.
admin.site.register(WorkOrder, WorkOrderAdmin)
admin.site.register(Loan, LoanAdmin)
admin.site.register(Reimbursement, ReimbursementAdmin)
admin.site.register(Activity, ActivityAdmin)
|
# Generated by Django 2.1.7 on 2019-04-01 18:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Post.subtitle (default '') and shrink Post.title to max_length=50."""

    dependencies = [
        ('hubble', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='subtitle',
            field=models.CharField(default='', max_length=100),
        ),
        migrations.AlterField(
            model_name='post',
            name='title',
            field=models.CharField(max_length=50),
        ),
    ]
|
"""This script contains code for the Goverment class and it's inherited properties
"""
import logging
from src.eda.parent import Parent
import streamlit as st
import pickle
import pandas as pd
from src.helper import create_logger
from src.eda.graphs import Graph
# Module-wide logger: DEBUG records go to logs/Government.log, WARNING to the console.
logger = create_logger('process', 'logs/Government.log',
                       logging.DEBUG, logging.WARNING)
class Control_Population():
    """CRUD operations over the collection of Parent objects (the population)."""

    def __init__(self):
        # BUG FIX: the population is used as a list throughout this class
        # (.append in add_parent, .pop(index) in remove_parent, enumerate in
        # the fetch helpers); it was previously initialised as a dict.
        self.population = []

    def add_parent(self, filename, start_date, end_date):
        """Add a Parent built from a ticker CSV into the population.

        Args:
            filename (str): CSV file name under data/index_csv/ (e.g. "SP500.csv").
            start_date (date): Start date.
            end_date (date): End date.

        Returns:
            bool: True on success, False on read failure or duplicate parent.
        """
        location = 'data/index_csv/' + filename
        logger.debug(f"Looking for {location}")
        try:
            data = pd.read_csv(location)
            ticker_list = data["Symbol"]
        except Exception:
            logger.error(f"Error in Reading {location} File")
            st.write(f"Error in Reading {location} File")
            return False
        logger.debug("Checking population")
        for j in self.fetch_parent_by_nate(filename[:-4], start_date, end_date):
            # An identical parent (same name and date range) already exists.
            logger.warning(f"Found Existing Parent location: {j}")
            return False
        try:
            parent = Parent(filename[:-4], ticker_list, start_date, end_date)
            self.population.append(parent)
            logger.info("Parent Added")
            return True
        except Exception:
            logger.exception("Error In adding the parent")
            return False

    def remove_parent(self, filename, start_date, end_date):
        """Remove the first Parent matching the name and date range.

        Args:
            filename (str): name of Parent file (Ticker Symbol + ".csv").
            start_date (date): Start date.
            end_date (date): End date.

        Returns:
            bool: True if a parent was removed, False otherwise.
        """
        try:
            logger.info(
                f"Looking for parent matching the name: (unknown) and start_date:{start_date} and end_date: {end_date}")
            for i in self.fetch_parent_by_nate(filename[:-4], start_date, end_date):
                logger.info("Found Parent")
                self.population.pop(i)
                logger.info(f"Successfully removed (unknown) parent")
                return True
            # BUG FIX: previously fell off the end and returned None (not the
            # documented False) when no matching parent was found.
            return False
        except Exception:
            logger.exception("Error while removing the parent")
            return False

    def fetch_parent_by_name(self, name):
        """Return the population indices of parents whose name equals *name*.

        Args:
            name (str): name of parent (Ticker name).

        Returns:
            list[int]: Indices into self.population.
        """
        temp = []
        for loc, parent in enumerate(self.population):
            if parent.name == name:
                temp.append(loc)
        return temp

    def fetch_parent_by_nate(self, name, start_date, end_date):
        """Return indices of parents matching *name* and both boundary dates.

        Accepts either datetime inputs (compared via .date()) or plain dates.

        Returns:
            list[int]: Indices into self.population.
        """
        temp = []
        for loc in self.fetch_parent_by_name(name):
            try:
                if self.population[loc].start_date == start_date.date() and self.population[loc].end_date == end_date.date():
                    logger.debug("Found Parent")
                    temp.append(loc)
            except AttributeError:
                # start_date/end_date are plain dates (no .date() method).
                if self.population[loc].start_date == start_date and self.population[loc].end_date == end_date:
                    logger.debug("Found Parent")
                    temp.append(loc)
        return temp
class Government_Rules():
    """Tracker selection over the population of Parent objects."""

    def __init__(self):
        # BUG FIX: the population is iterated as a list of Parent objects in
        # population_names/set_tracker; it was previously initialised as a dict.
        self.population = []

    def get_population(self):
        """Return the whole population collection."""
        return self.population

    def population_names(self):
        """Return the names of all parents in the population."""
        return [parent.name for parent in self.get_population()]

    def set_tracker(self):
        """Let the user pick, via a streamlit selectbox, the parent to track."""
        dic = {}
        for loc, parent in enumerate(self.population):
            dic[loc] = f"{parent.name} FROM {str(parent.start_date)} TO {str(parent.end_date)}"
        index = st.selectbox("Select Tracker", list(dic.values()))
        if index:
            st.write(f"Selected Tracker {index}")
            self.track = index

    def get_tracker(self):
        """Return the tracker the framework points to, or None if never set."""
        try:
            return self.track
        except AttributeError:
            # BUG FIX: narrowed from a bare except; only a missing .track
            # attribute is the expected failure here.
            logger.exception("Tracker Not Set")
# TODO: check that all the children have the same set of features
# TODO: check that all the children have the same size
# TODO: check whether a specific path exists
# TODO: check that data/index_csv exists
# TODO: check that .temp/ exists; if not, create it
# TODO: create hyperparameter files
class Government(Control_Population, Government_Rules, Graph):
    """Facade combining population CRUD, tracker rules and graphing.

    Args:
        Control_Population (class): CRUD operations over parents.
        Government_Rules (class): Tracker selection.
        Graph (class): Graph class for visualization.
    """

    def __init__(self, government_name):
        """Initiate a named Government with an empty population.

        Args:
            government_name (str): Name of the government.
        """
        self.government = government_name
        self.population = []
        logger.info("Government Initiated")

    def dump(self, location: str):
        """Pickle this government object to the desired location.

        Args:
            location (str): Target file path.
        """
        try:
            with open(location, 'wb') as f:
                pickle.dump(self, f)
        except Exception:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows KeyboardInterrupt/SystemExit.
            st.error("Error in dumping File")

    @classmethod
    def load_government(cls, location: str):
        """Load a government object from a pickle file at *location*.

        Args:
            location (str): Location of the file.

        Returns:
            tuple: (object, True) on success, (None, False) on any failure
            (missing file, corrupt pickle, ...).
        """
        try:
            with open(location, 'rb') as f:
                return pickle.load(f), True
        except Exception:
            return None, False
|
from google.appengine.ext import db
class StoryModeStats(db.Model):
    """Story-mode stats entity: device ID, nickname, score and a timestamp."""
    deviceID = db.StringProperty(required=True)
    nickname = db.StringProperty(required=True)
    score = db.IntegerProperty(required = True)
    # Stored as a string rather than a DateTimeProperty.
    timestamp = db.StringProperty(required=True)
class ArcadeModeStats(db.Model):
    """Arcade-mode stats entity: device ID, nickname, attempts and a timestamp."""
    deviceID = db.StringProperty(required=True)
    nickname = db.StringProperty(required=True)
    attempts = db.IntegerProperty(required=True)
    # Stored as a string rather than a DateTimeProperty.
    timestamp = db.StringProperty(required=True)
# Generated by Django 2.1.7 on 2019-05-10 09:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace StateTransition.state with two choice fields for report/asset state."""

    dependencies = [
        ('core', '0043_auto_20190424_1029'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='statetransition',
            name='state',
        ),
        migrations.AddField(
            model_name='statetransition',
            name='asset_state_from_report',
            field=models.CharField(choices=[('requires repair', 'requires repair'), ('requires external assessment', 'requires external assessment'), ('Damaged', 'Damaged')], default='requires repair', max_length=50),
        ),
        migrations.AddField(
            model_name='statetransition',
            name='incident_report_state',
            field=models.CharField(choices=[('newly reported', 'newly reported'), ('internal assessment', 'internal assessment'), ('external assessment', 'external assessment'), ('out for repair', 'out for repair')], default='newly reported', max_length=50),
        ),
    ]
|
def is_pair(sym1, sym2):
if sym1 == "(" and sym2 == ")":
return True
if sym1 == "{" and sym2 == "}":
return True
if sym1 == "[" and sym2 == "]":
return True
return False
def check(string):
    """Return "yes" when the bracket string fully cancels out, else "no".

    Adjacent matching pairs collapse repeatedly via a stack; an empty
    stack at the end means the whole string was balanced.
    """
    matching_open = {")": "(", "}": "{", "]": "["}
    pending = []
    for symbol in string:
        if pending and matching_open.get(symbol) == pending[-1]:
            pending.pop()
        else:
            pending.append(symbol)
    return "no" if pending else "yes"
# Read the number of test cases, then classify each bracket string.
n = int(input())
for i in range(n):
    print(check(input()))
|
#! /usr/bin/python2.7
import requests
def getTaskSize():
    """Return the number of tasks reported by the trouble-ticket API.

    NOTE(review): performs a blocking network request with no timeout or
    error handling; a connection failure raises an unhandled exception.
    """
    data = requests.get('https://trouble.physics.byu.edu/api/tasks').json()
    ids = [x['id'] for x in data]
    return len(ids)
print(getTaskSize())
|
from rest_framework import serializers
from board import models
from .item_serializers import CommentAuthorSerializer
class GroupSerializer(serializers.ModelSerializer):
    """Serializer for Group, embedding its member profiles read-only."""
    members = CommentAuthorSerializer(many=True, read_only=True, required=False)

    class Meta:
        model = models.Group
        fields = ['members', 'team_id', 'id', 'name', 'description']

    def create(self, validated_data):
        # Bind the new group to the team taken from the nested route kwargs.
        validated_data['team_id'] = self.context['view'].kwargs['team_pk']
        return super().create(validated_data)
class GroupMembersSerializer(serializers.Serializer):
    """Plain serializer carrying only a list of member profile primary keys."""
    def update(self, instance, validated_data):
        # Updating is not supported; required override of the abstract method.
        pass
    def create(self, validated_data):
        # Pass-through: the view performs the actual membership change.
        return validated_data
    members = serializers.PrimaryKeyRelatedField(many=True, queryset=models.Profile.objects.all())
class AssigneeSerializer(serializers.ModelSerializer):
    """Serializer for Assignee: exactly one of profile_id / group_id is the target."""
    item_id = serializers.PrimaryKeyRelatedField(required=False, read_only=True)
    profile = CommentAuthorSerializer(required=False, read_only=True)
    group = GroupSerializer(required=False, read_only=True)
    profile_id = serializers.PrimaryKeyRelatedField(required=False, allow_null=True, queryset=models.Profile.objects.all())
    group_id = serializers.PrimaryKeyRelatedField(required=False, allow_null=True, queryset=models.Group.objects.all())
    type = serializers.CharField(source='get_type_display', read_only=True)

    class Meta:
        model = models.Assignee
        fields = ['profile_id', 'group_id', 'type', 'id', 'profile', 'group', 'item_id']

    def create(self, validated_data):
        """Create an Assignee bound to the item from the nested route kwargs."""
        item_id = self.context['view'].kwargs['item_pk']
        group = validated_data.get('group_id')
        profile = validated_data.get('profile_id')
        if group is not None:
            assignee = models.Assignee(item_id=item_id, group_id=group.id, type=models.Assignee.Type.GROUP)
        else:
            assignee = models.Assignee(item_id=item_id, profile_id=profile.id, type=models.Assignee.Type.USER)
        assignee.save()
        return assignee

    def validate(self, attrs):
        """Enforce that exactly one of profile_id / group_id is provided.

        BUG FIX: the previous check only rejected the case where both were
        missing, contradicting its own error message — supplying both
        silently created a group assignment.
        """
        has_profile = attrs.get("profile_id") is not None
        has_group = attrs.get("group_id") is not None
        if has_profile == has_group:
            raise serializers.ValidationError('Exactly one of [Assignee.profile, Assignee.group] must be set')
        return serializers.ModelSerializer.validate(self, attrs)
|
from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
# Flask app backed by a local SQLite database (test.db).
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Todo(db.Model):
    """Company record (the model name 'Todo' is historical)."""
    id = db.Column(db.Integer,primary_key = True)
    company_name = db.Column(db.String(30), nullable = False)
    email = db.Column(db.String(30), nullable = False)
    # NOTE(review): an Integer column drops leading zeros and rejects '+'
    # prefixes; a String column would be safer for phone numbers — confirm.
    phone_number = db.Column(db.Integer, nullable = False )
    address = db.Column(db.String(300), nullable = False)
    ceo_name = db.Column(db.String(30))
    date_created = db.Column(db.DateTime, default = datetime.utcnow)
    def __repr__(self):
        return self.company_name
@app.route('/', methods = ['POST','GET'])
def index():
    """List all companies, or create one from the submitted form (POST)."""
    if request.method == 'POST':
        company = request.form['company_name']
        email = request.form['email']
        phone = request.form['phone_number']
        address = request.form['address']
        # BUG FIX: the CEO name was previously read into the same variable as
        # the company name (CN), so company_name silently became the CEO name.
        ceo = request.form['ceo_name']
        new_task = Todo(company_name=company, email=email, phone_number=phone,
                        address=address, ceo_name=ceo)
        try:
            db.session.add(new_task)
            db.session.commit()
            return redirect('/')
        except Exception:
            return "There was an issue adding your task"
    else:
        tasks = Todo.query.order_by(Todo.date_created).all()
        return render_template('index.html', tasks = tasks)
@app.route('/delete/<int:id>')
def delete(id):
    """Delete the company with the given id, then return to the list."""
    task_to_delete = Todo.query.get_or_404(id)
    try:
        db.session.delete(task_to_delete)
        db.session.commit()
        return redirect('/')
    except Exception:
        # Narrowed from a bare except so system-exit signals are not swallowed.
        return "There was an issue deleting this task"
@app.route('/update/<int:id>', methods = ['GET','POST'])
def update(id):
    """Edit an existing company: render the form on GET, save it on POST."""
    task = Todo.query.get_or_404(id)
    if request.method == 'POST':
        task.company_name= request.form['company_name']
        task.email = request.form['email']
        task.phone_number = request.form['phone_number']
        task.address = request.form['address']
        task.ceo_name = request.form['ceo_name']
        try:
            db.session.commit()
            return redirect('/')
        except Exception:
            # Narrowed from a bare except so system-exit signals are not swallowed.
            return "There was an issue updating this task"
    else:
        return render_template('update.html',task = task)
@app.route('/search', methods = ['GET','POST'])
def search():
    """Search companies by exact phone number; GET just redirects home."""
    if request.method != 'POST':
        return redirect('/')
    matches = Todo.query.filter_by(phone_number=request.form['phone_number']).all()
    if matches:
        return render_template('index.html', tasks=matches)
    return "<center><h4>There is no company with given Phone number. Please check the phone number and try again!</h4></center>"
if __name__ == "__main__":
app.run(debug=True)
|
"""
(C) Copyright 2018-2023 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
from ior_test_base import IorTestBase
from telemetry_test_base import TestWithTelemetry
# Pretty print constant: ANSI-colored pass/fail markers for the summary table.
RESULT_OK = "[\033[32m OK\033[0m]"
RESULT_NOK = "[\033[31mNOK\033[0m]"
# It should take as much as 10s for the vos space metrics to be up to date: it is equal to the idle
# GC ULT interval time.  The extra margin gives the poll loop some slack.
TIMEOUT_DEADLINE = 15
class TelemetryPoolSpaceMetrics(IorTestBase, TestWithTelemetry):
    """Test telemetry pool space basic metrics.
    :avocado: recursive
    """
    def __init__(self, *args, **kwargs):
        """Initialize a TelemetryPoolSpaceMetrics object."""
        super().__init__(*args, **kwargs)
        # Names of the engine telemetry metrics verified by this test.
        self.metric_names = [
            'engine_pool_vos_space_scm_used',
            'engine_pool_vos_space_nvme_used']
        self.data_size = 0
        self.scm_data_size_percent = None
        self.scm_metadata_max_size = 0
        self.pool_space_metrics_minmax = None
    def setUp(self):
        """Set up each test case."""
        # Start the servers and agents
        super().setUp()
        # Size of the workload ior will write (drives the expected metric values).
        self.data_size = self.ior_cmd.block_size.value
        self.scm_metadata_max_size = self.params.get(
            "metadata_max_size", "/run/scm_metric_thresholds/*")
        # Expected (min, max) value per metric, keyed by pool namespace:
        # SCM-only pools hold the data (plus metadata) in SCM; SCM+NVMe pools
        # hold the data in NVMe and only metadata in SCM.
        self.pool_space_metrics_minmax = {
            "/run/pool_scm/*": {
                "engine_pool_vos_space_scm_used": (
                    self.data_size,
                    self.data_size + self.scm_metadata_max_size
                ),
                "engine_pool_vos_space_nvme_used": (0, 0)
            },
            "/run/pool_scm_nvme/*": {
                "engine_pool_vos_space_scm_used": (1, self.scm_metadata_max_size),
                "engine_pool_vos_space_nvme_used": (self.data_size, self.data_size)
            }
        }
    def get_expected_values_range(self, namespace):
        """Return the expected metrics value output.
        This function returns a hash map of pairs defining min and max values of each tested
        telemetry metrics. The hash map of pairs returned depends on the pool created with the
        given namespace and the size of the data written in it.
        Args:
            namespace (string): Namespace of the last created pool.
        Returns:
            expected_values (dict): Dictionary of the expected metrics value output.
        """
        self.assertIn(
            namespace, self.pool_space_metrics_minmax,
            "Invalid pool namespace: {}".format(namespace))
        return self.pool_space_metrics_minmax[namespace]
    def get_metrics(self, names):
        """Obtain the specified metrics information.
        Args:
            names (list): List of metric names to query.
        Returns:
            dict: a dictionary of metric keys linked to their aggregated values.
        """
        metrics = {}
        for name in names:
            metrics[name] = 0
        # Sum each metric over all hosts/targets returned by the telemetry query.
        for data in self.telemetry.get_metrics(",".join(names)).values():
            for name, value in data.items():
                for metric in value["metrics"]:
                    metrics[name] += metric["value"]
        return metrics
    def test_telemetry_pool_space_metrics(self):
        """JIRA ID: DAOS-10192
        Create a file of 16MiB thanks to ior to verify the DAOS engine IO telemetry vos space
        metrics.
        Steps:
            Create a pool
            Create a container
            Generate deterministic workload. Using ior to write 128MiB of data.
            Use telemetry command to get value of vos space metrics "engine_pool_vos_space_scm_used"
            and "engine_pool_vos_space_nvme_used" for all targets.
            Verify the sum of all parameter metrics matches the workload.
        :avocado: tags=all,daily_regression
        :avocado: tags=hw,medium
        :avocado: tags=telemetry
        :avocado: tags=TelemetryPoolSpaceMetrics,test_telemetry_pool_space_metrics
        """
        test_timeouts = {}
        for namespace in ["/run/pool_scm/*", "/run/pool_scm_nvme/*"]:
            test_name = namespace.split("/")[2]
            self.log.debug("Starting test %s", test_name)
            # create pool and container
            # Aggregation is disabled so the space counters stay deterministic.
            self.add_pool(namespace=namespace, create=True, connect=False)
            self.pool.disable_aggregation()
            self.add_container(pool=self.pool)
            # Run ior command.
            self.update_ior_cmd_with_pool(create_cont=False)
            self.run_ior_with_pool(
                timeout=200, create_pool=False, create_cont=False)
            # Testing VOS space metrics
            expected_values = self.get_expected_values_range(namespace)
            test_counter = 0
            timeout = TIMEOUT_DEADLINE
            # Poll once per second until the metrics are in range twice in a
            # row (to ride out GC lag), or until the deadline expires.
            while timeout > 0:
                metrics = self.get_metrics(self.metric_names)
                is_metric_ok = True
                for name in self.metric_names:
                    val = metrics[name]
                    min_val, max_val = expected_values[name]
                    is_metric_ok &= min_val <= val <= max_val
                    self.log.debug(
                        "Check of the metric %s: got=%d, wait_in=[%d, %d], ok=%r, timeout=%d",
                        name, val, min_val, max_val, is_metric_ok, timeout)
                test_counter = (test_counter + 1) if is_metric_ok else 0
                if test_counter >= 2:
                    self.log.info(
                        "Test %s successfully completed in %d sec",
                        test_name, TIMEOUT_DEADLINE - timeout)
                    break
                time.sleep(1)
                timeout -= 1
            test_timeouts[test_name] = timeout
            self.destroy_containers(self.container)
            self.destroy_pools(self.pool)
        # A timeout of 0 means the metrics never settled in range for that pool.
        self.log.info("\n############ Test Results ############")
        for test_name, timeout in test_timeouts.items():
            self.log.info(
                "# Test %s:\t%s",
                test_name, RESULT_OK if timeout > 0 else RESULT_NOK)
        self.log.info("######################################")
        self.assertTrue(
            0 not in test_timeouts.values(),
            "One or more vos space metric tests have failed")
|
#!/usr/bin/env python
# coding: utf-8
# In[45]:
#sol_1
# "369 game": numbers whose LAST digit is 3, 6 or 9 are replaced by "X".
n = int(input())
if n > 30:
    print("n must be less than 30")
    print("try again")
else:
    for i in range(1, n+1):
        if i % 10 == 3 or i % 10 == 6 or i % 10 == 9:
            print("X", end = " ")
        else:
            print(i, end = " ")
# In[47]:
#sol_2
# Variant: ANY digit 3, 6 or 9 anywhere in the number is replaced by "X".
# NOTE(review): this differs from sol_1 for n = 30 (30 contains a '3') — confirm intent.
n = int(input())
if n > 30:
    print("n must be less than 30")
    print("try again")
else:
    for i in range(1, n+1):
        if '3' in str(i) or '6' in str(i) or '9' in str(i):
            print("X", end = ' ')
        else:
            print(i, end = ' ')
|
# Q4
# Initial amount: R$ 10000
# Yield per period (%): 0.54
# Contribution per period: R$ 1000
# Total periods: 120
import matplotlib.pyplot as plt
# Read the investment parameters (prompt strings are in Portuguese).
valor_inicial = float(input("Valor de Investimento Inicial R$ "))
rendimento_periodo = float(input("Taxa de Juros % "))
aporte = float(input("Valor Mensal R$ "))
mês = 1
saldo = valor_inicial
periodos = int(input("Em quantos meses de aplicação "))
valor_rentabilidade = []
# Compound the balance each month and add the fixed monthly contribution.
while mês <= periodos:
    rendimento = (saldo * (rendimento_periodo / 100))
    saldo = saldo + rendimento + aporte
    valor_rentabilidade.append(saldo)
    print (f"Após {mês} períodos(s), o montante será de R$ {saldo:.2f} ")
    mês = mês + 1
# Plot the balance over time.
plt.plot(valor_rentabilidade)
plt.show()
|
# Generated by Django 3.0.7 on 2020-07-28 04:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add GroupMember.added_from_gchat (backfilled True, then non-defaulted)."""

    dependencies = [
        ('discussions', '0032_auto_20200727_1243'),
    ]
    operations = [
        migrations.AddField(
            model_name='groupmember',
            name='added_from_gchat',
            field=models.BooleanField(default=True),
            preserve_default=False,
        ),
    ]
|
from flask import Flask, make_response, request, render_template
# Flask application instance for the cookie demo routes below.
app = Flask(__name__)
@app.route('/00-test')
def test_views():
    """Smoke-test route; returns a fixed Chinese message ("this is the test address")."""
    return "这是测试的地址"
@app.route('/01-setcookie')
def setcookie():
    """Set two demo cookies and acknowledge in the response body."""
    # Build the response object through make_response.
    resp = make_response('保存cookies成功')
    # 1. Store cookie "uname" = "Maria" with a one-year lifetime.
    resp.set_cookie('uname','Maria',60*60*24*365)
    # 2. Store cookie "upwd" = "123456" with no explicit lifetime (session cookie).
    resp.set_cookie('upwd','123456')
    # Respond: cookies saved.
    return resp
@app.route('/02-login',methods=['GET','POST'])
def login_views():
    """Login form with an optional remember-me feature backed by cookies."""
    if request.method == 'GET':
        # Check whether uname and upwd are already stored in cookies.
        if 'uname' in request.cookies and 'upwd' in request.cookies:
            # 1. Read uname and upwd from the cookies.
            uname = request.cookies['uname']
            upwd = request.cookies['upwd']
            # 2. Auto-login when both equal "admin".
            if uname=='admin' and upwd=='admin':
                return "欢迎:"+uname
            return render_template('02-login.html')
        else:
            return render_template('02-login.html')
    else:
        # 1. Read the submitted username and password.
        uname = request.form['uname']
        upwd = request.form['upwd']
        # 2. Validate the credentials.
        if uname=='admin' and upwd == 'admin':
            # 3. On success, remember the credentials in cookies when the
            #    "isSaved" checkbox was ticked.
            resp = make_response('登录成功')
            if 'isSaved' in request.form:
                max_age = 60*60*24*365
                resp.set_cookie('uname',uname,max_age)
                resp.set_cookie('upwd',upwd,max_age)
            return resp
        else:
            # 4. Otherwise report the error with a link back to the login form.
            return "用户名或密码错误<a href='/02-login'>登录</a>"
@app.route('/03-getcookie')
def getcookie_views():
    """Log the stored demo cookies server-side and acknowledge to the client."""
    # print(request.cookies)
    if 'uname' in request.cookies:
        print("用户名:"+request.cookies['uname'])
    if 'upwd' in request.cookies:
        print("密码:"+request.cookies['upwd'])
    return "获取cookies成功"
if __name__ == "__main__":
app.run(debug=True,host='0.0.0.0')
|
class Tree:
    """Binary search tree mapping orderable keys to data values."""

    def __init__(self, key, data):
        "Create a new Tree object with empty L & R subtrees."
        self.key = key
        self.data = data
        self.left = self.right = None

    def insert(self, key, data):
        """Insert a new element into the tree in the correct position.

        Raises:
            ValueError: if the key is already present.
        """
        if key < self.key:
            if self.left:
                self.left.insert(key, data)
            else:
                self.left = Tree(key, data)
        elif key > self.key:
            if self.right:
                self.right.insert(key, data)
            else:
                self.right = Tree(key, data)
        else:
            raise ValueError("Attempt to insert duplicate value")

    def walk(self):
        """Generate (key, data) pairs from the tree in sorted key order."""
        if self.left:
            for n, v in self.left.walk():
                yield n, v
        yield self.key, self.data
        if self.right:
            for n, v in self.right.walk():
                yield n, v

    def find(self, key):
        """Return the data stored under *key*.

        BUG FIX (performance): the previous implementation generated an
        entire subtree via walk() to look the key up — O(n) per call.
        This descends the tree directly, which is O(height).

        Raises:
            KeyError: if the key is not in the tree.
        """
        node = self
        side = "left"
        while node is not None:
            if key == node.key:
                return node.data
            side = "left" if key < node.key else "right"
            node = node.left if key < node.key else node.right
        raise KeyError("Tree object has no {0} attribute {1}".format(side, key))
if __name__ == '__main__':
    # Build a small demo tree, print it in order, then look up a few keys.
    t = Tree("D", 1)
    for c, v in [("B",2),("J",3),("Q",4),("K",5),("F",6),("A",7),("C",8)]:
        t.insert(c,v)
    print(list(t.walk()))
    for c in "DAQ":
        try:
            print(t.find(c))
        except KeyError:
            print("Tree object was not found")
import streamlit as st
import numpy as np
from skimage.io import imread
from skimage.transform import resize
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
import pickle
# --- Streamlit UI for tomato leaf disease prediction ---
st.title('TOMATO DISEASE PREDICTION AND PREVENTION APP')
st.write('This is a tomato disease prediction web app using streamlit.')
st.text('upload an image')
# NOTE(review): unpickling a model file — only safe with a trusted artifact.
model=pickle.load(open('tomato.pkl','rb'))
uploaded_file=st.file_uploader('choose an image',type='jpg')
if uploaded_file is not None:
    img=imread(uploaded_file)
    st.image(img,caption='uploaded image')
# NOTE(review): this branch is not nested under the upload check, so pressing
# PREDICT before uploading leaves `img` unbound — confirm intended flow.
if st.button('PREDICT'):
    CATAGORIES=['tomato_early_blight','tomato_healthy','tomato_late_blight']
    st.write('Results')
    flat_data=[]
    img=np.array(img)
    # Resize to the model's 150x150 RGB input and flatten to one feature row.
    img_resized=resize(img,(150,150,3))
    flat_data.append(img_resized.flatten())
    flat_data=np.array(flat_data)
    y_out=model.predict(flat_data)
    q=model.predict_proba(flat_data)
    # Show the per-class probability as a percentage.
    for index,item in enumerate(CATAGORIES):
        st.write(f'{item} : {q[0][index]*100}')
    y_out=CATAGORIES[y_out[0]]
    # Show the predicted class with prevention advice.
    if y_out=='tomato_early_blight':
        st.title('DISEASE:TOMATO EARLY BLIGHT')
        st.subheader('HOW TO PREVENT TOMATO EARLY BLIGHT?')
        st.write('Tomatoes that have early blight require immediate attention before the disease takes over the plants. Thoroughly spray the plant (bottoms of leaves also) with Bonide Liquid Copper Fungicide concentrate or Bonide Tomato & Vegetable. Both of these treatments are organic. If possible time applications so that 12 hours of dry weather follows applications. A day after treatment, remove the lower branches with sharp razor blade knife. Clean your knife with rubbing alcohol before trimming the next plant to prevent the spread of the disease. Repeat fungicide treatments every 7 to 14 days. Read the label instructions carefully. Do not spray pesticides, fungicides, fertilizers or herbicides when it’s in the high 80’s or 90; you can damage your plants. Water your plants the day before spraying, hydration is important!')
    elif y_out=='tomato_healthy':
        st.title('DISEASE:NO DISEASE')
        st.subheader('your tomato plant is healthy')
    else:
        st.title('DISEASE:TOMATO LATE BLIGHT')
        st.subheader('HOW TO PREVENT TOMATO LATE BLIGHT?')
        st.write('Sanitation is the first step in controlling tomato late blight. Clean up all debris and fallen fruit from the garden area. This is particularly essential in warmer areas where extended freezing is unlikely and the late blight tomato disease may overwinter in the fallen fruit.plants should be inspected at least twice a week. Since late blight symptoms are more likely to occur during wet conditions, more care should be taken during those times.For the home gardener, fungicides that contain maneb, mancozeb, chlorothanolil, or fixed copper can help protect plants from late tomato blight. Repeated applications are necessary throughout the growing season as the disease can strike at any time. For organic gardeners, there are some fixed copper products approved for use; otherwise, all infected plants must be immediately removed and destroyed.')
# Static usage instructions and about section.
st.write('HOW TO USE')
st.write('step1.first click on the option browse files,it will open the camera and files in your mobile')
st.write('step2.choose any of the option camera or files and take a picture of your tamoto plant')
st.write('step3.after loading the image,click on predict button then it will predict that your tomato plant is diseased or not,if diseased it predicts the disease and inform how to cure it')
st.write('It is made by using Transfer Learning technique')
st.write("Transfer Learning is the reuse of a pre trained model on a new problem.It's currently very popular in deep learning because it can train deep neural network with comparatively little data.This is very useful in data science field since most of real world problems do not have millions of labelled data")
st.write('Accuracy of this model is :85%')
st.write('For more queries please contact:shekharboppanapally944@gmail.com')
# -*- coding: utf-8 -*-
# @Time : 2021/9/6 8:21
# @File : ninwen.py
# @Author : Rocky C@www.30daydo.com
# 宁稳网
import json
import os
import random
import time
from parsel import Selector
import requests
import warnings
import datetime
import re
import pandas as pd
import validate_key
import pickle
import loguru
warnings.filterwarnings("ignore")
logger = loguru.logger
class NinwenSpider():
def __init__(self):
super(NinwenSpider, self).__init__()
self.session = requests.Session()
self.today = datetime.datetime.now().strftime('%Y-%m-%d')
logger.info(f'{self.today} start to crawl....')
@property
def headers(self):
_header = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Host": "www.ninwin.cn",
"Origin": "http://www.ninwin.cn",
"Referer": "http://www.ninwin.cn/index.php?m=u&c=login",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
return _header
@property
def json_headers(self):
headers = {
"Host": "www.ninwin.cn",
"Accept": "application/json, text/javascript, */*; q=0.01",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36",
"Origin": "https://www.ninwin.cn",
"Referer": "https://www.ninwin.cn/index.php?m=u&c=login",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7",
}
return headers
def get_image(self):
rand = int(time.time())
url = f'http://www.ninwin.cn/index.php?m=verify&a=get&rand={rand}'
_headers = {"Referer": "http://www.ninwin.cn/index.php?m=u&c=login",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"}
r = self.session.get(url=url, headers=_headers)
with open('code.png', 'wb') as fp:
fp.write(r.content)
return r.content
def convert(self, float_str):
try:
return_float = float(float_str)
except:
return_float = None
return return_float
def login(self, code, csrf):
url = 'https://www.ninwin.cn/index.php?m=u&c=login&a=dorun'
data = {
'username': validate_key.username,
'password': validate_key.password,
'code': code,
'backurl': 'https://www.ninwin.cn/',
'invite': '',
'csrf_token': csrf
}
r = self.session.post(url=url, headers=self.json_headers,
data=data
)
ret_js = r.json()
if ret_js.get('state') == 'success':
return ret_js.get('referer')
def get_csrf_token(self):
url = 'http://www.ninwin.cn/index.php?m=u&c=login'
content = self.visit_page(url)
if re.search('value="(.*?)"', content):
csrf = re.search('value="(.*?)"', content).group(1)
return csrf
return None
def get_bond_data(self):
url = 'http://www.ninwin.cn/index.php?m=cb&a=cb_all&show_cb_only=Y&show_listed_only=Y'
content = self.visit_page(url)
if '回售起始日' in content:
logger.info("\n获取数据成功\n")
return content
else:
logger.error('获取数据失败')
return None
def visit_page(self, url, _headers=None):
if _headers is None:
_headers = self.headers
resp = self.session.get(url=url, headers=_headers)
content = resp.text
return content
@property
def columns_name(self):
columns_name_ = [("转债代码", ".//td[2]/text()"),
("转债名称", ".//td[3]/a/text()"),
("满足", ".//td[3]/a/span/@title"),
("发行日期", ".//td[4]/text()"),
("股票代码", ".//td[5]/text()"),
("股票名称", ".//td[6]/text()"),
("行业", ".//td[7]/text()"),
("子行业", ".//td[8]/text()"),
("转债价格", ".//td[9]/text()"),
("本息", ".//td[9]/@title"),
("涨跌", ".//td[10]/spand/text()"),
("日内套利", ".//td[11]/spand/text()"),
("股价", ".//td[12]/text()"),
("正股涨跌", ".//td[13]/spand/text()"),
("剩余本息", ".//td[14]/text()"),
("转股价格", ".//td[15]/text()"),
("转股溢价率", ".//td[16]/text()"),
# ("转股期", ".//td[18]/@title"),
("转股价值", ".//td[17]/text()"),
("距离转股日", ".//td[18]/text()"),
("剩余年限", ".//td[19]/text()"),
("回售年限", ".//td[20]/text()"),
("剩余余额", ".//td[21]/text()"),
# ("余额", ".//td[20]/text()"),
("成交额(百万)", ".//td[22]/text()"),
("转债换手率", ".//td[23]/text()"),
("余额/市值", ".//td[24]/@title"),
("余额/股本", ".//td[25]/text()"),
("股票市值(亿)", ".//td[26]/text()"),
("P/B", ".//td[27]/text()"),
("税前收益率", ".//td[28]/text()"),
("税后收益率", ".//td[29]/text()"),
("税前回售收益", ".//td[30]/text()"),
("税后回售收益", ".//td[31]/text()"),
("回售价值", ".//td[32]/text()"),
("纯债价值", ".//td[33]/text()"),
("弹性", ".//td[34]/text()"),
("信用", ".//td[35]/text()"),
("折现率", ".//td[36]/text()"),
("老式双低", ".//td[37]/text()"),
("老式排名", ".//td[38]/text()"),
("新式双低", ".//td[39]/text()"),
("新式排名", ".//td[40]/text()"),
("热门度", ".//td[41]/text()"),
]
return columns_name_
def patch_fix(self, name, v, node):
    """Fallback extraction for columns whose primary XPath came back empty.

    Returns (True, replacement_value) when a fix applies, else (False, None).
    """
    # 转股价格 is sometimes wrapped in an anchor; retry with the <a> text.
    if v is None and name == '转股价格':
        return True, node.xpath('.//td[15]/a/text()').extract_first()
    return False, None
def parse(self, content):
    """Parse the bond table HTML into a list of row dicts keyed by column label."""
    rows = Selector(text=content).xpath('//table[@id="cb_hq"]/tbody/tr')
    results = []
    for row in rows:
        record = {}
        for col_label, col_xpath in self.columns_name:
            value = row.xpath(col_xpath).extract_first()
            # Some cells need a secondary XPath; patch_fix decides.
            patched, fixed_value = self.patch_fix(col_label, value, row)
            if patched:
                value = fixed_value
            record[col_label] = value.strip() if isinstance(value, str) else value
        results.append(record)
    return results
def dump_excel(self, bond_info_list):
    # Persist the scraped rows to an Excel file named after today's date.
    # NOTE(review): the `encoding` argument was removed from DataFrame.to_excel
    # in pandas >= 1.2 — confirm the pinned pandas version still accepts it.
    df = pd.DataFrame(bond_info_list)
    df.to_excel(f'../data/{self.today}_宁稳.xlsx', encoding="utf8")
def image_recognize(self, img):
    """Send the captcha image to the external OCR service and return the decoded code.

    Re-raises (after logging) when the service response is not valid JSON.
    """
    payload = {'sign': validate_key.sign}
    resp = requests.post(url=validate_key.url, files={'file': img},
                         data=payload, timeout=20)
    try:
        body = resp.json()
        code = body.get('code')
        print(body)
    except Exception as e:
        logger.error(e)
        raise e
    else:
        return code
def check_name(self, csrf_token):
    """POST the configured username to the login check endpoint (side effect only)."""
    payload = {
        'csrf_token': csrf_token,
        'username': validate_key.username,
    }
    self.session.post(url='https://www.ninwin.cn/index.php?m=u&c=login&a=checkname',
                      headers=self.json_headers, data=payload)
def check_cookies(self, csrf, code):
    """Hit the captcha-verification endpoint so the session cookie gets validated."""
    verify_url = f'https://www.ninwin.cn/index.php?m=verify&a=check&csrf_token={csrf}&code={code}'
    time.sleep(0.5)
    self.visit_page(verify_url, _headers=self.json_headers)
def run(self):
    """Full scrape cycle: log in (retrying captcha OCR on failure), then
    fetch, parse and export the convertible-bond table."""
    csrf = self.get_csrf_token()
    while 1:
        img = self.get_image()
        code = self.image_recognize(img)
        print(code)
        # Mimic the browser's pre-login probes before submitting credentials.
        self.check_name(csrf)
        self.check_cookies(csrf, code)
        time.sleep(0.5)
        ref_url = self.login(code, csrf)
        # login() yields None when the captcha or password was rejected; retry
        # after a short randomized pause.
        if ref_url is None:
            logger.info('识别错误或者密码错误,正在重试.....')
            time.sleep(random.randint(1, 5))
            continue
        self.visit_page(ref_url)
        content = self.get_bond_data()
        bond_info_list = self.parse(content)
        self.dump_excel(bond_info_list)
        logger.info('获取结束')
        # Clean up the captcha image saved earlier by get_image().
        os.remove('code.png')
        break
# Script entry point: run one full scrape when executed directly.
if __name__ == '__main__':
    app = NinwenSpider()
    app.run()
|
__author__ = 'arkilic'
import numpy as np
def import_image():
    """Load an image from disk; not implemented yet."""
    pass
def convert_to_npArray():
    """Convert a loaded image to a NumPy array; not implemented yet."""
    # Bug fix: a stray trailing '|' after `pass` made this line a syntax error.
    pass
#coding:utf-8
from scripts.handler import dbhandler
from scripts.utils.views import *
# Data-type discriminators for the statistics tables managed by DataSLMgr;
# each value selects the matching update* handler in initSLHandlers().
DT_Pay = 1
DT_ConsumeGold = 2
DT_DailyActive = 3
DT_CsmGoldByAct = 4
DT_DailyCreate = 5
DT_VipLevel = 6
DT_Subsistence = 7
DT_ItemSoldInMall = 8
DT_LevelDistribution = 9
DT_GoldMoneySurplus = 10
DT_TotalOnlineTime = 11
DT_CsmGoldBySys = 12
DT_DailyCreateCountByTime = 13
DT_CurOnlineCountByTime = 14
class DataSLMgr():
    """Save/load manager for aggregated game statistics.

    Owns a DBHandler connected to the `QueryResult` MySQL database, creates
    every statistics table on first use, and routes incoming records to the
    matching update* method through mSLHandler (keyed by the DT_* constants).

    NOTE(review): all SQL below is assembled with %-string formatting, so
    recordTS/serverId/params are trusted implicitly — confirm callers never
    pass user-controlled data into these values.
    """
    def __init__(self):
        self.mDBHandler = None  # dbhandler.DBHandler, created in new()
        self.mSLHandler = None  # DT_* constant -> bound update* method
        self.mDBNames = []      # database names discovered on the server
        self.mTabNames = []     # table names discovered in QueryResult
    def new(self,
            name = "ResultDB",
            serverId = -1,
            dbType = "system",
            user = "root",
            psswd = "123456",
            host = "192.168.0.248",
            port = 3306,
            dbname = "",
            charset = "utf8"):
        """Connect to MySQL, ensure the schema exists, build the dispatch table.

        Returns self so the call can be chained (DataSLMgr().new(...)).
        """
        self.mDBHandler = dbhandler.DBHandler(name, serverId, dbType, user, psswd, host, port, dbname, charset)
        self.initDB()
        self.initSLHandlers()
        return self
    def initDB(self):
        """Create the QueryResult database and every statistics table if missing."""
        cv.log("Init databases...", True)
        sqlStr = "show databases"
        result = self.mDBHandler.executeSql(sqlStr)
        for value in result:
            self.mDBNames.append(value[0])
        if "QueryResult" not in self.mDBNames:
            cv.log("Create database QueryResult", True)
            sqlStr = "create database if not exists `QueryResult` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci"
            self.mDBHandler.executeSql(sqlStr)
        self.mDBHandler.setDBName("QueryResult")
        sqlStr = "show tables"
        result = self.mDBHandler.executeSql(sqlStr)
        for value in result:
            self.mTabNames.append(value[0])
        if "PayData" not in self.mTabNames:
            cv.log("Create table PayData", True)
            sqlStr = "CREATE TABLE if not exists `PayData` (\
                `Date` int(11) NOT NULL COMMENT '数据时间',\
                `ServerID` int(11) NOT NULL COMMENT '服务器ID',\
                `Sum` int(11) NOT NULL COMMENT '总充值额度',\
                `Count` int(11) NOT NULL COMMENT '总充值次数',\
                `AccountCount` int(11) NOT NULL COMMENT '总充值人数',\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "ConsumeGold" not in self.mTabNames:
            cv.log("Create table ConsumeGold", True)
            sqlStr = "CREATE TABLE if not exists `ConsumeGold` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `Count` int(11) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "DailyActive" not in self.mTabNames:
            cv.log("Create table DailyActive", True)
            sqlStr = "CREATE TABLE if not exists `DailyActive` (\
                `Date` int(11) NOT NULL COMMENT '记录时间',\
                `ServerID` int(11) NOT NULL,\
                `ActiveCount` int(11) NOT NULL COMMENT '活跃',\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "ConsumeGoldByAct" not in self.mTabNames:
            cv.log("Create table ConsumeGoldByAct", True)
            sqlStr = "CREATE TABLE if not exists `ConsumeGoldByAct` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `DataByActID` varchar(1024) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "DailyCreate" not in self.mTabNames:
            cv.log("Create table DailyCreate", True)
            sqlStr = "CREATE TABLE if not exists`DailyCreate` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `Count` int(11) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "VipLevel" not in self.mTabNames:
            cv.log("Create table VipLevel", True)
            sqlStr = "CREATE TABLE if not exists `VipLevel` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `Data` varchar(256) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "Subsistence" not in self.mTabNames:
            cv.log("Create table Subsistence", True)
            sqlStr = "CREATE TABLE if not exists `Subsistence` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `Data` varchar(256) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "ItemSoldInMall" not in self.mTabNames:
            cv.log("Create table ItemSoldInMall", True)
            sqlStr = "CREATE TABLE if not exists `ItemSoldInMall` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `DataByItemID` varchar(2048) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "LevelDistribution" not in self.mTabNames:
            cv.log("Create table LevelDistribution", True)
            sqlStr = "CREATE TABLE if not exists `LevelDistribution` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `DataByLevel` varchar(2048) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "GoldMoneySurplus" not in self.mTabNames:
            cv.log("Create table GoldMoneySurplus", True)
            sqlStr = "CREATE TABLE if not exists `GoldMoneySurplus` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `Count` int(11) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "TotalOnlineTime" not in self.mTabNames:
            cv.log("Create table TotalOnlineTime", True)
            sqlStr = "CREATE TABLE if not exists `TotalOnlineTime` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `Count` int(11) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "ConsumeGoldBySys" not in self.mTabNames:
            cv.log("Create table ConsumeGoldBySys", True)
            sqlStr = "CREATE TABLE if not exists `ConsumeGoldBySys` ( \
                `Date` int(11) NOT NULL, \
                `ServerID` int(11) NOT NULL,\
                `DataBySysUseID` varchar(1024) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "DailyCreateCountByTime" not in self.mTabNames:
            cv.log("Create table DailyCreateCountByTime", True)
            sqlStr = "CREATE TABLE if not exists `DailyCreateCountByTime` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `CountByTime` varchar(2048) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        if "CurOnlineCountByTime" not in self.mTabNames:
            cv.log("Create table CurOnlineCountByTime", True)
            sqlStr = "CREATE TABLE if not exists `CurOnlineCountByTime` (\
                `Date` int(11) NOT NULL,\
                `ServerID` int(11) NOT NULL,\
                `CountByTime` varchar(2048) NOT NULL,\
                PRIMARY KEY (`Date`,`ServerID`)\
                ) ENGINE=MyISAM DEFAULT CHARSET=utf8;"
            self.mDBHandler.executeSql(sqlStr)
        cv.log("Init databases has done!", True)
    def initSLHandlers(self):
        """Build the DT_* -> handler dispatch table used by updateData()."""
        self.mSLHandler = {
            DT_Pay : self.updatePay,
            DT_ConsumeGold : self.updateConsumeGold,
            DT_DailyActive : self.updateDailyActive,
            DT_CsmGoldByAct : self.updateCsmGoldByAct,
            DT_DailyCreate : self.updateDailyCreate,
            DT_VipLevel : self.updateVipLevel,
            DT_Subsistence : self.updateSubsistence,
            DT_ItemSoldInMall : self.updateItemSoldInMall,
            DT_LevelDistribution : self.updateLevelDistribution,
            DT_GoldMoneySurplus : self.updateGoldMoneySurplus,
            DT_TotalOnlineTime : self.updateTotalOnlineTime,
            DT_CsmGoldBySys : self.updateCsmGoldBySys,
            DT_DailyCreateCountByTime : self.updateDailyCreateCountByTime,
            DT_CurOnlineCountByTime : self.updateCurOnlineCountByTime
            }
    def updateData(self, dtType, recordTS, serverId, params):
        """Dispatch one record to the handler registered for *dtType*."""
        if dtType in self.mSLHandler:
            self.mSLHandler[dtType](recordTS, serverId, params)
        else:
            cv.err("Data SL type is not existing!", True)
    def readData(self):
        # Placeholder; not implemented.
        pass
    def writeData(self):
        # Placeholder; not implemented.
        pass
    def updatePay(self, recordTS, serverId, params):
        """Upsert one PayData row (Sum/Count/AccountCount) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from PayData where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update PayData set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "Sum" in params:
                updateSQL += ",Sum = %s " % (params["Sum"])
            if "Count" in params:
                updateSQL += ",Count = %s " % (params["Count"])
            if "AccountCount" in params:
                updateSQL += ",AccountCount = %s " % (params["AccountCount"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            # Missing fields default to 0 on first insert.
            if "Sum" in params:
                Sum = params["Sum"]
            else:
                Sum = 0
            if "Count" in params:
                Count = params["Count"]
            else:
                Count = 0
            if "AccountCount" in params:
                AccountCount = params["AccountCount"]
            else:
                AccountCount = 0
            updateSQL = "insert into PayData values(%s, %s, %s, %s, %s)" %(recordTS, serverId, Sum, Count, AccountCount)
        self.mDBHandler.executeSql(updateSQL)
    def updateConsumeGold(self, recordTS, serverId, params):
        """Upsert one ConsumeGold row (Count) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from ConsumeGold where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update ConsumeGold set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "Count" in params:
                updateSQL += ",Count = %s " % (params["Count"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "Count" in params:
                Count = params["Count"]
            else:
                Count = 0
            updateSQL = "insert into ConsumeGold values(%s, %s, %s)" %(recordTS, serverId, Count)
        self.mDBHandler.executeSql(updateSQL)
    def updateDailyActive(self, recordTS, serverId, params):
        """Upsert one DailyActive row (ActiveCount) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from DailyActive where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update DailyActive set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "ActiveCount" in params:
                updateSQL += ",ActiveCount = %s " % (params["ActiveCount"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "ActiveCount" in params:
                ActiveCount = params["ActiveCount"]
            else:
                ActiveCount = 0
            updateSQL = "insert into DailyActive values(%s, %s, %s)" %(recordTS, serverId, ActiveCount)
        self.mDBHandler.executeSql(updateSQL)
    def updateCsmGoldByAct(self, recordTS, serverId, params):
        """Upsert one ConsumeGoldByAct row (DataByActID string) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from ConsumeGoldByAct where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update ConsumeGoldByAct set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "DataByActID" in params:
                updateSQL += ", DataByActID = '%s' " % (params["DataByActID"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "DataByActID" in params:
                DataByActID = params["DataByActID"]
            else:
                DataByActID = ""
            updateSQL = "insert into ConsumeGoldByAct values(%s, %s, '%s')" %(recordTS, serverId, DataByActID)
        self.mDBHandler.executeSql(updateSQL)
    def updateDailyCreate(self, recordTS, serverId, params):
        """Upsert one DailyCreate row (Count) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from DailyCreate where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update DailyCreate set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "Count" in params:
                updateSQL += ", Count = %s " % (params["Count"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "Count" in params:
                Count = params["Count"]
            else:
                Count = 0
            updateSQL = "insert into DailyCreate values(%s, %s, %s)" %(recordTS, serverId, Count)
        self.mDBHandler.executeSql(updateSQL)
    def updateVipLevel(self, recordTS, serverId, params):
        """Upsert one VipLevel row (semicolon-separated Data string) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from VipLevel where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update VipLevel set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "Data" in params:
                updateSQL += ", Data = '%s' " % (params["Data"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "Data" in params:
                Data = params["Data"]
            else:
                # Default: 16 zeroed VIP-level buckets.
                Data = "0;\
0;0;0;0;0;\
0;0;0;0;0;\
0;0;0;0;0;"
            updateSQL = "insert into VipLevel values(%s, %s, '%s')" %(recordTS, serverId, Data)
        self.mDBHandler.executeSql(updateSQL)
    def updateSubsistence(self, recordTS, serverId, params):
        """Upsert one Subsistence (retention) row; Data is "type:count;" pairs.

        NOTE(review): the bare except below swallows every error (including
        malformed stored data) and only logs — confirm that is intended.
        """
        dataInDB = self.mDBHandler.executeSql("select * from Subsistence where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update Subsistence set Date = %s, ServerID = %s " %(recordTS, serverId)
            try:
                Count = params["Count"]
                Type = params["Type"]
                # Merge the new (Type, Count) into the stored pair string.
                dataDict = self.protoData(dataInDB[0][2])
                dataDict[Type] = Count
                Data = ""
                for t in dataDict:
                    Data += "%s:%s;" %(t, dataDict[t])
                updateSQL += ", Data = '%s' " % (Data)
            except:
                cv.err("Failed to write down data in updateSubsistence", True)
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            try:
                Count = params["Count"]
                Type = params["Type"]
                dataDict = self.protoData()
                dataDict[Type] = Count
                Data = ""
                for t in dataDict:
                    Data += "%s:%s;" %(t, dataDict[t])
            except:
                cv.log("Using default data in updateSubsistence!", True)
                Data = "2:0;3:0;7:0;15:0;30:0;"
            updateSQL = "insert into Subsistence values(%s, %s, '%s')" %(recordTS, serverId, Data)
        self.mDBHandler.executeSql(updateSQL)
    def protoData(self, dataStr = None):
        """Parse a "key:value;" pair string into a dict, or return the default buckets.

        NOTE(review): parsed values stay strings while the default dict holds
        ints — confirm downstream consumers tolerate the mixed types.
        """
        if dataStr:
            data = {}
            pairs = dataStr.split(";")
            for pair in pairs:
                kv = pair.split(":")
                if len(kv) == 2:
                    data[int(kv[0])] = kv[1]
            return data
        else:
            # Default retention buckets: day 2, 3, 7, 15 and 30.
            return {2:0, 3:0, 7:0, 15:0, 30:0,}
    def updateItemSoldInMall(self, recordTS, serverId, params):
        """Upsert one ItemSoldInMall row (DataByItemID string) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from ItemSoldInMall where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update ItemSoldInMall set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "DataByItemID" in params:
                updateSQL += ", DataByItemID = '%s' " % (params["DataByItemID"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "DataByItemID" in params:
                DataByItemID = params["DataByItemID"]
            else:
                DataByItemID = ""
            updateSQL = "insert into ItemSoldInMall values(%s, %s, '%s')" %(recordTS, serverId, DataByItemID)
        self.mDBHandler.executeSql(updateSQL)
    def updateLevelDistribution(self, recordTS, serverId, params):
        """Upsert one LevelDistribution row (DataByLevel string) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from LevelDistribution where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update LevelDistribution set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "DataByLevel" in params:
                updateSQL += ", DataByLevel = '%s' " % (params["DataByLevel"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "DataByLevel" in params:
                DataByLevel = params["DataByLevel"]
            else:
                DataByLevel = ""
            updateSQL = "insert into LevelDistribution values(%s, %s, '%s')" %(recordTS, serverId, DataByLevel)
        self.mDBHandler.executeSql(updateSQL)
    def updateGoldMoneySurplus(self, recordTS, serverId, params):
        """Upsert one GoldMoneySurplus row (Count) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from GoldMoneySurplus where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update GoldMoneySurplus set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "Count" in params:
                updateSQL += ", Count = %s " % (params["Count"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "Count" in params:
                Count = params["Count"]
            else:
                Count = 0
            updateSQL = "insert into GoldMoneySurplus values(%s, %s, %s)" %(recordTS, serverId, Count)
        self.mDBHandler.executeSql(updateSQL)
    def updateTotalOnlineTime(self, recordTS, serverId, params):
        """Upsert one TotalOnlineTime row (Count) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from TotalOnlineTime where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update TotalOnlineTime set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "Count" in params:
                updateSQL += ", Count = %s " % (params["Count"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "Count" in params:
                Count = params["Count"]
            else:
                Count = 0
            updateSQL = "insert into TotalOnlineTime values(%s, %s, %s)" %(recordTS, serverId, Count)
        self.mDBHandler.executeSql(updateSQL)
    def updateCsmGoldBySys(self, recordTS, serverId, params):
        """Upsert one ConsumeGoldBySys row (DataBySysUseID string) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from ConsumeGoldBySys where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update ConsumeGoldBySys set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "DataBySysUseID" in params:
                updateSQL += ", DataBySysUseID = '%s' " % (params["DataBySysUseID"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "DataBySysUseID" in params:
                DataBySysUseID = params["DataBySysUseID"]
            else:
                DataBySysUseID = ""
            updateSQL = "insert into ConsumeGoldBySys values(%s, %s, '%s')" %(recordTS, serverId, DataBySysUseID)
        self.mDBHandler.executeSql(updateSQL)
    def updateDailyCreateCountByTime(self, recordTS, serverId, params):
        """Upsert one DailyCreateCountByTime row (CountByTime string) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from DailyCreateCountByTime where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update DailyCreateCountByTime set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "CountByTime" in params:
                updateSQL += ", CountByTime = '%s' " % (params["CountByTime"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "CountByTime" in params:
                CountByTime = "%s" % (params["CountByTime"])
            else:
                CountByTime = ""
            updateSQL = "insert into DailyCreateCountByTime values(%s, %s, '%s')" %(recordTS, serverId, CountByTime)
        self.mDBHandler.executeSql(updateSQL)
    def updateCurOnlineCountByTime(self, recordTS, serverId, params):
        """Upsert one CurOnlineCountByTime row (CountByTime string) for (Date, ServerID)."""
        dataInDB = self.mDBHandler.executeSql("select * from CurOnlineCountByTime where Date = %s and ServerID = %s " %(recordTS, serverId))
        if len(dataInDB) > 0:
            updateSQL = "update CurOnlineCountByTime set Date = %s, ServerID = %s " %(recordTS, serverId)
            if "CountByTime" in params:
                updateSQL += ", CountByTime = '%s' " % (params["CountByTime"])
            updateSQL = updateSQL + "where Date = %s and ServerID = %s " %(recordTS, serverId)
        else:
            if "CountByTime" in params:
                CountByTime = "%s" % (params["CountByTime"])
            else:
                CountByTime = ""
            updateSQL = "insert into CurOnlineCountByTime values(%s, %s, '%s')" %(recordTS, serverId, CountByTime)
        self.mDBHandler.executeSql(updateSQL)
|
import logging
from functools import wraps
from .utils import redirect_to_terms, is_eligible_to_redirect
logger = logging.getLogger(__name__)
def terms_checker(view_func):
    """View-method decorator: bounce the user to the terms page when they
    still have to accept the terms, otherwise run the wrapped view."""
    @wraps(view_func)
    def _wrapped_view(view, request, *args, **kwargs):
        if not is_eligible_to_redirect(request):
            return view_func(view, request, *args, **kwargs)
        return redirect_to_terms(request)
    return _wrapped_view
|
import random
# Soldier
class Soldier:
    """Base combatant with hit points and a fixed attack strength."""

    def __init__(self, health, strength):
        self.health = health
        self.strength = strength

    def attack(self):
        """Return the damage this soldier deals."""
        return self.strength

    def receiveDamage(self, the_damage):
        """Subtract incoming damage from this soldier's health."""
        self.health -= the_damage
# Viking
class Viking(Soldier):
    """Named soldier that reports damage taken, death, and a battle cry."""

    def __init__(self, name, health, strength):
        super().__init__(health, strength)
        self.name = name

    def attack(self):
        """Return the damage this viking deals."""
        return super().attack()

    def receiveDamage(self, the_damage):
        """Apply damage and print the outcome (survived vs. died)."""
        self.health = self.health - the_damage
        if self.health > 0:
            # Bug fix: the original interpolated an undefined name `damage`,
            # raising NameError whenever a viking survived a hit.
            return print(f"{self.name} has received {the_damage} points of damage")
        else:
            return print(f"{self.name} has died in act of combat")

    def battlecry(self):
        # Bug fix: `self` was missing from the signature, so calling
        # viking.battlecry() on an instance raised TypeError.
        return print('Odin Owns You All!')
# Saxon
class Saxon(Soldier):
    """Anonymous soldier that reports damage taken and death generically."""

    def __init__(self, health, strength):
        super().__init__(health, strength)

    def attack(self):
        """Return the damage this saxon deals."""
        return super().attack()

    def receiveDamage(self, the_damage):
        """Apply damage and print the outcome (survived vs. died)."""
        self.health -= the_damage
        if self.health <= 0:
            return print("A Saxon has died in combat")
        return print(f"A Saxon has received {the_damage} points of damage")
# War
class War():
    """Tracks both armies and resolves random attacks between them."""

    def __init__(self):
        self.vikingArmy = []
        self.Armyaxon = []

    def addViking(self, Viking):
        """Enlist a viking in the viking army."""
        self.vikingArmy.append(Viking)

    def addSaxon(self, Saxon):
        """Enlist a saxon in the saxon army."""
        self.Armyaxon.append(Saxon)

    def vikingAttack(self):
        """A random viking strikes a random saxon; remove the saxon if it dies."""
        sajon_elegido = random.choice(self.Armyaxon)
        vikingo_elegido = random.choice(self.vikingArmy)
        resultado_ataque = sajon_elegido.receiveDamage(vikingo_elegido.strength)
        if sajon_elegido.health <= 0:
            # Bug fix: the original removed from `self.saxonArmy`, an attribute
            # that never exists (the list is named `Armyaxon`), so every saxon
            # death raised AttributeError.
            self.Armyaxon.remove(sajon_elegido)
        return resultado_ataque

    def saxonAttack(self):
        """A random saxon strikes a random viking; remove the viking if it dies."""
        sajon_elegido = random.choice(self.Armyaxon)
        vikingo_elegido = random.choice(self.vikingArmy)
        resultado_ataque = vikingo_elegido.receiveDamage(sajon_elegido.strength)
        if vikingo_elegido.health <= 0:
            self.vikingArmy.remove(vikingo_elegido)
        return resultado_ataque

    def showStatus(self):
        """Print which side (if either) has won the war."""
        # Bug fix: list.count() requires an argument; the original calls raised
        # TypeError — use len() to get the army sizes.
        saxon_count = len(self.Armyaxon)
        vikingo_count = len(self.vikingArmy)
        if saxon_count == 0:
            return print('Vikings have won the war of the century')
        if vikingo_count == 0:
            return print("Saxons have fought for their lives and survive another day...")
        if saxon_count > 0 or vikingo_count > 0:
            return print('Vikings and Saxons are still in the thick of battle')
|
from django.db.models import Q
from django.forms.widgets import SelectMultiple, CheckboxSelectMultiple
import django_filters
from mptt.forms import TreeNodeChoiceField, TreeNodeMultipleChoiceField
from .models import Framework
#TODO: Move this somewhere else (forms)
class FrameworkFilter(django_filters.FilterSet):
    '''
    Filter set for Framework entries.

    Exposes free-text search (``q``), ordering by likes/updated (``o``) and
    an "is open source" toggle derived from repository_url presence.
    '''
    # Free-text search across title, description and creator username.
    q=django_filters.CharFilter(method='filter_search', label='Search', help_text='You can search by framework name, description or creator')
    o = django_filters.OrderingFilter(fields=[('likes', 'likes'),
                                              ('updated', 'updated')])
    is_open_source = django_filters.BooleanFilter(method='is_open_source_filter', label='Is open source')

    class Meta:
        model = Framework
        fields = ('q', 'languages', 'target_platforms', 'editor_platforms', 'is_free', 'is_open_source',
                  'is_royalty_free', 'is_2d', 'is_3d')

    #TODO: switcher between Free as beer, Free as language, open_source
    def is_open_source_filter(self, queryset, name, value):
        # A framework counts as open source iff it has a repository URL set.
        if value is True:
            return queryset.exclude(repository_url__isnull=True)
        elif value is False:
            return queryset.filter(repository_url__isnull=True)
        else:
            return queryset

    def filter_search(self, queryset, name, value):
        # NOTE(review): username matches are exact (iexact) while
        # title/description use icontains — confirm that is intended.
        return queryset.filter(Q(title__icontains=value) | Q(description__icontains=value) | Q(user__username__iexact=value))
    #TODO: Greatly improve this to have fields search query(includes name, descript, user), category and tags
|
import torch
import oscillation
def get_pde_res(h_val: torch.Tensor, input: torch.Tensor, device=oscillation.DEVICE):
    """Residual of the oscillation ODE  1*h'' + 0.1*h  computed via autograd.

    Column 0 of *input* is treated as the time axis; derivatives are taken
    with create_graph=True so the residual itself stays differentiable.
    """
    grad_seed = torch.unsqueeze(
        torch.ones(len(input), dtype=oscillation.DTYPE, device=device), 1)
    # First derivative of h w.r.t. the inputs; keep only the time column.
    dh = torch.autograd.grad(h_val, input, create_graph=True,
                             grad_outputs=grad_seed)[0]
    dh_dt = dh[:, 0:1]
    # Second derivative, again restricted to the time column.
    d2h = torch.autograd.grad(dh_dt, input, create_graph=True,
                              grad_outputs=grad_seed)[0]
    d2h_dt2 = d2h[:, 0:1]
    # 1 * h'' + 0.1 * h  (a first-order damping term is currently disabled).
    return 1 * d2h_dt2 + .1 * h_val
def get_bc():
    """Boundary conditions: map (derivative-order, boundary-index) -> value function."""
    constant_bc = lambda t, x: 12
    return {(0, 0): constant_bc}
|
from flask import Blueprint, Response, request
from flask_jwt_extended import create_access_token, jwt_required, get_jwt_identity
from database.users import User
from flask_bcrypt import generate_password_hash, check_password_hash
import datetime
import json
user_blueprint = Blueprint('users', __name__)
@user_blueprint.route('/users_signup', methods=['POST'])
def sign_up():
    """Create a user from the JSON body, hashing the password before saving."""
    payload = request.get_json()
    new_user = User(**payload)
    new_user.password = generate_password_hash(new_user.password).decode('utf8')
    new_user.save()
    return {'id': str(new_user.id)}, 200
@user_blueprint.route('/users_login', methods=['POST'])
def login():
    # Authenticate by email/password and issue a 7-day JWT.
    body = request.get_json()
    # NOTE(review): User.objects.get raises DoesNotExist for an unknown email,
    # which surfaces as a 500 instead of a 401 — confirm/handle.
    user = User.objects.get(email=body.get('email'))
    authorized = check_password_hash(user.password, body.get('password'))
    if not authorized:
        return {'error': 'Email or password invalid'}, 401
    expires = datetime.timedelta(days=7)
    access_token = create_access_token(identity=({
        'role' : user['role'],
        'username' : user['username']
    }), expires_delta=expires)
    # NOTE(review): this response echoes the bcrypt password hash back to the
    # client — that leaks credential material and should be removed once
    # consumers are confirmed not to depend on it.
    return {
        'token': access_token,
        'username': user.username,
        'email': user.email,
        'password': user.password,
        'role': user.role
    }, 200
@user_blueprint.route('/users')
@jwt_required
def get_users():
    """Return every user as a JSON array (JWT required)."""
    payload = User.objects().to_json()
    return Response(payload, mimetype="application/json", status=200)
@user_blueprint.route('/users/<id>')
@jwt_required
def get_user(id):
    """Return a single user looked up by primary key, as JSON (JWT required)."""
    payload = User.objects.get(id=id).to_json()
    return Response(payload, mimetype="application/json", status=200)
# @user_blueprint.route('/users', methods=['POST'])
# @jwt_required
# def add_user():
# body = request.get_json()
# user = User(**body)
# user.password = generate_password_hash(user.password).decode('utf8')
# user.save()
# id = user.id
# return {'id': str(id)}, 200
# @jwt_required
# @user_blueprint.route('/users/<id>', methods=['PUT'])
# def update_user(id):
# body = request.get_json()
# User.objects.get(id=id).update(**body)
# return '', 200
# @jwt_required
# @user_blueprint.route('/users/<id>', methods=['DELETE'])
# def delete_user(id):
# User.objects.get(id=id).delete()
# return '', 200 |
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# Initialize the camera and grab a reference to the raw camera capture.
camera = PiCamera()
rawCapture = PiRGBArray(camera)
# Allow the camera sensor to warm up before the first capture.
time.sleep(0.1)
# Grab a single frame as a BGR array (OpenCV's channel order).
camera.capture(rawCapture, format = "bgr")
image = rawCapture.array
# On-screen preview is disabled. NOTE(review): cv2 spells it `waitKey`,
# not `waitkey` — fix before re-enabling these two lines.
#cv2.imshow("image",image)
#cv2.waitkey(0)
cv2.imwrite('image.jpg',image)
|
"""Module only used for the follow part of the script"""
from .actions import Actions
from .time_util import sleep
from selenium.webdriver.common.keys import Keys
def follow_from_recommended(browser, amount):
    """Follows given amount of users from the who to follow list"""
    # Open the full recommendations page and collect the follow buttons.
    browser.get('https://twitter.com/who_to_follow')
    body_elem = browser.find_element_by_tag_name('body')
    button_xpath = '//div[@id = "timeline"]/div/div[@class = "stream"]/ol/li/div/div[@class = "follow-bar"]/div/span/button[1]'
    timeline = browser.find_elements_by_xpath(button_xpath)
    last_length = 0
    # Keep scrolling to the bottom (then back up) until either enough buttons
    # are loaded or scrolling stops yielding new ones.
    while last_length < len(timeline) < amount:
        last_length = len(timeline)
        body_elem.send_keys(Keys.END)
        sleep(2)
        body_elem.send_keys(Keys.HOME)
        sleep(2)
        timeline = browser.find_elements_by_xpath(button_xpath)
    followed = min(amount, len(timeline))
    # Queue a click (with short pauses and a progress report) for each button,
    # then run the whole chain at once.
    action_chain = Actions(browser)
    for index, button in enumerate(timeline[:followed]):
        action_chain.move_to_element(button)
        action_chain.wait(1)
        action_chain.click()
        action_chain.wait(1)
        action_chain.print_it(str(index + 1) + '/' + str(followed))
    action_chain.perform()
    sleep(1)
    return followed
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init, bias_init_with_prob, ConvModule,constant_init
from mmdet.core import multi_apply, bbox2roi, matrix_nms
from ..builder import HEADS, build_loss, build_head
from scipy import ndimage
import pdb
import matplotlib.pyplot as plt
from torch.nn import functional as F
@HEADS.register_module
class SpatialAttention(nn.Module):
    """CBAM-style spatial attention: (avg, max) channel pooling -> conv -> sigmoid map."""

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        # Padding keeps the spatial size unchanged for either kernel choice.
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Stack channel-wise average and max descriptors into a 2-channel map.
        pooled = torch.cat(
            [x.mean(dim=1, keepdim=True), x.max(dim=1, keepdim=True)[0]],
            dim=1)
        return self.sigmoid(self.conv1(pooled))

    def init_weights(self):
        # Nothing beyond the default conv initialisation.
        pass
@HEADS.register_module
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=2, sub_sample=False, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
# self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
# kernel_size=1, stride=1, padding=0)
# self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
# kernel_size=1, stride=1, padding=0)
# self.sum_after = conv_nd(in_channels=self.in_channels, out_channels=self.in_channels,
# kernel_size=1, stride=1, padding=0)
self.human_theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.human_phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, feats_all=None,human_feats=None):
    '''
    Non-local attention over ``feats_all`` where the affinity (attention)
    map is computed solely from ``human_feats``.

    :param feats_all: feature map; rank depends on the block dimension,
                      e.g. (b, c, h, w) for the 2D block -- assumed, confirm
                      against caller
    :param human_feats: human-branch feature map; must be spatially
                        compatible with ``feats_all`` so the attention map
                        matches the flattened value positions
    :return: ``feats_all`` plus the attention-weighted residual (same shape
             as ``feats_all``)
    '''
    # query/key mirror the standard non-local block layout but are unused
    # below -- only `value` and `human_feats` drive the output.
    query = feats_all
    key = feats_all
    value = feats_all
    batch_size = feats_all.size(0)
    # human_map=human_map.reshape(40,40,40,40)
    # for i in range(0,40,6):
    #     for j in range(0,40,6):
    #         plt.plot(j,i,'ks')
    #         plt.imshow(human_map[i][j].cpu().numpy())
    #         plt.show()
    # Value embedding, flattened over spatial positions: (b, N, inter_channels).
    g_x = self.g(value).view(batch_size, self.inter_channels, -1)
    g_x = g_x.permute(0, 2, 1)
    # theta_x = self.theta(query).view(batch_size, self.inter_channels, -1)
    #human_query = self.human_theta(human_feats).view(batch_size, self.inter_channels, -1).permute(0, 2, 1)
    # theta_x=torch.cat([theta_x,human_query],dim=1)
    # theta_x = theta_x.permute(0, 2, 1)
    # Key embedding from the human branch: (b, inter_channels, N).
    human_key =self.human_phi(human_feats).view(batch_size, self.inter_channels, -1)
    # phi_x = self.phi(key).view(batch_size, self.inter_channels, -1)
    # phi_x=torch.cat([phi_x,human_key],dim=1)
    # Gram matrix of human keys: pairwise position affinities, (b, N, N).
    f = torch.matmul(human_key.permute(0, 2, 1), human_key)
    # f = (self.inter_channels*2 ** -.5) * f
    # Normalize affinities per query position (no temperature scaling; the
    # scaled variant is left commented out above).
    f = F.softmax(f, dim=-1)
    # f=f.reshape(52,52,52,52)
    # for i in range(0,52,3):
    #     for j in range(0,52,3):
    #         plt.plot(j,i,'ks')
    #         plt.imshow(f[i][j].cpu().numpy())
    #         plt.show()
    f_div_C = f
    # Aggregate value features with the human-derived attention map.
    y = torch.matmul(f_div_C, g_x)
    y = y.permute(0, 2, 1).contiguous()
    y = y.view(batch_size, self.inter_channels, *feats_all.size()[2:])
    # Project back to in_channels via W (1x1 conv, optionally followed by BN).
    W_y = self.W(y)
    # for i in range(W_y.shape[1]):
    #     plt.subplot(1,3,1)
    #     plt.imshow(W_y[0][i].cpu().numpy())
    #     plt.subplot(1,3,2)
    #     plt.imshow(feats_all[0][i].cpu().numpy())
    #     plt.subplot(1,3,3)
    #     plt.imshow((W_y+feats_all)[0][i].cpu().numpy())
    #     plt.show()
    # Residual connection keeps the block identity-initialized (W starts at 0).
    return W_y+feats_all
def minmaxscaler(self, data):
    """Rescale *data* linearly into [0, 1] using its global min and max.

    NOTE(review): a constant tensor (max == min) divides by zero here,
    matching the original behavior.
    """
    lo, hi = torch.min(data), torch.max(data)
    return (data - lo) / (hi - lo)
class NONLocalBlock1D(_NonLocalBlockND):
    """1D specialization of the non-local block (sequence/temporal inputs)."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=False, bn_layer=True):
        super(NONLocalBlock1D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=1,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
@HEADS.register_module
class NONLocalBlock2D(_NonLocalBlockND):
    """2D specialization of the non-local block (spatial feature maps);
    registered in the HEADS registry so configs can reference it by name."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=False, bn_layer=True):
        super(NONLocalBlock2D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=2,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
class NONLocalBlock3D(_NonLocalBlockND):
    """3D specialization of the non-local block (spatio-temporal inputs)."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=False, bn_layer=True):
        super(NONLocalBlock3D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=3,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
|
import json as json_mod
import os
import sys
import click
from leapp.tool.utils import find_project_basedir, load_all_from, get_project_name
from leapp.models import get_models
from leapp.actors import get_actors, get_actor_metadata
from leapp.channels import get_channels
def is_local(base_dir, cls):
    """Return True when *cls* is defined in a module located under *base_dir*."""
    module_file = sys.modules[cls.__module__].__file__
    return os.path.realpath(module_file).startswith(base_dir)
def print_group(name, items):
    """Print a titled, bulleted list of classes with their project-relative paths."""
    sys.stdout.write('{group}:\n'.format(group=name))
    rows = [
        ' - {name:<25} ./{path}\n'.format(name=item.__name__, path=get_class_file(item))
        for item in items
    ]
    sys.stdout.writelines(rows)
    sys.stdout.write('\n')
def get_class_file(cls):
    """Return the .py file defining *cls*, relative to the project base directory."""
    source = sys.modules[cls.__module__].__file__.replace('.pyc', '.py')
    return os.path.relpath(os.path.abspath(source), find_project_basedir('.'))
def get_actor_details(actor):
    """Return actor metadata with model classes flattened to names, plus the source path."""
    meta = get_actor_metadata(actor)
    for field in ('produces', 'consumes'):
        meta[field] = tuple(model.__name__ for model in meta[field])
    meta['path'] = get_class_file(actor)
    return meta
def get_channel_details(channel):
    """Describe a channel by its runtime name and defining file path."""
    details = {'name': channel().name}
    details['path'] = get_class_file(channel)
    return details
def get_model_details(model):
    """Describe a model by its defining file path only."""
    return dict(path=get_class_file(model))
@click.command('discover')
@click.option('--json', is_flag=True)
def cli(json):
    """Discover and report this project's models, channels and actors.

    ``json`` is the --json flag: False prints a human-readable listing,
    True dumps a machine-readable description to stdout. (The stdlib json
    module is imported as ``json_mod`` above to avoid this name clash.)
    """
    base_dir = find_project_basedir('.')
    load_all_from(base_dir)
    # Registries are global; keep only classes defined inside this project tree.
    actors = [actor for actor in get_actors() if is_local(base_dir, actor)]
    models = [model for model in get_models() if is_local(base_dir, model)]
    channels = [channel for channel in get_channels() if is_local(base_dir, channel)]
    if not json:
        print_group('Models', models)
        print_group('Channels', channels)
        print_group('Actors', actors)
    else:
        output = {
            'project': get_project_name(base_dir),
            'base_dir': base_dir,
            'channels': {channel.__name__: get_channel_details(channel) for channel in channels},
            'models': {model.__name__: get_model_details(model) for model in models},
            'actors': {actor.__name__: get_actor_details(actor) for actor in actors}
        }
        json_mod.dump(output, sys.stdout, indent=2)
|
class Solution:
    def replaceDigits(self, s: str) -> str:
        """Replace each digit s[i] (odd indices) with the character obtained
        by shifting the preceding letter s[i-1] forward by that digit."""
        chars = []
        previous_letter = ''
        for position, ch in enumerate(s):
            if position % 2 == 0:
                previous_letter = ch
                chars.append(ch)
            else:
                # Advance the Unicode code point of the preceding letter.
                chars.append(chr(ord(previous_letter) + int(ch)))
        return ''.join(chars)
# 模範解答
# https://leetcode.com/problems/replace-all-digits-with-characters/discuss/1186143/Python-1-Liner-%2B-Simple-Readable
'''
class Solution:
def replaceDigits(self, s: str) -> str:
return ''.join(chr(ord(s[i-1]) + int(s[i])) if s[i].isdigit() else s[i] for i in range(len(s)))
'''
'''
class Solution:
def replaceDigits(self, s: str) -> str:
answer = []
for i, char in enumerate(s):
# isdigit(): 文字列が数字だったら
if char.isdigit(): char = chr(ord(s[i-1]) + int(char))
# += だと新しいインスタンスを生成するので、できるだけ append や join を利用すること
answer.append(char)
return ''.join(answer)
'''
|
from dolfin import *
from block import block_mat, block_vec, block_transpose
from block.iterative import MinRes
from block.algebraic.petsc import AMG
import rigid_motions
# Optimization options for the form compiler
parameters["form_compiler"]["cpp_optimize"] = True
parameters["form_compiler"]["representation"] = "uflacs"
def lagrange_mixed(lmbda, mu, f, h, mesh, Z=None):
    '''
    Solves

        -div(sigma) = f in  Omega
        sigma.n = h on boundary

    where sigma(u) = 2*mu*eps(u) + lambda*div(u)*I. The problem is reformulated by
    Lagrange multiplier nu to enforce orthogonality with the space of rigid
    motions. To get robustness in lmbda solid pressure p = lambda*div u is introduced.
    The system to be solved with MinRes is

        P*[A  C B; *[u,  = P*[L,
           C' D 0;   p,       0,
           B' 0 0]   nu]      0]

    with P a preconditioner. We run on series of meshes to show mesh independence
    of the solver.

    Returns (total number of dofs, MinRes iteration count); given an iterable
    of meshes, returns one such pair per mesh.
    '''
    if not isinstance(mesh, Mesh):
        # NOTE: You can precompute the 'symbolic' basis and pass it here
        return [lagrange_mixed(lmbda, mu, f, h, mesh_) for mesh_ in mesh]

    # For cube
    V = VectorFunctionSpace(mesh, 'CG', 2)
    Q = FunctionSpace(mesh, 'CG', 1)

    u, v = TrialFunction(V), TestFunction(V)
    p, q = TrialFunction(Q), TestFunction(Q)

    # Strain
    epsilon = lambda u: sym(grad(u))
    # Stress (sigma itself is unused below; `a` spells out the mu-part directly)
    gdim = mesh.geometry().dim()
    sigma = lambda u: 2*mu*epsilon(u) + lmbda*tr(epsilon(u))*Identity(gdim)

    # mu-part of the elasticity operator; the lambda-part enters through p.
    a = 2*mu*inner(sym(grad(u)), sym(grad(v)))*dx
    A = assemble(a)

    # Displacement / solid-pressure coupling.
    c = inner(div(v), p)*dx
    C = assemble(c)

    # Pressure mass matrix scaled by -1/lambda (vanishes as lmbda -> inf).
    d = -(inner(p, q)/Constant(lmbda))*dx
    D = assemble(d)

    # Vector mass matrix, used to build the rigid-motion constraint block B.
    m = inner(u, v)*dx
    M = assemble(m)

    # NOTE: Avoiding use of Q space in the assembly - dense blocks!
    X = VectorFunctionSpace(mesh, 'R', 0, dim=6)
    Zh = rigid_motions.RMBasis(V, X, Z)  # L^2 orthogonal
    B = M*Zh

    # System operator
    AA = block_mat([[A, C, B],
                    [block_transpose(C), D, 0],
                    [block_transpose(B), 0, 0]])

    # Right hand side
    L = inner(f, v)*dx + inner(h, v)*ds
    b0 = assemble(L)
    b1 = assemble(inner(Constant(0), q)*dx)
    # Equivalent to assemble(inner(Constant((0, )*6), q)*dx) but cheaper
    b2 = Function(X).vector()
    bb = block_vec([b0, b1, b2])

    # Block diagonal preconditioner: AMG on the displacement and pressure
    # blocks, exact identity on the (small) rigid-motion block.
    IV = assemble(a + m)
    IQ = assemble(inner(p, q)*dx)
    IX = rigid_motions.identity_matrix(X)
    BB = block_mat([[AMG(IV), 0, 0],
                    [0, AMG(IQ), 0],
                    [0, 0, IX]])

    # Solve, using random initial guess
    x0 = AA.create_vec()
    [as_backend_type(xi).vec().setRandom() for xi in x0]

    AAinv = MinRes(AA, precond=BB, initial_guess=x0, maxiter=120, tolerance=1E-8,
                   show=2, relativeconv=True)
    x = AAinv*bb

    # # Functions from coefficients
    # # uh = Function(V, x[0])     # Displacement
    # # ph = Function(Q, x[1])     # Solid pressure
    # # nuh = Zh.rigid_motion(x[2])  # Function in V

    niters = len(AAinv.residuals) - 1
    assert niters < 120

    # Check orthogonality of the displacement w.r.t. rigid motions
    # (legacy Python 2 print below; this whole module is py2-era DOLFIN).
    P = rigid_motions.Projector(Zh)
    P*x0[0]  # to get orthogonality
    if MPI.rank(mesh.mpi_comm()) == 0:
        print '\033[1;37;31m%s\033[0m' % ('Orthogonality %g' % max(P.alphas))
        pass

    return V.dim() + Q.dim() + 6, niters
def test_lagrange_mixed():
    '''Number of iterations should not blow up'''
    from numpy import savetxt, array, c_

    mu = Constant(1)
    f = Expression(('A*sin(2*x[0])', 'A*cos(3*(x[0]+x[1]+x[2]))', 'A*sin(x[2])'),
                   degree=3, A=0.01)
    h = Constant((0, 0, 0))

    comm = mpi_comm_world().tompi4py()
    Ns = [2, 4, 8]
    # Use finer meshes only when enough MPI ranks are available.
    if comm.size > 3:
        Ns.extend([16, 32, 64])

    data = []
    # Sweep lambda over 12 orders of magnitude to check lambda-robustness.
    lmbdas = 1E12, 1E8, 1E4, 1E0
    for lmbda in lmbdas:
        meshes = (BoxMesh(Point(1, 1, 1), Point(2, 1.5, 1.25), N, N, N) for N in Ns)
        converged = lagrange_mixed(Constant(lmbda), mu, f, h, meshes)
        assert all(converged)
        # First lambda keeps the (dofs, iters) pairs; subsequent lambdas
        # append their iteration column only.
        if len(data) == 0:
            data = array(converged)
        else:
            data = c_[data, array(converged)[:, -1]]
    # Dump data for plotting
    if comm.rank == 0:
        header = ' '.join(map(str, lmbdas))
        savetxt('./.lagrange_mixed.txt', data, fmt=['%d']*data.shape[1],
                header=header)
    return True
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # Legacy (py2-era DOLFIN) entry point: verbose progress logging, then
    # run the convergence test directly.
    set_log_level(PROGRESS)
    assert test_lagrange_mixed()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 5 18:28:30 2019
@author: Theo
"""
from sklearn.decomposition import PCA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the scraped track data; latin1 handles non-UTF8 artist names.
tracks = pd.read_csv('dataframe.csv',sep=',',encoding='latin1')
# Keep numeric columns only (drop identifier columns).
tracks_numerical=tracks.drop(['Artist','Track Name','Unnamed: 0'],axis=1)
# Inspect the Score column, then log-transform and standardize it.
tracks_numerical['Score'].describe()
sns.distplot(tracks_numerical['Score'])
score=np.log(tracks_numerical['Score'])
score.describe()
sns.distplot(score)
score=(score - np.mean(score)) / np.std(score)
score.describe()
sns.distplot(score)
# NOTE(review): the normalized `score` is never written back into the frame,
# so the PCA below runs on the raw Score column -- confirm this is intended.
df = tracks_numerical
n_components = 18
# Do the PCA.
pca = PCA(n_components=n_components)
reduced = pca.fit_transform(df)
# Append the principal components for each entry to the dataframe
for i in range(0, n_components):
    df['PC' + str(i + 1)] = reduced[:, i]
# NOTE(review): `display` is an IPython/Jupyter builtin; this line fails under
# plain `python`.
display(df.head())
g = sns.lmplot(
    'PC1',
    'PC2',
    data=df,
    hue='Score',
    fit_reg=False,
    scatter=True,
    size=7,
)
plt.show()
# Plot a variable factor map for the first two dimensions.
(fig, ax) = plt.subplots(figsize=(12, 12))
for i in range(0, len(pca.components_)):
    ax.arrow(0, 0,  # Start the arrow at the origin
             pca.components_[0, i], pca.components_[1, i],  # 0 and 1 correspond to dimension 1 and 2
             head_width=0.1,head_length=0.1)
    plt.text(pca.components_[0, i] + 0.05, pca.components_[1, i] + 0.05, df.columns.values[i])
an = np.linspace(0, 2 * np.pi, 100)  # Add a unit circle for scale
plt.plot(np.cos(an), np.sin(an))
plt.axis('equal')
ax.set_title('Variable factor map')
plt.show()
|
import re
# 1000x1000 grid of booleans, all lights initially off.
grid_of_lights = [[False] * 1000 for _ in range(1000)]
def turn_on(x1, y1, x2, y2):
    """Switch on every light in the inclusive rectangle (x1..x2) x (y1..y2)."""
    for row in range(x1, x2 + 1):
        row_lights = grid_of_lights[row]
        for col in range(y1, y2 + 1):
            row_lights[col] = True
def turn_off(x1, y1, x2, y2):
    """Switch off every light in the inclusive rectangle (x1..x2) x (y1..y2)."""
    for row in range(x1, x2 + 1):
        row_lights = grid_of_lights[row]
        for col in range(y1, y2 + 1):
            row_lights[col] = False
def toggle(x1, y1, x2, y2):
    """Flip every light in the inclusive rectangle (x1..x2) x (y1..y2)."""
    for row in range(x1, x2 + 1):
        for col in range(y1, y2 + 1):
            # XOR with True inverts the boolean in place.
            grid_of_lights[row][col] ^= True
def printer():
    """Print the whole grid, one row per line (verbose: 1000 rows)."""
    # print(...) with a single argument behaves the same under Python 2 and 3.
    print("Grid: ")
    for line in grid_of_lights:
        print(line)
    print("\n")
def counter():
    """Print how many lights are currently switched on."""
    print(sum(1 for line in grid_of_lights for light in line if light))
def parser(line):
    """Parse one instruction line and apply it to the grid.

    Expects a line like "turn on 0,0 through 999,999". Extracts the two
    corner coordinates and dispatches on the action keyword.

    Bug fix: the original passed (x[0], x[1], y[0], y[1]) -- i.e.
    (x1, x2, y1, y2) -- to the grid functions, which expect
    (x1, y1, x2, y2). For "turn on 0,0 through 999,999" that lit a single
    cell instead of the whole grid.
    """
    xs = []
    ys = []
    coords = re.findall(r"\d*,\d*", line)
    for coord in coords:
        a, b = coord.split(',')
        xs.append(int(a))
        ys.append(int(b))
    # now check what action we're performing
    if 'toggle' in line:
        toggle(xs[0], ys[0], xs[1], ys[1])
    elif 'turn on' in line:
        turn_on(xs[0], ys[0], xs[1], ys[1])
    elif 'turn off' in line:
        turn_off(xs[0], ys[0], xs[1], ys[1])
    else:
        print('whoops')
# Apply one instruction and report the number of lit lights.
parser('turn on 0,0 through 999,999')
counter()
|
from __future__ import print_function
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
from sklearn.model_selection import train_test_split
from sklearn import linear_model, ensemble, metrics, gaussian_process
from dataClean import *
from sklearn.svm import SVC
from sklearn import metrics, svm
from sklearn.naive_bayes import GaussianNB
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
from sklearn.externals import joblib
from sklearn.metrics import explained_variance_score,mean_absolute_error,mean_squared_error,r2_score
from sklearn.neural_network import MLPRegressor
if __name__ == "__main__":
    # Features: everything except the target price; target: log-price
    # (log stabilizes variance of the skewed price distribution).
    df = pd.read_csv("refined_data.csv")
    x = df.drop(['PriceNumeric'], axis=1)
    y = np.log(df.PriceNumeric)
    # Store X y for NN and kmean
    x.to_csv("num_X.csv",index=False)
    df.PriceNumeric.to_csv("num_Y.csv",index=False)
    # Scale mileage into [0, 2]; map MakeYear into roughly the same range.
    min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0,2))
    x['KmNumeric'] = min_max_scaler.fit_transform(x['KmNumeric'].values.reshape(-1,1))
    x['MakeYear'] = (x['MakeYear']-1998)/15
    x.to_csv("minus.csv",index=False)
    # np.savetxt("minmax.csv",x,fmt='%1.3f',delimiter=',')
    # split data
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=42, test_size=.33)
    # linear regression
    lr = linear_model.LinearRegression()
    # SVR
    svr = svm.SVR(kernel='rbf')
    # GB
    gb = ensemble.GradientBoostingRegressor(n_estimators=200, max_depth=15, loss='huber', learning_rate=0.03)
    # random forest
    rf = ensemble.RandomForestRegressor(n_estimators=10)
    # NN
    nn = MLPRegressor(hidden_layer_sizes=(100,50),solver='adam',learning_rate_init=0.01,max_iter=500,activation='relu')
    # NOTE(review): only the linear model is actually trained below; the other
    # estimators are constructed but unused, and the print label says "rf".
    models = [lr]
    for modelfc in models:
        model = modelfc.fit(x_train, y_train)
        y_pred = model.predict(x_test)
        test_score = r2_score(y_test,y_pred)
        print("rf: ",test_score)
    # store model file in server for API
    # NOTE(review): sklearn.externals.joblib was removed in modern
    # scikit-learn; the standalone `joblib` package is the replacement.
    joblib.dump(model, './model/lr_new.pkl')
    # plot figure to show the regression
    y_pred = model.predict(x_test)
    plt.scatter(y_pred, y_test, alpha=.65, color='b')
    plt.xlabel('Predicted Price')
    plt.ylabel('Actual Price')
    # NOTE(review): title says SVR but the fitted model is LinearRegression.
    plt.title('Support Vector Regression Model')
    overlay = 'R^2 is: {}\nRMSE is: {}'.format(test_score, mean_squared_error(y_test, y_pred))
    plt.annotate(s=overlay,xy=(14,12),size='x-large')
    plt.show()
|
# The user enters a duration in seconds; convert it to hours, minutes and
# seconds and print it as hh:mm:ss using string formatting.
seconds = int(input("Введите время в секундах "))
hour, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
time = f"{hour:02d}:{minutes:02d}:{seconds:02d}"
print(f"Вы ввели {time}")
|
# A for loop is used for iterating over a sequence (that is either a list, a tuple, a dictionary, a set, or a string).
# With the for loop we can execute a set of statements, once for each item in a list, tuple, set etc.
fruits = ["apple", "banana", "cherry"]
for x in fruits:
    print(x)
# Looping Through a String
# Even strings are iterable objects, they contain a sequence of characters:
for x in "banana":
    print(x)
# The break Statement
# With the break statement we can stop the loop before it has looped through all the items:
fruits = ["apple", "banana", "cherry"]
for x in fruits:
    print(x)
    if x == "banana":
        break
# Here the break comes before the print, so "banana" itself is not printed.
fruits = ["apple", "banana", "cherry"]
for x in fruits:
    if x == "banana":
        break
    print(x)
# The continue Statement
# With the continue statement we can stop the current iteration of the loop, and continue with the next:
fruits = ["apple", "banana", "cherry"]
for x in fruits:
    if x == "banana":
        continue
    print(x)
# The range() Function
# To loop through a set of code a specified number of times, we can use the range() function
# The range() function returns a sequence of numbers, starting from 0 by default, and increments by 1 (by default),
# and ends at a specified number.
for x in range(6):
    print(x)
# Note that range(6) is not the values of 0 to 6, but the values 0 to 5.
# default range start from 0 but we can specify the range
for x in range(2, 6):
    print(x)
# The range() function defaults to increment the sequence by 1, however it is possible to specify the increment value
# by adding a third parameter: range(2, 30, 3):
for x in range(2, 30, 3):
    print(x)
# Else in For Loop
# The else keyword in a for loop specifies a block of code to be executed when the loop is finished:
for x in range(6):
    print(x)
else:
    print("Finally finished!")
# Note: The else block will NOT be executed if the loop is stopped by a break statement.
for x in range(6):
    if x == 3: break
    print(x)
else:
    print("Finally finished!")
# Nested Loops
# A nested loop is a loop inside a loop.
# The "inner loop" will be executed one time for each iteration of the "outer loop"
adj = ["red", "big", "tasty"]
fruits = ["apple", "banana", "cherry"]
for x in adj:
    for y in fruits:
        print(x, y)
# The pass Statement - for loop cannot be empty
for x in [0, 1, 2]:
    pass
|
from django import template
register = template.Library()
def include_js(context, jsname, path='assets/studytribe/js/'):
    """Build the template context for the include_js inclusion tag."""
    return {
        'jsname': jsname,
        'STATIC_URL': context['STATIC_URL'],
        'debug': 'debug' in context,
        'path': path,
    }
def include_css(context, cssname, path='assets/studytribe/css/'):
    """Build the template context for the include_css inclusion tag."""
    return {
        'cssname': cssname,
        'STATIC_URL': context['STATIC_URL'],
        'debug': 'debug' in context,
        'path': path,
    }
# Register both helpers as inclusion tags; takes_context=True makes Django
# pass the template context as their first argument.
register.inclusion_tag('inclusion_tags/include_js.html',
                       takes_context=True)(include_js)
register.inclusion_tag('inclusion_tags/include_css.html',
                       takes_context=True)(include_css)
def get_field_verbose_name(object, arg):
    """Template filter: return the verbose_name of model field *arg* on *object*."""
    field = object._meta.get_field(arg)
    return field.verbose_name
register.filter('field_verbose_name', get_field_verbose_name)
|
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from django.views.generic.base import TemplateView
# URL routes for this app. Serving media/static via static() only works with
# DEBUG=True; in production the web server should serve those directly.
# NOTE(review): `url` and `TemplateView` are imported above but unused here.
urlpatterns = [
    path('', views.homepageView, name="homepage"),
    path('login/', views.loginView, name="login"),
    path('signup/', views.signupView, name="signup"),
    path('logout/',views.logoutView,name="logout"),
    path('landingpage/', views.landingpage, name="landingpage")
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
import os
# Demonstrates that changes to os.environ propagate to child shells spawned
# by os.system: $MY_TEST expands to empty before the assignment and to 'qqrq'
# after it.
os.system("echo hello")
os.system("echo $HOME")
os.system("echo Before $MY_TEST")
os.environ['MY_TEST'] = 'qqrq'
os.system("echo After $MY_TEST")
|
"""Restaurant rating lister."""
def ratings_dict(filename):
    """Read "name:rating" lines from *filename* into a dict.

    Returns a dict mapping restaurant name -> rating. Ratings are kept as
    strings, matching the original behavior.

    Fixes: the original never closed the file (now uses `with`), shadowed
    the function's own name with a local variable, and computed an unused
    sorted key list.
    """
    ratings = {}
    with open(filename, "r") as restaurant_ratings:
        for line in restaurant_ratings:
            name, rating = line.rstrip().split(":")
            ratings[name] = rating
    return ratings
# put your code here
def user_ratings():
    """Load scores.txt, merge in one user-supplied rating, and print all
    ratings sorted by restaurant name."""
    ratings = ratings_dict("scores.txt")
    name = input("What is the name of the restaurant?")
    score = input("What is your rating for this restaurant?")
    ratings[name] = score
    for restaurant in sorted(ratings):
        print("{} is rated at {}.".format(restaurant, ratings[restaurant]))
user_ratings()
|
class Solution(object):
    def plusOne(self, digits):
        """
        Add one to a number represented as a list of decimal digits.

        :type digits: List[int]
        :rtype: List[int]
        """
        return self.plusOneHelper(digits)

    def plusOneHelper(self, digits):
        """Iterative carry propagation from the least-significant digit
        (same results as the original recursive version)."""
        result = list(digits)
        for i in range(len(result) - 1, -1, -1):
            if result[i] < 9:
                # No carry past this digit: done.
                result[i] += 1
                return result
            result[i] = 0
        # Every digit was 9: prepend the final carry.
        return [1] + result
# Smoke test: 1999 + 1 -> [2, 0, 0, 0].
solution = Solution()
print(solution.plusOne([1,9,9,9]))
|
from django.shortcuts import render
from .models import Listing
def index(request):
    """Render the listings page.

    NOTE(review): `Listing` is imported above but not queried yet; the
    template receives no context data.
    """
    return render(request , 'listings/listings.html')
|
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions
import os
# Bot with '?' as the command prefix.
client = commands.Bot(command_prefix = '?')

@client.event
async def on_ready():
    # Fires once the gateway connection is ready; set the playing status.
    print("BitBot is up and ready")
    await client.change_presence(activity=discord.Game(name='With Yo Girl'))
@client.event
async def on_command_error(ctx, error):
    # Global command-error hook: only unknown commands get a user-facing
    # reply; other errors fall through to discord.py's default handling.
    if isinstance(error, commands.CommandNotFound):
        await ctx.send("Sorry, command not found \nTo view a list of commands enter: '?help'")
@client.event
async def on_guild_join(guild):
    # Greet the guild in its #general channel.
    # NOTE(review): `channel` is None when no 'general' text channel exists,
    # which would raise AttributeError on .send -- confirm intended.
    channel = discord.utils.get(guild.text_channels, name='general')
    await channel.send("Hi, I'm BitBot.\nMy command prefix is '?', to see a list of commands type '?help'")
# Load every cog module from ./cogs (each .py file is a discord.py extension).
for filename in os.listdir('./cogs'):
    if filename.endswith('.py'):
        client.load_extension(f'cogs.{filename[:-3]}')

# SECURITY: never hard-code a bot token (the original called client.run('')
# with an empty literal). Read it from the environment; the empty-string
# fallback preserves the original failure mode when the variable is unset.
client.run(os.getenv('DISCORD_TOKEN', ''))
import logging
import os
import tempfile
import tarfile
import shutil
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from pytorch_pretrained_bert.modeling import BertModel, BertPredictionHeadTransform, BertConfig, \
BertLayerNorm, PRETRAINED_MODEL_ARCHIVE_MAP, BERT_CONFIG_NAME, TF_WEIGHTS_NAME, load_tf_weights_in_bert
from pytorch_pretrained_bert.file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME
from perl_embedding_initializer import get_emb_weights
logger = logging.getLogger(__name__)
class BertLMPredictionHead(nn.Module):
    """LM head: transform the hidden states, then project them with a linear
    decoder initialized by tying to the input embedding matrix."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.pivots_decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                        bert_model_embedding_weights.size(0),
                                        bias=False)
        # Weight tying: the decoder reuses the embedding matrix directly.
        self.pivots_decoder.weight = bert_model_embedding_weights
        self.pivots_bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        # Returns unnormalized scores (logits) over the decoder's output dim.
        hidden_states = self.transform(hidden_states)
        hidden_states = self.pivots_decoder(hidden_states) + self.pivots_bias
        return hidden_states
class BertOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the masked-LM prediction head."""

    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        # (batch, seq, hidden) -> logits over the prediction head's output dim.
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores
class BertPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-large-cased`
                    . `bert-base-multilingual-uncased`
                    . `bert-base-multilingual-cased`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)

        Returns the instantiated model, or None when the archive cannot be
        resolved (note: errors are logged, not raised, in that case).
        """
        # Pop loader-specific kwargs so the remaining **kwargs go to cls(...).
        state_dict = kwargs.get('state_dict', None)
        kwargs.pop('state_dict', None)
        cache_dir = kwargs.get('cache_dir', None)
        kwargs.pop('cache_dir', None)
        from_tf = kwargs.get('from_tf', False)
        kwargs.pop('from_tf', None)
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = pretrained_model_name_or_path
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    archive_file))
            return None
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file) or from_tf:
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        if not os.path.exists(config_file):
            # Backward compatibility with old naming format
            config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME)
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            state_dict = torch.load(weights_path, map_location='cpu')
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
            return load_tf_weights_in_bert(model, weights_path)
        # Load from a PyTorch state_dict
        # Rename TF-era LayerNorm parameters: gamma -> weight, beta -> bias.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            # Recursively load parameters, mirroring nn.Module.load_state_dict
            # but collecting (instead of raising on) missing/unexpected keys.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, False, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')

        start_prefix = ''
        # Checkpoints saved from a head-bearing model prefix BERT weights with
        # 'bert.'; when loading into a bare model without a .bert submodule,
        # consume that prefix so the names line up.
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        return model
class BertForMaskedLM(BertPreTrainedModel):
    """BERT model with the masked language modeling head.
    This module comprises the BERT model followed by the masked language modeling head.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]
        `head_mask`: an optional torch.LongTensor of shape [num_heads] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.

    Outputs:
        if `masked_lm_labels` is not `None`:
            Outputs the masked language modeling loss.
        if `masked_lm_labels` is `None`:
            Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForMaskedLM(config)
    masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, output_dim, output_attentions=False, init_embed=True, src='books', trg='dvd'):
        super(BertForMaskedLM, self).__init__(config)
        self.output_attentions = output_attentions
        self.output_dim = output_dim
        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)
        # Replace the tied full-vocabulary decoder with a smaller head over
        # `output_dim` pivot classes.
        self.cls.predictions.pivots_decoder = nn.Linear(self.bert.embeddings.word_embeddings.weight.size(1), output_dim,
                                                        bias=False)
        if init_embed:
            # Rows 1.. are initialized from precomputed pivot embeddings for
            # the src->trg domain pair; row 0 keeps its random init --
            # presumably the "non-pivot" class, confirm against get_emb_weights.
            init_emb = get_emb_weights(src, trg, num_pivots=output_dim-1)
            self.cls.predictions.pivots_decoder.weight.data[1:] = init_emb
        self.cls.predictions.pivots_bias = nn.Parameter(torch.zeros(output_dim))

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, head_mask=None):
        # NOTE(review): head_mask is accepted but never forwarded to self.bert.
        outputs = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        if self.output_attentions:
            all_attentions, sequence_output, _ = outputs
        else:
            sequence_output, _ = outputs
        prediction_scores = self.cls(sequence_output)

        if masked_lm_labels is not None:
            # Labels of -1 are ignored; loss is over `output_dim` pivot classes.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.output_dim), masked_lm_labels.view(-1))
            return masked_lm_loss
        elif self.output_attentions:
            return all_attentions, prediction_scores
        return prediction_scores
|
# version 0.82 seconds on n = 100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 15 13:55:39 2017
@author: sdorai000
"""
#### ACTIVE EDIT ####
import sys
import time
# Module-level scratch state. Only result_count is actually read later
# (printed by main()); the three lists appear unused by the visible code.
result_count = 0
stairs = []
input_list = []
result_list = []
def answer(n):
    """Count the staircases buildable from exactly ``n`` bricks.

    A valid staircase uses all n bricks, has at least two steps, and its
    step heights are strictly increasing -- so the count equals the number
    of partitions of n into at least two *distinct* positive parts.

    This replaces the original backtracking enumeration, which raised
    IndexError for n < 3 (and for n == 2 first counted a bogus one-step
    staircase) and did exponential work, with an O(n^2) dynamic program
    that returns identical values for n >= 3 and a sane 0 below.

    Args:
        n: total number of bricks (int).

    Returns:
        Number of valid staircases; 0 when n < 3.
    """
    if n < 1:
        return 0
    # ways[j] = number of partitions of j into distinct positive parts,
    # using the part sizes processed so far.
    ways = [0] * (n + 1)
    ways[0] = 1  # the empty partition
    for part in range(1, n + 1):
        # Iterate totals downward so each part size is used at most once.
        for total in range(n, part - 1, -1):
            ways[total] += ways[total - part]
    # ways[n] includes the trivial single-step "staircase" [n]; a real
    # staircase needs at least two steps, so subtract that one.
    return ways[n] - 1
def main(argv):
    """Time answer() on a fixed brick count and print a summary line.

    ``argv`` is accepted for signature compatibility but not used.
    """
    brick_count = 100
    started = time.time()
    count = answer(brick_count)
    elapsed = time.time() - started
    print("brick_count,result_count,elapsed_time:", count, result_count, elapsed)
# Script entry point: run the timing harness when executed directly.
if __name__ == "__main__":
    main(sys.argv)
"""
## Sequence naming on the scanner console
Sequence names on the scanner must follow this specification to avoid manual
conversion/handling:
[PREFIX:][WIP ]<seqtype[-label]>[_ses-<SESID>][_task-<TASKID>][_acq-<ACQLABEL>][_run-<RUNID>][_dir-<DIR>][<more BIDS>][__<custom>]
where
[PREFIX:] - leading capital letters followed by : are stripped/ignored
[WIP ] - prefix is stripped/ignored (added by Philips for patch sequences)
<...> - value to be entered
[...] - optional -- might be nearly mandatory for some modalities (e.g.,
run for functional) and very optional for others
*ID - alpha-numerical identifier (e.g. 01,02, pre, post, pre01) for a run,
task, or session. Note that it makes more sense to use numerical values for
RUNID (e.g., _run-01, _run-02) for obvious sorting and possibly
descriptive ones for e.g. SESID (_ses-movie, _ses-localizer)
<seqtype[-label]>
a known BIDS sequence type which is usually a name of the folder under
subject's directory. And (optional) label is specific per sequence type
(e.g. typical "bold" for func, or "T1w" for "anat"), which could often
(but not always) be deduced from DICOM. Known to BIDS modalities are:
anat - anatomical data. Might also be collected multiple times across
runs (e.g. if subject is taken out of magnet etc), so could
(optionally) have "_run" definition attached. For "standard anat"
labels, please consult "8.3 Anatomy imaging data", but the most
common are 'T1w', 'T2w', 'angio'
func - functional (AKA task, including resting state) data.
Typically contains multiple runs, and might have multiple different
tasks different per each run
(e.g. _task-memory_run-01, _task-oddball_run-02)
fmap - field maps
dwi - diffusion weighted imaging (can also have runs)
_ses-<SESID> (optional)
a session. Having even a single sequence with a _ses specifier within a
study makes that study follow the "multi-session" layout. A common practice
is to have a _ses specifier within the scout sequence name. You can either
specify an explicit session identifier (SESID) or just ask for it to be
maintained/created automatically (numbering starts with 1).
You can also use _ses-{date} in case of scanning phantoms or non-human
subjects and wanting sessions to be coded by the acquisition date.
_task-<TASKID> (optional)
a short name for a task performed during that run. If not provided and it
is a func sequence, _task-UNKNOWN will be automatically added to comply with
BIDS. Consult http://www.cognitiveatlas.org/tasks on known tasks.
_acq-<ACQLABEL> (optional)
a short custom label to distinguish a different set of parameters used for
acquiring the same modality (e.g. _acq-highres, _acq-lowres etc)
_run-<RUNID> (optional)
a (typically functional) run. The same idea as with SESID.
_dir-[AP,PA,LR,RL,VD,DV] (optional)
to be used for fmap images, whenever a pair of the SE images is collected
to be used to estimate the fieldmap
<more BIDS> (optional)
any other fields (e.g. _acq-) from BIDS acquisition
__<custom> (optional)
after two underscores, any arbitrary comment, which will not affect the
layout in BIDS. But that one theoretically should not be necessary,
and (ab)use of it would just signal lack of thought while preparing sequence
name to start with since everything could have been expressed in BIDS fields.
## Last moment checks/FAQ:
- Functional runs should have _task-<TASKID> field defined
- Do not use "+", "_" or "-" within SESID, TASKID, ACQLABEL, RUNID, so we
could detect "canceled" runs.
- If run was canceled -- just copy canceled run (with the same index) and re-run
it. Files with overlapping name will be considered duplicate/canceled session
and only the last one would remain. The others would acquire
__dup0<number> suffix.
Although we still support "-" and "+" used within SESID and TASKID, their use is
not recommended, thus not listed here
## Scanner specifics
We perform following actions regardless of the type of scanner, but applied
generally to accommodate limitations imposed by different manufacturers/models:
### Philips
- We replace all ( with { and ) with } to be able e.g. to specify session {date}
- "WIP " prefix unconditionally added by the scanner is stripped
""" |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.