"""4.2 to 4.3 - add groups_roles relation
Revision ID: 784a82cec07a
Revises: f1dab814a4a0
Create Date: 2017-11-19 11:38:04.633560
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '784a82cec07a'
down_revision = 'f1dab814a4a0'
branch_labels = None
depends_on = None
DEFAULT_SYSTEM_ROLE_ID = 6
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
'groups_roles',
sa.Column('group_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.ForeignKeyConstraint(['group_id'], ['groups.id'], )
)
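    # give every existing group the default system role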
op.execute(
'INSERT INTO groups_roles '
'SELECT id, {0} '
'FROM groups'.format(DEFAULT_SYSTEM_ROLE_ID)
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('groups_roles')
# ### end Alembic commands ###
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/784a82cec07a_4_2_to_4_3.py",
"copies": "1",
"size": "1054",
"license": "apache-2.0",
"hash": -8959833898972082000,
"line_mean": 24.7073170732,
"line_max": 65,
"alpha_frac": 0.6337760911,
"autogenerated": false,
"ratio": 3.3144654088050314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4448241499905031,
"avg_score": null,
"num_lines": null
} |
"""42. Trapping Rain Water
https://leetcode.com/problems/trapping-rain-water/description/
Given n non-negative integers representing an elevation map where the width
of each bar is 1, compute how much water it is able to trap after raining.

The above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In
this case, 6 units of rain water (blue section) are being trapped. Thanks
Marcos for contributing this image!
Example:
Input: [0,1,0,2,1,0,1,3,2,1,2,1]
Output: 6
"""
from typing import List
class Solution:
def trap(self, height: List[int]) -> int:
ans, left = 0, 0
stack = []
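        # 'left' is the wall height at the start of the current basin; 'stack'
        # holds the effective heights of the columns seen since that wall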
for h in height:
if not stack:
stack.append(h)
left = h
elif h < left:
if h <= stack[-1]:
stack.append(h)
else:
cnt = 0
while stack[-1] < h:
ans += h - stack.pop()
cnt += 1
if not stack:
break
stack.extend([h] * (cnt + 1))
else:
top = min(left, h)
ans += len(stack) * top - sum(stack)
stack.clear()
stack.append(h)
left = h
return ans
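# Hypothetical quick check (not part of the original file), using the example
# from the problem statement above:
if __name__ == "__main__":
    assert Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]) == 6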
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/trapping_rain_water.py",
"copies": "1",
"size": "1411",
"license": "mit",
"hash": 9130103083398567000,
"line_mean": 26.6666666667,
"line_max": 77,
"alpha_frac": 0.4982282069,
"autogenerated": false,
"ratio": 3.7626666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4760894873566666,
"avg_score": null,
"num_lines": null
} |
"""433Mhz listener
Usage:
run.py run [--debug] [--redis-host=<hostname>] [--redis-port=<port>]
"""
import local_settings as settings
import datetime
import json
import logging
import redis
import serial
import time
import docopt
class Mhz433Listener(object):
def __init__(self, **kwargs):
redis_args = {}
if "redis_host" in kwargs and kwargs["redis_host"]:
redis_args["host"] = kwargs["redis_host"]
if "redis_port" in kwargs and kwargs["redis_port"]:
redis_args["port"] = kwargs["redis_port"]
self.redis = redis.StrictRedis(**redis_args)
self.logger = logging.getLogger("listen-433mhz")
if kwargs.get("debug"):
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
format_string = "%(asctime)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(format_string)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def run(self):
s = serial.Serial(settings.ARDUINO_433, 9600)
ITEM_MAP = settings.ARDUINO_433_ITEM_MAP
sent_event_map = {}
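        # sent_event_map debounces triggers: repeats within 5 seconds are ignored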
while True:
line = s.readline()
if line.startswith("Received "):
id = line.split(" ")[1]
if id in ITEM_MAP:
item_name = ITEM_MAP[id]
if item_name in sent_event_map:
if sent_event_map[item_name] > time.time() - 5:
# Already triggered recently - no action
continue
self.logger.info("Processing trigger %s (%s)", item_name, id)
data = [{
"measurement": "triggers",
"time": datetime.datetime.utcnow().isoformat() + "Z",
"tags": {
"trigger": item_name,
},
"fields": {
"triggered": True,
},
}]
self.redis.publish("influx-update-pubsub", json.dumps(data))
sent_event_map[item_name] = time.time()
self.redis.publish("lightcontrol-triggers-pubsub", json.dumps({"key": item_name}))
self.redis.publish("pir-pubsub", json.dumps({"source": item_name, "name": item_name, "router": "433"}))
else:
                    self.logger.warning("Unknown ID: %s", id)
def main(args):
    kwargs = {
        "redis_host": args.get("--redis-host"),
        "redis_port": args.get("--redis-port"),
    }
    lcs = Mhz433Listener(debug=args.get("--debug", False), **kwargs)
lcs.run()
if __name__ == '__main__':
arguments = docopt.docopt(__doc__, version='1.0')
main(arguments)
| {
"repo_name": "ojarva/home-info-display",
"path": "listen_433/run.py",
"copies": "1",
"size": "2891",
"license": "bsd-3-clause",
"hash": -8673570544704767000,
"line_mean": 34.6913580247,
"line_max": 123,
"alpha_frac": 0.5098581806,
"autogenerated": false,
"ratio": 4.118233618233618,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5128091798833618,
"avg_score": null,
"num_lines": null
} |
# 43. Multiply Strings - LeetCode
# https://leetcode.com/problems/multiply-strings/description/
class Solution:
def multiply(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
res = []
for i in range(len(num1)+len(num2)):
res.append(0)
if num1 == "" or num2 == "":
return ""
if num1 == "0" or num2 == "0":
return "0"
for i in range(len(num1)):
for j in range(len(num2)):
res[i+j] += int(num1[-1-i]) * int(num2[-1-j])
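        # propagate carries; res holds digits least-significant first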
carry = 0
for i in range(len(res)):
res[i] += carry
carry = res[i] // 10
res[i] = res[i] % 10
flag = True
ret = ""
for i in res[::-1]:
if flag:
if i != 0:
flag = False
else:
continue
ret += str(i)
return ret
s = Solution()
ans = [
["","",""],
["0","0","0"],
["1","0","0"],
["1","2","2"],
["11","11","121"],
["111","111","12321"],
["999","999","998001"],
["123","321","39483"],
["12","3456","41472"],
["123","456","56088"],
]
for i in ans:
r = s.multiply(i[0],i[1])
print("O" if r == i[2] else "X", r) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/043_multiply-strings.py",
"copies": "1",
"size": "1325",
"license": "mit",
"hash": 6011730040677549000,
"line_mean": 24.0188679245,
"line_max": 61,
"alpha_frac": 0.3939622642,
"autogenerated": false,
"ratio": 3.2960199004975124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4189982164697512,
"avg_score": null,
"num_lines": null
} |
"""43. Multiply Strings
https://leetcode.com/problems/multiply-strings/description/
Given two non-negative integers num1 and num2 represented as strings, return
the product of num1 and num2, also represented as a string.
Example 1:
Input: num1 = "2", num2 = "3"
Output: "6"
Example 2:
Input: num1 = "123", num2 = "456"
Output: "56088"
Note:
The length of both num1 and num2 is < 110.
Both num1 and num2 contain only digits 0-9.
Both num1 and num2 do not contain any leading zero, except the number 0
itself.
You must not use any built-in BigInteger library or convert the inputs to
integer directly.
"""
class Solution:
def multiply(self, num1: str, num2: str) -> str:
len1, len2 = len(num1), len(num2)
len3 = len1 + len2
product = ["0"] * len3
ans = ""
for i in range(len1):
for j in range(len2):
temp = str(int(num1[i]) * int(num2[j]))
# how many zeros are after temp
zeros = len1 - i - 1 + len2 - j - 1
for k in range(len(temp) - 1, -1, -1):
                    # add this digit into the running result, working in
                    # reverse (least-significant digit first)
pos = zeros + (len(temp) - k)
temp_sum = int(product[len3 - pos]) + int(temp[k])
gte_ten = temp_sum >= 10
l = 1
while gte_ten:
                        # note: the carry can keep propagating, e.g. '999...9' + 1
next_sum = int(product[len3 - pos - l]) + 1
gte_ten = next_sum >= 10
product[len3 - pos - l] = str(next_sum)[-1]
l += 1
product[len3 - pos] = str(temp_sum)[-1]
find_first_non_zero = False
for c in product:
if c == "0" and not find_first_non_zero:
continue
if c != "0":
find_first_non_zero = True
ans += c
if not find_first_non_zero:
return "0"
return ans
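# Hypothetical quick check (not part of the original file), using the examples
# from the docstring above:
if __name__ == "__main__":
    assert Solution().multiply("2", "3") == "6"
    assert Solution().multiply("123", "456") == "56088"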
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/multiply_strings.py",
"copies": "1",
"size": "2015",
"license": "mit",
"hash": 7434603826304700000,
"line_mean": 28.1739130435,
"line_max": 76,
"alpha_frac": 0.5047193244,
"autogenerated": false,
"ratio": 3.575488454706927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9575083170195502,
"avg_score": 0.0010249217822850363,
"num_lines": 69
} |
# 43
from permutation import permutation
from util import list2int
class Solve(object):
def __init__(self):
self.s = permutation([0,1,2,3,4,5,6,7,8,9])
self.s = [x for x in self.s if x[0] != 0]
def f1(self, vec):
return vec[3] % 2 == 0
def f2(self, vec):
return sum([vec[2], vec[3], vec[4]]) % 3 == 0
def f3(self, vec):
return vec[5] % 5 == 0
def f4(self, vec):
return list2int(vec[4:7]) % 7 == 0
def f5(self, vec):
return list2int(vec[5:8]) % 11 == 0
def f6(self, vec):
return list2int(vec[6:9]) % 13 == 0
def f7(self, vec):
return list2int(vec[7:10]) % 17 == 0
def solve(self):
def s_filter(func):
self.s = [x for x in self.s if func(x)]
s_filter(self.f1)
s_filter(self.f2)
s_filter(self.f3)
s_filter(self.f4)
s_filter(self.f5)
s_filter(self.f6)
s_filter(self.f7)
return sum([list2int(x) for x in self.s])
s = Solve()
print(s.solve())
| {
"repo_name": "daicang/Euler",
"path": "p43.py",
"copies": "1",
"size": "1041",
"license": "mit",
"hash": 8048163963726238000,
"line_mean": 20.7083333333,
"line_max": 53,
"alpha_frac": 0.5158501441,
"autogenerated": false,
"ratio": 2.768617021276596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3784467165376596,
"avg_score": null,
"num_lines": null
} |
"""4.3 to 4.4 - add is_hidden_value column to secrets table
Revision ID: c7652b2a97a4
Revises: 3483e421713d
Create Date: 2018-04-03 14:31:11.832546
"""
from alembic import op
import sqlalchemy as sa
from manager_rest.storage.models_base import UTCDateTime
# revision identifiers, used by Alembic.
revision = 'c7652b2a97a4'
down_revision = '3483e421713d'
branch_labels = None
depends_on = None
def upgrade():
# server_default accepts string or SQL element only
op.add_column('secrets', sa.Column('is_hidden_value',
sa.Boolean(),
nullable=False,
server_default='f'))
op.add_column('deployment_updates', sa.Column('_old_blueprint_fk',
sa.Integer(),
nullable=True))
op.add_column('deployment_updates', sa.Column('_new_blueprint_fk',
sa.Integer(),
nullable=True))
op.add_column('deployment_updates', sa.Column('old_inputs',
sa.PickleType(),
nullable=True))
op.add_column('deployment_updates', sa.Column('new_inputs',
sa.PickleType(),
nullable=True))
op.add_column('users', sa.Column('last_failed_login_at',
UTCDateTime(),
nullable=True))
op.add_column('users', sa.Column('failed_logins_counter',
sa.Integer(),
nullable=False,
server_default="0"))
op.add_column('executions', sa.Column('ended_at',
UTCDateTime(),
nullable=True))
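    # 'ALTER TYPE ... ADD VALUE' cannot run inside a transaction block
    # (PostgreSQL < 12), so end the transaction Alembic opened first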
op.execute('COMMIT')
op.execute("alter type execution_status add value 'kill_cancelling'")
def downgrade():
op.drop_column('secrets', 'is_hidden_value')
op.drop_column('deployment_updates', '_old_blueprint_fk')
op.drop_column('deployment_updates', '_new_blueprint_fk')
op.drop_column('deployment_updates', 'old_inputs')
op.drop_column('deployment_updates', 'new_inputs')
op.drop_column('users', 'last_failed_login_at')
op.drop_column('users', 'failed_logins_counter')
op.drop_column('executions', 'ended_at')
# remove the 'kill_cancelling' value of the execution status enum
# we are downgrading, so first change the executions that are currently
# kill_cancelling to something else that makes sense. It might well be
# failed, since during downgrade, mgmtworker is surely not running.
op.execute("""
update executions
set status='failed'
where status='kill_cancelling'
""")
# unfortunately postgres doesn't directly support removing enum values,
# so we create a new type with the correct enum values and swap
# out the old one
op.execute("alter type execution_status rename to execution_status_old")
# create the new type
execution_status = sa.Enum(
'terminated',
'failed',
'cancelled',
'pending',
'started',
'cancelling',
'force_cancelling',
name='execution_status',
)
execution_status.create(op.get_bind())
# update executions to use the new type
op.alter_column('executions',
'status',
type_=execution_status,
postgresql_using='status::text::execution_status')
# remove the old type
op.execute("DROP TYPE execution_status_old;")
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/c7652b2a97a4_4_3_to_4_4.py",
"copies": "1",
"size": "3847",
"license": "apache-2.0",
"hash": -3760479550921424400,
"line_mean": 39.0729166667,
"line_max": 76,
"alpha_frac": 0.5393813361,
"autogenerated": false,
"ratio": 4.4889148191365225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5528296155236523,
"avg_score": null,
"num_lines": null
} |
"""444"""
import hid
from mcp2210 import commands
import time
import sys
import os
import pprint
import time
HID_SEND_BUFFER_OFFSET = (1 if 'nt' in os.name or 'windows' in os.name else 0)
#PY_PRE_2_6 = (True if sys.version_info[0] <= 2 and sys.version_info[1] < 6 else False)
#buf should be an iterable of bytes with len <= 63
def shift_buf(buf):
    if sys.platform.startswith('win'):
#windows
shifted = bytearray(len(buf)+HID_SEND_BUFFER_OFFSET+1)
else:
#linux
shifted = bytearray(len(buf))
#shifted = bytearray(len(buf))
for i in range(len(buf)):
shifted[HID_SEND_BUFFER_OFFSET+i] = buf[i]
#print(len(shifted))
return shifted
def commandbuffer(struct):
return shift_buf(bytearray(struct))
class CommandException(Exception):
"""Thrown when the MCP2210 returns an error status code."""
def __init__(self, code):
super(CommandException, self).__init__("Got error code from device: 0x%.2x" % code)
class GPIOSettings(object):
"""Encapsulates settings for GPIO pins - direction or status."""
def __init__(self, device, get_command, set_command):
self._device = device
self._get_command = get_command
self._set_command = set_command
self._value = None
@property
def raw(self):
if self._value is None:
self._value = self._device.sendCommand(self._get_command()).gpio
# self._value = self._device.sendCommand(self._get_command()).gpio
return self._value
@raw.setter
def raw(self, value):
self._value = value
self._device.sendCommand(self._set_command(value), 'Small')
def __getitem__(self, i):
return (self.raw >> i) & 1
def __setitem__(self, i, value):
if value:
self.raw |= 1 << i
else:
self.raw &= ~(1 << i)
def remote_property(name, get_command, set_command, field_name, doc=None):
"""Property decorator that facilitates writing properties for values from a remote device.
Arguments:
name: The field name to use on the local object to store the cached property.
get_command: A function that returns the remote value of the property.
set_command: A function that accepts a new value for the property and sets it remotely.
field_name: The name of the field to retrieve from the response message to get operations.
"""
def getter(self):
try:
return getattr(self, name)
except AttributeError:
value = getattr(self.sendCommand(get_command()), field_name)
setattr(self, name, value)
return value
def setter(self, value):
setattr(self, name, value)
self.sendCommand(set_command(value))
return property(getter, setter, doc=doc)
class EEPROMData(object):
"""Represents data stored in the MCP2210 EEPROM."""
def __init__(self, device):
self._device = device
def __getitem__(self, key):
if isinstance(key, slice):
return ''.join(self[i] for i in range(*key.indices(255)))
else:
return chr(self._device.sendCommand(commands.ReadEEPROMCommand(key)).data)
def __setitem__(self, key, value):
if isinstance(key, slice):
for i, j in enumerate(range(*key.indices(255))):
self[j] = value[i]
else:
self._device.sendCommand(commands.WriteEEPROMCommand(key, ord(value)))
class MCP2210(object):
"""MCP2210 device interface.
Usage:
>>> dev = MCP2210(my_vid, my_pid)
>>> dev.transfer("data")
Advanced usage:
>>> dev.manufacturer_name = "Foobar Industries Ltd"
>>> print dev.manufacturer_name
Foobar Industries Ltd
>>> dev.product_name = "Foobinator 1.0"
>>> print dev.product_name
Foobinator 1.0
>>> settings = dev.boot_chip_settings
>>> settings.pin_designations[0] = 0x01 # GPIO 0 to chip select
>>> dev.boot_chip_settings = settings # Settings are updated on property assignment
See the MCP2210 datasheet (http://ww1.microchip.com/downloads/en/DeviceDoc/22288A.pdf) for full details
on available commands and arguments.
"""
def __init__(self, vid, pid):
"""Constructor.
Arguments:
vid: Vendor ID
pid: Product ID
"""
self.spi_tx = 0
self.hid = hid.device()
self.hid.open(vid, pid)
self.gpio_direction = GPIOSettings(self, commands.GetGPIODirectionCommand, commands.SetGPIODirectionCommand)
self.gpio = GPIOSettings(self, commands.GetGPIOValueCommand, commands.SetGPIOValueCommand)
self.eeprom = EEPROMData(self)
self.cancel_transfer()
def sendCommand(self, command, lastone = None):
"""Sends a Command object to the MCP2210 and returns its response.
Arguments:
A commands.Command instance
Returns:
A commands.Response instance, or raises a CommandException on error.
"""
command_data = commandbuffer(command)
if lastone and lastone == "Small":
self.hid.write(command_data[0:8])
self.hid.read(0)
#self.cancel_transfer()
#self.hid.read(0)
return
self.hid.write(command_data)
dat = self.hid.read(64)
response_data = bytearray(x for x in dat)
response = command.RESPONSE.from_buffer_copy(response_data)
if response.status != 0:
if response.status == 0xf8:
return self.sendCommand(command, None)
else:
raise CommandException(response.status)
return response
manufacturer_name = remote_property(
'_manufacturer_name',
commands.GetUSBManufacturerCommand,
commands.SetUSBManufacturerCommand,
'string',
doc="Sets and gets the MCP2210 USB manufacturer name")
product_name = remote_property(
'_product_name',
commands.GetUSBProductCommand,
commands.SetUSBProductCommand,
'string',
doc="Sets and gets the MCP2210 USB product name")
boot_chip_settings = remote_property(
'_boot_chip_settings',
commands.GetBootChipSettingsCommand,
commands.SetBootChipSettingsCommand,
'settings',
doc="Sets and gets boot time chip settings such as GPIO assignments")
chip_settings = remote_property(
'_chip_settings',
commands.GetChipSettingsCommand,
commands.SetChipSettingsCommand,
'settings',
doc="Sets and gets current chip settings such as GPIO assignments")
boot_transfer_settings = remote_property(
'_boot_transfer_settings',
commands.GetBootSPISettingsCommand,
commands.SetBootSPISettingsCommand,
'settings',
doc="Sets and gets boot time transfer settings such as data rate")
transfer_settings = remote_property(
'_transfer_settings',
commands.GetSPISettingsCommand,
commands.SetSPISettingsCommand,
'settings',
doc="Sets and gets current transfer settings such as data rate")
boot_usb_settings = remote_property(
'_boot_usb_settings',
commands.GetBootUSBSettingsCommand,
commands.SetBootUSBSettingsCommand,
'settings',
doc="Sets and gets boot time USB settings such as VID and PID")
def authenticate(self, password):
"""Authenticates against a password-protected MCP2210.
Arguments:
password: The password to use.
"""
self.sendCommand(commands.SendPasswordCommand(password))
def transfer(self, data):
"""Transfers data over SPI.
Arguments:
data: The data to transfer.
Returns:
The data returned by the SPI device.
"""
if self.spi_tx != len(data):
settings = self.transfer_settings
settings.spi_tx_size = len(data)
self.transfer_settings = settings
self.spi_tx = len(data)
response = ''
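        # SPI data goes out in 60-byte chunks, the per-HID-report payload limit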
for i in range(0, len(data), 60):
response += self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data
if len(response) < len(data):
self.sendCommand(commands.SPITransferCommand(''), 'Small')
return ''.join(response)
def cancel_transfer(self):
"""Cancels any ongoing transfers."""
self.sendCommand(commands.CancelTransferCommand())
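# Hypothetical usage sketch (not part of the original file). 0x04d8/0x00de are
# assumed here to be Microchip's default VID/PID for the MCP2210; adjust for
# your device.
if __name__ == '__main__':
    dev = MCP2210(0x04d8, 0x00de)
    dev.gpio_direction.raw = 0x00  # configure all GPIO pins as outputs
    print(dev.product_name)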
| {
"repo_name": "calcite/ResetBuddy",
"path": "RB_UTILITY/mcp2210/device.py",
"copies": "1",
"size": "8586",
"license": "mit",
"hash": 5629947500615108000,
"line_mean": 31.4,
"line_max": 116,
"alpha_frac": 0.6205450734,
"autogenerated": false,
"ratio": 4.048090523338048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5168635596738047,
"avg_score": null,
"num_lines": null
} |
# 4455770 Dennis Verheijden KI
# 4474139 Remco van der Heijden KI
# multiAgents.py
# --------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from util import manhattanDistance, nearestPoint
from game import Directions, Agent, Actions
import random, util
import distanceCalculator
class CompetitionAgent(Agent):
"""
A base class for competition agents. The convenience methods herein handle
some of the complications of the game.
Recommended Usage: Subclass CompetitionAgent and override getAction.
"""
#############################
# Methods to store key info #
#############################
def __init__(self, index=0):
"""
Lists several variables you can query:
self.index = index for this agent
self.distancer = distance calculator (contest code provides this)
self.timeForComputing = an amount of time to give each turn for computing maze distances
(part of the provided distance calculator)
"""
# Agent index for querying state, N.B. pacman is always agent 0
self.index = index
# Maze distance calculator
self.distancer = None
# Time to spend each turn on computing maze distances
# Access to the graphics
self.display = None
# useful function to find functions you've defined elsewhere..
# self.usefulFunction = util.lookup(usefulFn, globals())
# self.evaluationFunction = util.lookup(evalFn, globals())
def registerInitialState(self, gameState):
"""
This method handles the initial setup of the
agent to populate useful fields.
A distanceCalculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.getDistance(p1, p2)
"""
self.distancer = distanceCalculator.Distancer(gameState.data.layout)
# comment this out to forgo maze distance computation and use manhattan distances
self.distancer.getMazeDistances()
# Static world properties
self.wallList = gameState.getWalls()
self.wallHeight = self.wallList.height
self.wallWidth = self.wallList.width
# Determine in which world you are
        if self.wallHeight == 9 and self.wallWidth == 25:
            self.world = 'level0'
        elif self.wallHeight == 7 and self.wallWidth == 20:
            self.world = 'level1'
        elif self.wallHeight == 13 and self.wallWidth == 20:
            self.world = 'level2'
        elif self.wallHeight == 27 and self.wallWidth == 28:
            self.world = 'level3'
        else:
            self.world = 'unknown'
# Set the depth at which you want to search
if self.world == 'level0':
self.depth = 2
self.timeForComputing = .2
if self.world == 'level1':
self.depth = 3
self.timeForComputing = .2
if self.world == 'level2':
self.depth = 2
self.timeForComputing = .3
self.capsuleImpulse = True
if self.world == 'level3':
self.depth = 3
self.timeForComputing = .25
if self.world == 'unknown':
self.depth = 2
self.timeForComputing = .2
# Prepare for the pacman ExploredList
self.exploredListGrid = [[0 for x in range(100)] for x in range(100)]
self.exploredList = []
# Prepare for the ghost properties
# ghostIndex, DistanceToGhost, ScaredTime = ghost
self.ghosts = [(0, float('Inf'), 0), (1, float('Inf'), 0), (2, float('Inf'), 0), (3, float('Inf'), 0)]
        # If the response is triggered to get a capsule, then go get it
self.capsuleImpulse = False
import __main__
if '_display' in dir(__main__):
self.display = __main__._display
#################
# Action Choice #
#################
def getAction(self, gameState):
"""
Override this method to make a good agent. It should return a legal action within
the time limit (otherwise a random legal action will be chosen for you).
"""
util.raiseNotDefined()
#######################
# Convenience Methods #
#######################
def getFood(self, gameState):
"""
Returns the food you're meant to eat. This is in the form of a matrix
where m[x][y]=true if there is food you can eat (based on your team) in that square.
"""
return gameState.getFood()
def getCapsules(self, gameState):
return gameState.getCapsules()
def getScore(self, gameState):
"""
Returns how much you are beating the other team by in the form of a number
that is the difference between your score and the opponents score. This number
is negative if you're losing.
"""
return gameState.getScore()
def getMazeDistance(self, pos1, pos2):
"""
Returns the distance between two points; These are calculated using the provided
distancer object.
If distancer.getMazeDistances() has been called, then maze distances are available.
Otherwise, this just returns Manhattan distance.
"""
d = self.distancer.getDistance(pos1, pos2)
return d
class MyPacmanAgent(CompetitionAgent):
"""
This is going to be your brilliant competition agent.
    You might want to copy code from BaselineAgent (above) and/or any previous assignment.
"""
def getAction(self, gameState):
"""
getAction chooses among the best options according to the evaluation function.
Just like in the previous projects, getAction takes a GameState and returns
some Directions.X for some X in the set {North, South, West, East, Stop}.
"""
# Add current position to your exploredList (only look at the last 20 positions)
x, y = gameState.getPacmanPosition()
self.exploredListGrid[x][y] += 1
self.exploredList.append((x, y))
if len(self.exploredList) > 20:
x, y = self.exploredList.pop(0)
self.exploredListGrid[x][y] += -1
# Update the previous food and capsule state
self.foodGrid = gameState.getFood()
self.capsules = gameState.getCapsules()
self.oldScore = gameState.getScore()
self.nrOfFoods = len(self.foodGrid.asList())
# Helper Functions
def maxValue(state, currentDepth, alpha, beta):
"""
Calculates the maximum score possible for the pacman Agent
"""
currentDepth = currentDepth + 1
if state.isWin() or state.isLose() or currentDepth == self.depth:
return self.evaluationFunction(state)
maxScore = float('-Inf')
for pacmanAction in state.getLegalActions(0):
maxScore = max(maxScore, minValue(state.generateSuccessor(0, pacmanAction), currentDepth, 1, alpha, beta))
alpha = max(alpha, maxScore)
if beta <= alpha:
break # prune
return maxScore
def minValue(state, currentDepth, ghostIndex, alpha, beta):
"""
Calculates the minimum score possible for the ghost Agent(s)
"""
if state.isWin() or state.isLose():
return self.evaluationFunction(state)
minScore = float('Inf')
for ghostAction in state.getLegalActions(ghostIndex):
if ghostIndex == gameState.getNumAgents() - 1:
minScore = min(minScore, maxValue(state.generateSuccessor(ghostIndex, ghostAction), currentDepth, alpha, beta))
else:
minScore = min(minScore, minValue(state.generateSuccessor(ghostIndex, ghostAction), currentDepth, ghostIndex + 1, alpha, beta))
beta = min(beta, minScore)
if beta <= alpha:
break # prune
return minScore
# Begin AlphaBeta
pacmanActions = gameState.getLegalActions(0)
pacmanActions.remove("Stop")
maximum = float('-Inf')
alpha = float('-Inf')
beta = float('Inf')
maxAction = ''
for pacmanAction in pacmanActions:
currentDepth = 0
currentMax = minValue(gameState.generateSuccessor(0, pacmanAction), currentDepth, 1, alpha, beta)
if currentMax > maximum:
maximum = currentMax
maxAction = pacmanAction
        if maxAction == '':
            if getattr(self, 'lastAction', None) in pacmanActions:
                return self.lastAction
            else:
                return random.choice(pacmanActions)
self.lastAction = maxAction
return maxAction
def evaluationFunction(self, state):
"""
Masterful Evaluation Function
"""
# Utilise a counter for the heuristic
heuristic = util.Counter()
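        # each named entry below is one weighted term; the final value is the
        # sum of all entries (Counter.totalCount())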
# World Properties
oldFoodGrid = self.foodGrid
foodGrid = state.getFood()
nrOfFoods = len(foodGrid.asList())
capsules = self.capsules
# Pacman Properties
pacmanPosition = state.getPacmanPosition()
xPacman, yPacman = pacmanPosition
pacmanActions = set(Actions.getLegalNeighbors(pacmanPosition, self.wallList))
# Ghost Properties
ghostPositions = state.getGhostPositions()
ghostStates = state.getGhostStates()
nrGhosts = state.getNumAgents() - 1
ghostActions = []
totalGhostDistance = 0
minGhostDistance = float('Inf')
minScaredGhostDistance = float('Inf')
maxScaredTimer = float('-Inf')
for ghost in range(nrGhosts):
            ghostIndex, ghostDistance, scaredTime = self.ghosts[ghost]
ghostDistance = self.getMazeDistance(pacmanPosition, ghostPositions[ghost])
totalGhostDistance += ghostDistance
scaredTime = ghostStates[ghost].scaredTimer
ghostActions += Actions.getLegalNeighbors(ghostPositions[ghost], self.wallList)
if ghostDistance < minScaredGhostDistance and scaredTime > 0:
minScaredGhostDistance = ghostDistance
if ghostDistance < minGhostDistance:
minGhostDistance = ghostDistance
if scaredTime > maxScaredTimer:
maxScaredTimer = scaredTime
self.ghosts[ghost] = (ghostIndex, ghostDistance, scaredTime)
# Help Functions
def minFoodDist(foodGrid, position):
"""
Returns the minimum food distance
It first searches for foods that are close by to save computation time.
"""
x, y = position
distances = []
if (x < 7):
x = 4
if (x >= self.wallWidth - 2):
x += -4
if (y < 7):
y = 4
if (y >= self.wallHeight - 2):
y += -4
for xFood in range(x-3,x+3,1):
for yFood in range (y-3,y+3,1):
food = foodGrid[xFood][yFood]
if food:
distances.append(self.getMazeDistance((xFood, yFood), position))
if len(distances) == 0:
distances = [self.getMazeDistance(food, position) for food in foodGrid.asList()]
if len(distances) > 0:
minDistance = min(distances)
return minDistance
else:
return 0
# Check for trapped situations (there are no good options for pacman)
goodActions = pacmanActions - set(ghostActions)
if not goodActions:
heuristic['trapped'] = -2000
# Lose case
if state.isLose():
return float('-Inf')
# Prefer not to visit already visited places (avoiding loops)
if self.exploredListGrid[xPacman][yPacman] > 2 and not(maxScaredTimer > 0):
heuristic['beenThere'] = -100 * self.exploredListGrid[xPacman][yPacman]
foodDifference = self.nrOfFoods - nrOfFoods
if foodDifference == 1:
heuristic['OneFoodLess'] = 1000
# Minimum distance to the food
if not(maxScaredTimer > 0):
if not oldFoodGrid[xPacman][yPacman]:
heuristic['minFoodDistance'] = -minFoodDist(foodGrid, pacmanPosition)/(self.wallWidth * self.wallHeight)
# Eating ghosts
if maxScaredTimer > 1:
# if maxScaredTimer < 2 * minScaredGhostDistance and maxScaredTimer > 0:
heuristic['nearScaredGhost'] = 100 / minScaredGhostDistance
# Prioritise ghost eating when ghosts are scared, not food
if maxScaredTimer > 0:
if oldFoodGrid[xPacman][yPacman]:
heuristic['eatFood'] = -10
# Capsule Reasoning
capsuleDistance = [self.getMazeDistance(capsule, pacmanPosition) for capsule in capsules]
if capsuleDistance and minGhostDistance < 10 and min(capsuleDistance) < 10:
self.capsuleImpulse = True
        # Eat the power pellets before finishing the level
if capsuleDistance and self.nrOfFoods == 1 and oldFoodGrid[xPacman][yPacman]:
heuristic['PowerpeletFirst'] = -1000
self.capsuleImpulse = True
        # If ghosts are not scared, then don't give a higher heuristic for capsule eating
if self.capsuleImpulse and not(maxScaredTimer > 0):
if capsuleDistance:
heuristic['nearCapsule'] = 10 / min(capsuleDistance)
if pacmanPosition in capsules:
heuristic['eatCapsule'] = 300
self.capsuleImpulse = False
# World specific heuristics
if self.world == 'level0' or self.world == 'level1':
if self.nrOfFoods == 1 and maxScaredTimer > 0 and oldFoodGrid[xPacman][yPacman]:
heuristic['GhostsFirst'] = -10000
heuristic['score'] = state.getScore()
return heuristic.totalCount()
MyPacmanAgent = MyPacmanAgent
| {
"repo_name": "ScaleRunner/PacmanAI",
"path": "6 - Contest/competitionAgents.py",
"copies": "1",
"size": "13362",
"license": "mit",
"hash": 8198738194058498000,
"line_mean": 34.6346666667,
"line_max": 143,
"alpha_frac": 0.649753031,
"autogenerated": false,
"ratio": 3.7344885410844046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48842415720844046,
"avg_score": null,
"num_lines": null
} |
# 4455770 Dennis Verheijden KI
# 4474139 Remco van der Heijden KI
# searchAgents.py
# ---------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
This file contains all of the agents that can be selected to
control Pacman. To select an agent, use the '-p' option
when running pacman.py. Arguments can be passed to your agent
using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a searchFunction=depthFirstSearch
Commands to invoke other search strategies can be found in the
project description.
Please only change the parts of the file you are asked to.
Look for the lines that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the
project description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
import searchAgents
class GoWestAgent(Agent):
"An agent that goes West until it can't."
def getAction(self, state):
"The agent receives a GameState (defined in pacman.py)."
if Directions.WEST in state.getLegalPacmanActions():
return Directions.WEST
else:
return Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
"""
This very general search agent finds a path using a supplied search algorithm for a
supplied search problem, then returns actions to follow that path.
As a default, this agent runs DFS on a PositionSearchProblem to find location (1,1)
Options for fn include:
depthFirstSearch or dfs
breadthFirstSearch or bfs
Note: You should NOT change any code in SearchAgent
"""
def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
# Warning: some advanced Python magic is employed below to find the right functions and problems
# Get the search function from the name and heuristic
if fn not in dir(search):
raise AttributeError(fn + ' is not a search function in search.py.')
func = getattr(search, fn)
if 'heuristic' not in func.__code__.co_varnames:
print(('[SearchAgent] using function ' + fn))
self.searchFunction = func
else:
if heuristic in dir(searchAgents):
heur = getattr(searchAgents, heuristic)
elif heuristic in dir(search):
heur = getattr(search, heuristic)
else:
raise AttributeError(heuristic + ' is not a function in searchAgents.py or search.py.')
print(('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic)))
# Note: this bit of Python trickery combines the search algorithm and the heuristic
self.searchFunction = lambda x: func(x, heuristic=heur)
# Get the search problem type from the name
if prob not in dir(searchAgents) or not prob.endswith('Problem'):
raise AttributeError(prob + ' is not a search problem type in SearchAgents.py.')
self.searchType = getattr(searchAgents, prob)
print(('[SearchAgent] using problem type ' + prob))
def registerInitialState(self, state):
"""
This is the first time that the agent sees the layout of the game board. Here, we
choose a path to the goal. In this phase, the agent should compute the path to the
goal and store it in a local variable. All of the work is done in this method!
state: a GameState object (pacman.py)
"""
if self.searchFunction == None: raise Exception("No search function provided for SearchAgent")
starttime = time.time()
problem = self.searchType(state) # Makes a new search problem
self.actions = self.searchFunction(problem) # Find a path
totalCost = problem.getCostOfActions(self.actions)
print(('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime)))
if '_expanded' in dir(problem): print(('Search nodes expanded: %d' % problem._expanded))
def getAction(self, state):
"""
Returns the next action in the path chosen earlier (in registerInitialState). Return
Directions.STOP if there is no further action to take.
state: a GameState object (pacman.py)
"""
if 'actionIndex' not in dir(self): self.actionIndex = 0
i = self.actionIndex
self.actionIndex += 1
if i < len(self.actions):
return self.actions[i]
else:
return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
"""
A search problem defines the state space, start state, goal test,
successor function and cost function. This search problem can be
used to find paths to a particular point on the pacman board.
The state space consists of (x,y) positions in a pacman game.
Note: this search problem is fully specified; you should NOT change it.
"""
def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True):
"""
Stores the start and goal.
gameState: A GameState object (pacman.py)
costFn: A function from a search state (tuple) to a non-negative number
goal: A position in the gameState
"""
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
if start != None: self.startState = start
self.goal = goal
self.costFn = costFn
if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
print('Warning: this does not look like a regular search maze')
# For display purposes
self._visited, self._visitedlist, self._expanded = {}, [], 0
def getStartState(self):
return self.startState
def isGoalState(self, state):
isGoal = state == self.goal
# For display purposes only
if isGoal:
self._visitedlist.append(state)
import __main__
if '_display' in dir(__main__):
if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
__main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable
return isGoal
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state
dx, dy = Actions.directionToVector(action)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextState = (nextx, nexty)
cost = self.costFn(nextState)
successors.append( ( nextState, action, cost) )
# Bookkeeping for display purposes
self._expanded += 1
if state not in self._visited:
self._visited[state] = True
self._visitedlist.append(state)
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999
"""
if actions == None: return 999999
x,y= self.getStartState()
cost = 0
for action in actions:
            # Figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
cost += self.costFn((x,y))
return cost
class StayEastSearchAgent(SearchAgent):
"""
An agent for position search with a cost function that penalizes being in
positions on the West side of the board.
The cost function for stepping into a position (x,y) is 1/2^x.
"""
def __init__(self):
self.searchFunction = search.uniformCostSearch
costFn = lambda pos: .5 ** pos[0]
self.searchType = lambda state: PositionSearchProblem(state, costFn)
class StayWestSearchAgent(SearchAgent):
"""
An agent for position search with a cost function that penalizes being in
positions on the East side of the board.
The cost function for stepping into a position (x,y) is 2^x.
"""
def __init__(self):
self.searchFunction = search.uniformCostSearch
costFn = lambda pos: 2 ** pos[0]
self.searchType = lambda state: PositionSearchProblem(state, costFn)
def manhattanHeuristic(position, problem, info={}):
"The Manhattan distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
def euclideanHeuristic(position, problem, info={}):
"The Euclidean distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return ( (xy1[0] - xy2[0]) ** 2 + (xy1[1] - xy2[1]) ** 2 ) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class CornersProblem(search.SearchProblem):
"""
This search problem finds paths through all four corners of a layout.
You must select a suitable state space and successor function
"""
def __init__(self, startingGameState):
"""
Stores the walls, pacman's starting position and corners.
"""
self.walls = startingGameState.getWalls()
self.startingPosition = startingGameState.getPacmanPosition()
top, right = self.walls.height-2, self.walls.width-2
self.corners = ((1,1), (1,top), (right, 1), (right, top))
for corner in self.corners:
if not startingGameState.hasFood(*corner):
print(('Warning: no food in corner ' + str(corner)))
self._expanded = 0 # Number of search nodes expanded
self.startState = (self.startingPosition, self.corners)
def getStartState(self):
"Returns the start state (in your state space, not the full Pacman state space)"
        # a state contains: (position, unvisitedCorners)
return self.startState
def isGoalState(self, state):
"Returns whether this search state is a goal state of the problem"
        # the goal is reached when unvisitedCorners is empty
return len(state[1]) == 0
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
# Add a successor state to the successor list if the action is legal
            x, y = state[0]
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            hitsWall = self.walls[nextx][nexty]
if not hitsWall:
newNode = (nextx, nexty)
if newNode in state[1]:
                    # unvisitedCorners is a tuple, so convert it to a list before removing
unvisitedCorners = list(state[1])
unvisitedCorners.remove(newNode)
successors.append(((newNode, tuple(unvisitedCorners)), action, 1))
else:
successors.append(((newNode, state[1]), action, 1))
self._expanded += 1
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999. This is implemented for you.
"""
if actions == None: return 999999
x,y= self.startingPosition
for action in actions:
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]:
return 999999
return len(actions)
def cornersHeuristic(state, problem):
"""
A heuristic for the CornersProblem that you defined.
state: The current search state
(a data structure you chose in your search problem)
problem: The CornersProblem instance for this layout.
This function should always return a number that is a lower bound
on the shortest path from the state to a goal of the problem; i.e.
it should be admissible. (You need not worry about consistency for
this heuristic to receive full credit.)
"""
corners = problem.corners # These are the corner coordinates
walls = problem.walls # These are the walls of the maze, as a Grid (game.py)
heuristic = 0
currentNode = state[0]
unvisitedCorners = list(state[1])
    # While unvisited corners remain, greedily step to the nearest corner and
    # accumulate the Manhattan distances travelled
    while len(unvisitedCorners) > 0:
        cornerList = []
        for corner in unvisitedCorners:
            cornerList.append((util.manhattanDistance(currentNode, corner), corner))
        distance, corner = min(cornerList)
        unvisitedCorners.remove(corner)
        heuristic += distance
        currentNode = corner
    return heuristic
class AStarCornersAgent(SearchAgent):
"A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
def __init__(self):
self.searchFunction = lambda prob: search.aStarSearch(prob, cornersHeuristic)
self.searchType = CornersProblem
class FoodSearchProblem:
"""
    A search problem associated with finding a path that collects all of the
food (dots) in a Pacman game.
A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
foodGrid: a Grid (see game.py) of either True or False, specifying remaining food
"""
def __init__(self, startingGameState):
self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
self.walls = startingGameState.getWalls()
self.startingGameState = startingGameState
self._expanded = 0
self.heuristicInfo = {} # A dictionary for the heuristic to store information
def getStartState(self):
return self.start
def isGoalState(self, state):
return state[1].count() == 0
def getSuccessors(self, state):
"Returns successor states, the actions they require, and a cost of 1."
successors = []
self._expanded += 1
for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state[0]
dx, dy = Actions.directionToVector(direction)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextFood = state[1].copy()
nextFood[nextx][nexty] = False
successors.append((((nextx, nexty), nextFood), direction, 1))
return successors
def getCostOfActions(self, actions):
"""Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999"""
x,y= self.getStartState()[0]
cost = 0
for action in actions:
# figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]:
return 999999
cost += 1
return cost
class AStarFoodSearchAgent(SearchAgent):
"A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
def __init__(self):
self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
"""
Your heuristic for the FoodSearchProblem goes here.
This heuristic must be consistent to ensure correctness. First, try to come up
with an admissible heuristic; almost all admissible heuristics will be consistent
as well.
    If using A* ever finds a solution that is worse than uniform cost search finds,
your heuristic is *not* consistent, and probably not admissible! On the other hand,
inadmissible or inconsistent heuristics may find optimal solutions, so be careful.
The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a
Grid (see game.py) of either True or False. You can call foodGrid.asList()
to get a list of food coordinates instead.
If you want access to info like walls, capsules, etc., you can query the problem.
For example, problem.walls gives you a Grid of where the walls are.
If you want to *store* information to be reused in other calls to the heuristic,
there is a dictionary called problem.heuristicInfo that you can use. For example,
if you only want to count the walls once and store that value, try:
problem.heuristicInfo['wallCount'] = problem.walls.count()
Subsequent calls to this heuristic can access problem.heuristicInfo['wallCount']
"""
position, foodGrid = state
heuristic = 0
currentNode = position
unvisitedFoods = foodGrid.asList()
    # While food remains, greedily step to the nearest food dot and accumulate
    # the Manhattan distances travelled
    while len(unvisitedFoods) > 0:
        foodList = []
        for food in unvisitedFoods:
            foodList.append((util.manhattanDistance(currentNode, food), food))
        distance, food = min(foodList)
        unvisitedFoods.remove(food)
        heuristic += distance
        currentNode = food
    return heuristic
class ClosestDotSearchAgent(SearchAgent):
"Search for all food using a sequence of searches"
def registerInitialState(self, state):
self.actions = []
currentState = state
while(currentState.getFood().count() > 0):
nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
self.actions += nextPathSegment
for action in nextPathSegment:
legal = currentState.getLegalActions()
if action not in legal:
t = (str(action), str(currentState))
raise Exception('findPathToClosestDot returned an illegal move: %s!\n%s' % t)
currentState = currentState.generateSuccessor(0, action)
self.actionIndex = 0
print(('Path found with cost %d.' % len(self.actions)))
def findPathToClosestDot(self, gameState):
"Returns a path (a list of actions) to the closest dot, starting from gameState"
# Here are some useful elements of the startState
startPosition = gameState.getPacmanPosition()
foodList = list(gameState.getFood())
walls = gameState.getWalls()
problem = AnyFoodSearchProblem(gameState)
path = search.bfs(problem)
return path
class AnyFoodSearchProblem(PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but
has a different goal test, which you need to fill in below. The
state space and successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in
the findPathToClosestDot method.
"""
def __init__(self, gameState):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test
that will complete the problem definition.
"""
        # Return True if there is food at Pacman's current position
x,y = state
foodList = list(self.food)
#print(foodList[0][1])
return foodList[x][y]
class CrossroadSearchAgent(SearchAgent):
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
# Add a successor state to the successor list if the action is legal
# Here's a code snippet for figuring out whether a new position hits a wall:
# x,y = currentPosition
# dx, dy = Actions.directionToVector(action)
# nextx, nexty = int(x + dx), int(y + dy)
# hitsWall = self.walls[nextx][nexty]
# Bookkeeping for display purposes
self._expanded += 1
"*** YOUR CODE HERE ***"
return successors
##################
# Mini-contest 1 #
##################
class ApproximateSearchAgent(Agent):
"Implement your contest entry here. Change anything but the class name."
def registerInitialState(self, state):
"This method is called before any moves are made."
"*** YOUR CODE HERE ***"
self.walls = state.walls
def getAction(self, state):
"""
From game.py:
The Agent will receive a GameState and must return an action from
Directions.{North, South, East, West, Stop}
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
def mazeDistance(point1, point2, gameState):
"""
Returns the maze distance between any two points, using the search functions
you have already built. The gameState can be any game state -- Pacman's position
in that state is ignored.
Example usage: mazeDistance( (2,4), (5,6), gameState)
This might be a useful helper function for your ApproximateSearchAgent.
"""
x1, y1 = point1
x2, y2 = point2
walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
return len(search.bfs(prob))
| {
"repo_name": "ScaleRunner/PacmanAI",
"path": "4 - Multiagent/searchAgents.py",
"copies": "2",
"size": "23102",
"license": "mit",
"hash": -4612684963125125000,
"line_mean": 36.0224358974,
"line_max": 102,
"alpha_frac": 0.6775603844,
"autogenerated": false,
"ratio": 3.7699086161879896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.544746900058799,
"avg_score": null,
"num_lines": null
} |
# 4455770 Dennis Verheijden KI
# 4474139 Remco van der Heijden KI
# search.py
# ---------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
In search.py, you will implement generic search algorithms which are called
by Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other
maze, the sequence of moves will be incorrect, so only use this for tinyMaze
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s,s,w,s,w,w,s,w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first [p 85].
Your search algorithm needs to return a list of actions that reaches
the goal. Make sure to implement a graph search algorithm [Fig. 3.7].
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
    path = util.Stack()  # path contains: [(current position, actions so far), ...]
    exploredList = []
    start = problem.getStartState()
    path.push((start, []))  # no actions taken yet
    # print("Start state:", start)
    while not path.isEmpty():
        position, actions = path.pop()  # unpack an element of path
        for next, direction, cost in problem.getSuccessors(position):
            if next not in exploredList:  # loop detection
                # print("Next node:", next, "in", direction)
                if problem.isGoalState(next):
                    return actions + [direction]
                path.push((next, actions + [direction]))
                exploredList.append(next)
    return []
def breadthFirstSearch(problem):
    path = util.Queue()  # path contains: [(current position, actions so far), ...]
    exploredList = []
    start = problem.getStartState()
    path.push((start, []))  # no actions taken yet
    # print("Start state:", start)
    while not path.isEmpty():
        position, actions = path.pop()  # unpack an element of path
        for next, direction, cost in problem.getSuccessors(position):
            if next not in exploredList:  # loop detection
                # print("Next node:", next, "in", direction)
                if problem.isGoalState(next):
                    return actions + [direction]
                path.push((next, actions + [direction]))
                exploredList.append(next)
    return []
def uniformCostSearch(problem):
"Search the node of least total cost first. "
path = util.PriorityQueue() #path bevat: [(huidige positie, acties tot nu toe) ...]
exploredList= []
start = problem.getStartState()
path.push((start, []),0) #Er zijn nog geen mogelijke acties
#print("Startstate:", start)
while not path.isEmpty():
position, actions = path.pop() #opsplitsen van een element in path
for next, direction, cost in problem.getSuccessors(position):
if not next in exploredList: #loop-detectie
#print("Volgende node:", next, "in", direction)
if problem.isGoalState(next):
return actions + [direction]
new_actions = actions + [direction]
path.push((next, actions + [direction]),problem.getCostOfActions(new_actions))
exploredList.append(next)
return[]
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"Search the node that has the lowest combined cost and heuristic first."
    path = util.PriorityQueue()  # path contains: [(current position, actions so far), ...]
    exploredList = []
    start = problem.getStartState()
    path.push((start, []), 0)  # no actions taken yet
    # print("Start state:", start)
    while not path.isEmpty():
        position, actions = path.pop()  # unpack an element of path
        for next, direction, cost in problem.getSuccessors(position):
            if next not in exploredList:  # loop detection
                # print("Next node:", next, "in", direction)
                if problem.isGoalState(next):
                    return actions + [direction]
                new_actions = actions + [direction]
                totalCost = problem.getCostOfActions(new_actions) + heuristic(next, problem)
                path.push((next, new_actions), totalCost)
                exploredList.append(next)
    return []
"Bonus assignment: Adjust the getSuccessors() method in CrossroadSearchAgent class"
"in searchAgents.py and test with:"
"python pacman.py -l bigMaze -z .5 -p CrossroadSearchAgent -a fn=astar,heuristic=manhattanHeuristic "
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
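# A minimal sketch (not part of the original assignment) showing how the
# algorithms above consume the SearchProblem interface; the toy graph, its
# action labels and costs are invented for illustration only.
if __name__ == '__main__':
    class ToyProblem(SearchProblem):
        # States: 'A' -> 'B' -> 'C'; the goal is 'C'.
        def getStartState(self):
            return 'A'
        def isGoalState(self, state):
            return state == 'C'
        def getSuccessors(self, state):
            graph = {'A': [('B', 'East', 1)], 'B': [('C', 'East', 1)], 'C': []}
            return graph[state]
        def getCostOfActions(self, actions):
            return len(actions)
    print(bfs(ToyProblem()))  # expected: ['East', 'East']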
| {
"repo_name": "ScaleRunner/PacmanAI",
"path": "4 - Multiagent/search.py",
"copies": "2",
"size": "6615",
"license": "mit",
"hash": 5641382011822818000,
"line_mean": 34.7567567568,
"line_max": 103,
"alpha_frac": 0.6696900983,
"autogenerated": false,
"ratio": 3.730964467005076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5400654565305076,
"avg_score": null,
"num_lines": null
} |
# 447. Number of Boomerangs
# Given n points in the plane that are all pairwise distinct, a "boomerang" is a tuple of points (i, j, k)
# such that the distance between i and j equals the distance between i and k (the order of the tuple matters).
#
# Find the number of boomerangs. You may assume that n will be at most 500 and coordinates of points
# are all in the range [-10000, 10000] (inclusive).
#
# Example:
#
# Input:
# [[0,0],[1,0],[2,0]]
#
# Output:
# 2
#
# Explanation:
# The two boomerangs are [[1,0],[0,0],[2,0]] and [[1,0],[2,0],[0,0]]
# https://leetcode.com/problems/number-of-boomerangs/discuss/92868/Short-Python-O(n2)-hashmap-solution
# For each point, create a hashmap and count all points at each distance.
# If for a point p there are k points with distance d, the number of boomerangs
# corresponding to that is k*(k-1): k choices for j times k-1 choices for l.
# Keep adding these to get the final result.
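# Worked check on the example above (an added illustration): for the middle
# point [1,0], both [0,0] and [2,0] lie at squared distance 1, so k = 2 and
# k*(k-1) = 2 boomerangs; each end point sees two distinct distances (k = 1
# for each), contributing 0.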
class Solution:
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
# function to calculate distance between two points
def dis(a, b):
dx = a[0] - b[0]
dy = a[1] - b[1]
return dx * dx + dy * dy
res = 0
for p in points:
            cmap = {}  # create a fresh hashmap for each point
for q in points:
d = dis(p, q)
cmap[d] = cmap.get(d, 0) + 1
for k in cmap:
res += cmap[k] * (cmap[k] - 1)
return res
print(Solution().numberOfBoomerangs([[0,0],[1,0],[2,0]]))
| {
"repo_name": "gengwg/leetcode",
"path": "447_number_of_boomerangs.py",
"copies": "1",
"size": "1567",
"license": "apache-2.0",
"hash": 3079895666088842000,
"line_mean": 32.3404255319,
"line_max": 111,
"alpha_frac": 0.596043395,
"autogenerated": false,
"ratio": 3.2242798353909463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4320323230390946,
"avg_score": null,
"num_lines": null
} |
# 448. Find All Numbers Disappeared in an Array
# Given an array of integers where 1 ≤ a[i] ≤ n (n = size of array),
# some elements appear twice and others appear once.
#
# Find all the elements of [1, n] inclusive that do not appear in this array.
#
# Could you do it without extra space and in O(n) runtime?
# You may assume the returned list does not count as extra space.
#
# Example:
#
# Input:
# [4,3,2,7,8,2,3,1]
#
# Output:
# [5,6]
class Solution:
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
# initialize a hashmap of 1..n with value 0
cmap = {i:0 for i in range(1, len(nums)+1)}
# store the count of each number in nums
for num in nums:
cmap[num] += 1
res = []
# those num with count zero are missing from 1..n
for k in cmap:
if cmap[k] == 0:
res.append(k)
return res
print(Solution().findDisappearedNumbers([4,3,2,7,8,2,3,1]))
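# A constant-extra-space alternative (the classic sign-marking trick, added
# here as a sketch; it is not the solution above and mutates its input):
# flip nums[abs(v) - 1] negative for every value v seen; indices whose value
# stays positive correspond to the missing numbers.
def find_disappeared_numbers_inplace(nums):
    for v in nums:
        idx = abs(v) - 1
        if nums[idx] > 0:
            nums[idx] = -nums[idx]
    return [i + 1 for i, v in enumerate(nums) if v > 0]

print(find_disappeared_numbers_inplace([4, 3, 2, 7, 8, 2, 3, 1]))  # [5, 6]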
| {
"repo_name": "gengwg/leetcode",
"path": "448_find_all_numbers_disappeared_in_an_array.py",
"copies": "1",
"size": "1031",
"license": "apache-2.0",
"hash": -5966020215223221000,
"line_mean": 25.3333333333,
"line_max": 77,
"alpha_frac": 0.5871470302,
"autogenerated": false,
"ratio": 3.219435736677116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4306582766877116,
"avg_score": null,
"num_lines": null
} |
# 44
import math
import itertools
class Solve(object):
def __init__(self):
pass
def solve(self):
"""
        Find pentagonal numbers small < big such that both
        delta = big - small (assuming delta < small) and
        sum = small + big are also pentagonal.
        We loop over big_x and small_x and test whether:
        1. delta = big - small is pentagonal
        2. sum = big + small is pentagonal
"""
def is_pentagon(y):
"""
            Test whether y is pentagonal via the inverse function:
            forward: y = x(3x - 1) / 2
            inverse: x = (sqrt(24y + 1) + 1) / 6
"""
x = (math.sqrt(24 * y + 1) + 1) / 6
return int(x) == x and x
def func(x):
return x * (3 * x - 1) / 2
for big_x in itertools.count(start=1):
for small_x in range(1, big_x):
small = func(small_x)
big = func(big_x)
sum = small + big
delta = big - small
if is_pentagon(sum) and is_pentagon(delta):
return delta
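# Worked check of the inverse test above (an added illustration): the 4th
# pentagonal number is 22, and (sqrt(24*22 + 1) + 1) / 6 = (23 + 1) / 6 = 4,
# an integer; for the non-pentagonal 23, sqrt(553) is irrational, so
# int(x) == x fails.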
s = Solve()
print s.solve() | {
"repo_name": "daicang/Euler",
"path": "p44.py",
"copies": "1",
"size": "1167",
"license": "mit",
"hash": -4831317539330718000,
"line_mean": 26.1627906977,
"line_max": 67,
"alpha_frac": 0.4747215081,
"autogenerated": false,
"ratio": 3.5907692307692307,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45654907388692306,
"avg_score": null,
"num_lines": null
} |
""" 4.4 OAUTH 2 for devices
Applications that run on devices with limited input capabilities
(such as game consoles, video cameras, and printers) can access a
Compliant API on behalf of a user, but the user must have separate
access to a computer or device with richer input capabilities.
The flow is as follows:
+---------+ +---------------+
| | | |
| Your |>--(A)---- Request Code --------->| Authorization |
| App | | Server |
| |<--(B)-- URL & Auth Code --------<| |
| | | |
    |         |        (separate device)         |               |
| |>--(C)- User login & consent ---->| |
| | | |
| |>--(D)---- Poll server ---------->| |
| | | |
| |<--(D)-- Token response ---------<| |
| | | |
+---------+ +---------------+
Figure 1: Device OAUTH2 Flow
The flow illustrated in Figure 1 includes the following steps:
(A) Your application begins this flow with a request to a Service URL
with a set of parameters. The response includes a device code,
a user code, a URL, an expiration, and a suggested polling
interval.
(B) After receipt of this response, your application shows the user
the URL and the user code, and instructs the user to open a
browser, navigate to the URL, and enter the code.
(C) The user switches to a device or computer with richer input
capabilities, launches a browser, navigates to the URL
specified on the limited-input device, logs in, and enters
the code.
(D) In the background, your application polls a Service endpoint
     for an access token. This token will only be returned to your
application after the user has logged in and approved the request.
"""
import logging
import functools
from flask import request, abort, url_for
from flask import _request_ctx_stack as stack
from werkzeug import cached_property
import datetime
import json
from ..utility import create_response, decode_base64, json_serial
log = logging.getLogger('flask_oauth2-devices')
class OAuth2DevicesProvider(object):
"""
Provide secure services for devices using OAuth2.
There are two usage modes. One is
binding the Flask app instance::
app = Flask(__name__)
oauth = OAuth2DevicesProvider(app)
The second possibility is to bind the Flask app later::
oauth = OAuth2DevicesProvider()
def create_app():
app = Flask(__name__)
oauth.init_app(app)
return app
"""
def __init__(self, app=None):
self._before_request_funcs = []
self._after_request_funcs = []
self._invalid_response = None
if app:
self.init_app(app)
def init_app(self, app):
"""
This callback can be used to initialize an application for the
oauth2 provider instance.
"""
self.app = app
app.extensions = getattr(app, 'extensions', {})
app.extensions['oauth2devices.provider.oauth2devices'] = self
@cached_property
def error_uri(self):
"""The error page URI.
"""
error_uri = self.app.config.get('OAUTH2_DEVICES_PROVIDER_ERROR_URI')
if error_uri:
return error_uri
error_endpoint = \
self.app.config.get('OAUTH2_DEVICES_PROVIDER_ERROR_ENDPOINT')
if error_endpoint:
return url_for(error_endpoint)
return '/oauth/errors'
def invalid_response(self, f):
"""Register a function for responsing with invalid request.
When an invalid request proceeds to :meth:`require_oauth`, we can
handle the request with the registered function. The function
accepts one parameter, which is an oauthlib Request object::
@oauth.invalid_response
def invalid_require_oauth(req):
return jsonify(message=req.error_message), 401
If no function is registered, it will return with ``abort(401)``.
"""
self._invalid_response = f
return f
def clientgetter(self, f):
"""Register a function as the client getter.
The function accepts one parameter `client_id`, and it returns
        a client object with at least this information:
- client_id: A random string
- client_secret: A random string
- client_type: A string represents if it is `confidential`
- redirect_uris: A list of redirect uris
Implement the client getter:
@oauth.clientgetter
def get_client(client_id):
client = get_client_model(client_id)
# Client is an object
return client
"""
self._clientgetter = f
return f
def authcodesetter(self, f):
"""Register a function to save the auth code.
        The setter accepts five parameters, at least:
- code: our auth_code, if none we will generate one
- client_id: the client we want to create a new auth_code for
- user_id: the user we want to create a new auth_code for
Implement the auth_code setter:
@oauth.authcodesetter
def save_auth_code(code, client_id, user_id, *args, **kwargs)
auth_code_model.save_code(code, client, user_id)
"""
self._authcodesetter = f
return f
def authcodegetter(self, f):
""" Register a function as the client getter.
The function accepts one parameter `code`, and it returns
a code object.
Implement the auth code getter::
@oauth.authcodegetter
def load_auth_code(code):
code = get_code_model(code)
# Code is an object
return code
"""
self._authcodegetter = f
return f
def code_handler(self,
authorize_link,
activate_link,
expires_interval,
                     polling_interval):
""" Code handler decorator
The device requests an auth_code as part of (A)
For example, the client makes the following HTTP request using
transport-only security (with extra line breaks for display purposes
only):
POST /oauth/device HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
The authorization server MUST authenticate the client.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
ctx = stack.top
if ctx is not None and hasattr(ctx, 'request'):
request = ctx.request
if request.method != 'POST':
log.warn('Attempted a non-post on the code_handler')
return create_response({'Allow': 'POST'},
'must use POST', 405)
app = self.getApp(request)
if app is None:
raise OAuth2Exception(
'Invalid application credentials',
type='unauthorized_client'
)
auth_code = self._authcodesetter(None,
app.client_id,
app.user_id)
                return self.create_oauth2_code_response(auth_code,
                                                        authorize_link,
                                                        activate_link,
                                                        expires_interval,
                                                        polling_interval)
return f(*args, **kwargs)
return wrapper
return decorator
def authorize_handler(self):
""" Authorize handler decorator
        The device uses the auth_code and device code it received from (A)
and attempts to exchange it for an access token.
For example, the client makes the following HTTP request using
transport-layer security (with extra line breaks for display
purposes only):
POST /oauth/device/authorize HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
{
"auth_code": "656ea891"
"device_code: "c8fe9de9e6c5f80bc543c492aaa2fbaf2b081601"
}
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
ctx = stack.top
if ctx is not None and hasattr(ctx, 'request'):
request = ctx.request
if request.method != 'POST':
                    log.warn('Attempted a non-post on the authorize_handler')
return create_response({'Allow': 'POST'},
'must use POST', 405)
data = request.values
auth_code = self._authcodegetter(data.get('auth_code'))
device_code = data.get('device_code')
if device_code is None and auth_code is None:
return create_response({}, 'Accepted', 202)
if auth_code is None:
raise OAuth2Exception(
'This token could not be found',
type='invalid_token'
)
                if auth_code.expires is not None \
                        and auth_code.expires < datetime.datetime.utcnow():
raise OAuth2Exception(
'Authorization code has expired',
type='invalid_token'
)
if auth_code.is_active == 0:
raise OAuth2Exception(
'The user has rejected this connection',
type='rejected_connection'
)
if auth_code.get_device_code() != device_code:
raise OAuth2Exception(
'Your user code does not match the device',
type='invalid_token'
)
access_token = \
auth_code.exchange_for_access_token(auth_code)
return self.create_oauth2_token_response(access_token)
return f(*args, **kwargs)
return wrapper
return decorator
def _verify_request(self, scopes):
""" verify recieved oauth2 data
"""
if request.method == 'POST':
return False
uri = request.base_url
if request.query_string:
uri += '?' + request.query_string.decode('utf-8')
data = request.form.to_dict()
headers = dict(request.headers)
        required = ['oauth_version', 'oauth_nonce', 'oauth_timestamp',
                    'user', 'client']
        if not all(key in data for key in required):
            return False
        return True
def require_oauth(self, *scopes):
"""Protect resource with specified scopes."""
def wrapper(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
if hasattr(request, 'oauth') and request.oauth:
return f(*args, **kwargs)
            if not self._verify_request(scopes):
                if self._invalid_response:
                    return self._invalid_response(request)
                return abort(401)
            request.oauth = request
return f(*args, **kwargs)
return decorated
return wrapper
def create_oauth2_code_response(self,
auth_code,
authorize_link=None,
activate_link=None,
expires_interval=0,
polling_interval=0):
"""
        The authorization server issues a device code which the device
        will prompt the user to authorize before following the activate
        link to exchange for an access token. The following parameters are
added to the entity-body of the HTTP response with a
200 (OK) status code:
device_code
REQUIRED. The device code generated on the fly for each device.
user_code
REQUIRED. The auth code issued by the authorization server.
authorize_link
REQUIRED. The link where auth code can be exchanged for access
token.
activate_link
REQUIRED. The link where auth code can be activated via user
consent flow.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
interval
REQUIRED. The recommended polling interval.
For example:
HTTP/1.1 200 OK
Content-Type: application/json;charset=UTF-8
Cache-Control: no-store
Pragma: no-cache
{
"device_code": "73de035b2a7bdcb2c092f4bdfe292898e0657a18",
"user_code": "656e6075",
"authorize_link":
"https://api.example.com/oauth/device/authorize",
"activate_link": "https://example.com/activate",
"expires_in": 3600,
"interval": 15
}
"""
response = create_response({
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache'}, json.dumps({
'device_code': auth_code.get_device_code(),
            'user_code': auth_code.code,
'authorize_link': authorize_link,
'activate_link': activate_link,
'expires_in': expires_interval,
'interval': polling_interval}), 200)
return response
def create_oauth2_token_response(self, access_token):
"""
The authorization server issues an access token and optional refresh
token, and constructs the response by adding the following parameters
to the entity-body of the HTTP response with a 200 (OK) status code:
access_token
REQUIRED. The access token issued by the authorization server.
token_type
REQUIRED. The type of the token issued as described in
Section 7.1. Value is case insensitive.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
refresh_token
OPTIONAL. The refresh token, which can be used to obtain new
access tokens using the same authorization grant as described
in Section 6.
scope
OPTIONAL, if identical to the scope requested by the client;
otherwise, REQUIRED. The scope of the access token as
described by Section 3.3.
The parameters are included in the entity-body of the HTTP response
using the "application/json" media type as defined by [RFC4627]. The
parameters are serialized into a JavaScript Object Notation (JSON)
structure by adding each parameter at the highest structure level.
Parameter names and string values are included as JSON strings.
Numerical values are included as JSON numbers. The order of
parameters does not matter and can vary.
The authorization server MUST include the HTTP "Cache-Control"
response header field [RFC2616] with a value of "no-store" in any
response containing tokens, credentials, or other sensitive
information, as well as the "Pragma" response header field [RFC2616]
with a value of "no-cache".
For example:
HTTP/1.1 200 OK
Content-Type: application/json;charset=UTF-8
Cache-Control: no-store
Pragma: no-cache
{
"access_token":"2YotnFZFEjr1zCsicMWpAA",
"token_type":"example",
"scope":"public private",
"expires_in":3600,
"refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA"
}
The client MUST ignore unrecognized value names in the response. The
sizes of tokens and other values received from the authorization
server are left undefined. The client should avoid making
assumptions about value sizes. The authorization server SHOULD
document the size of any value it issues.
http://tools.ietf.org/html/rfc6749#section-5.1
"""
response = create_response({
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache'}, json.dumps({
'access_token': access_token.access_token,
            'token_type': access_token.token_type,
            'scope': access_token.scopes,
            'expires_in': json.dumps(access_token.expires,
                                     default=json_serial).replace('"', ''),
            'refresh_token': None}), 200)
return response
def getApp(self, request):
# http://tools.ietf.org/html/rfc2617#section-2
client_id = None
client_secret = None
if "Authorization" in request.headers:
auth_header = request.headers['Authorization']
if "basic" in auth_header:
auth = decode_base64(auth_header[6:]).split(':')
client_id = auth[0]
client_secret = auth[1]
if client_id is None:
raise OAuth2Exception(
'A valid client ID must be provided along with request made',
type='invalid_client'
)
app = self._clientgetter(client_id)
if app is None:
raise OAuth2Exception(
'A valid client ID must be provided along with request made',
type='invalid_client'
)
if client_secret is not None and client_secret == app.client_secret:
return app
raise OAuth2Exception(
'A valid client secret must be provided along with request made',
type='invalid_secret'
)
class OAuth2Exception(Exception):
""" Class for handling API Excpetions and Errors
"""
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
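# A minimal wiring sketch (illustrative only -- the Flask app, route name
# and model helpers below are hypothetical, not part of this module):
#
#     from flask import Flask
#     app = Flask(__name__)
#     oauth = OAuth2DevicesProvider(app)
#
#     @oauth.clientgetter
#     def get_client(client_id):
#         return client_model.get(client_id)          # hypothetical model
#
#     @oauth.authcodesetter
#     def save_auth_code(code, client_id, user_id, *args, **kwargs):
#         return auth_code_model.save(code, client_id, user_id)
#
#     @oauth.authcodegetter
#     def load_auth_code(code):
#         return auth_code_model.get(code)
#
#     @app.route('/oauth/device', methods=['POST'])
#     @oauth.code_handler("https://api.example.com/oauth/device/authorize",
#                         "https://example.com/activate", 3600, 15)
#     def code():
#         return None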
| {
"repo_name": "greedo/flask-oauth2-devices",
"path": "devices/provider/devices.py",
"copies": "1",
"size": "20182",
"license": "mit",
"hash": -8506550785617520000,
"line_mean": 37.2234848485,
"line_max": 78,
"alpha_frac": 0.5319591715,
"autogenerated": false,
"ratio": 4.944145026947575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 528
} |
"""4_4_to_4_5
Revision ID: a6d00b128933
Revises: c7652b2a97a4
Create Date: 2018-08-05 09:05:15.625382
"""
from alembic import op
import sqlalchemy as sa
from manager_rest.storage.models_base import UTCDateTime
# revision identifiers, used by Alembic.
revision = 'a6d00b128933'
down_revision = 'c7652b2a97a4'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('executions',
sa.Column('started_at', UTCDateTime(), nullable=True))
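    # ALTER TYPE ... ADD VALUE cannot run inside a transaction block on the
    # PostgreSQL versions this migration targets, so close the implicit
    # transaction first (an assumption based on common Alembic practice).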
op.execute('COMMIT')
# Add new execution status
op.execute("alter type execution_status add value 'queued'")
# add execution_fk index to logs and events
op.create_index(
op.f('events__execution_fk_idx'),
'events', ['_execution_fk'],
unique=False)
op.create_index(
op.f('logs__execution_fk_idx'),
'logs', ['_execution_fk'],
unique=False)
# re-make FKs with ondelete=cascade
op.drop_constraint(
u'groups_tenants_group_id_fkey', 'groups_tenants', type_='foreignkey')
op.drop_constraint(
u'groups_tenants_tenant_id_fkey', 'groups_tenants', type_='foreignkey')
op.drop_constraint(
u'groups_tenants_role_id_fkey', 'groups_tenants', type_='foreignkey')
op.create_foreign_key(
op.f('groups_tenants_tenant_id_fkey'),
'groups_tenants',
'tenants', ['tenant_id'], ['id'],
ondelete='CASCADE')
op.create_foreign_key(
op.f('groups_tenants_group_id_fkey'),
'groups_tenants',
'groups', ['group_id'], ['id'],
ondelete='CASCADE')
op.create_foreign_key(
op.f('groups_tenants_role_id_fkey'),
'groups_tenants',
'roles', ['role_id'], ['id'],
ondelete='CASCADE')
op.drop_constraint(
u'users_tenants_user_id_fkey', 'users_tenants', type_='foreignkey')
op.drop_constraint(
u'users_tenants_tenant_id_fkey', 'users_tenants', type_='foreignkey')
op.drop_constraint(
u'users_tenants_role_id_fkey', 'users_tenants', type_='foreignkey')
op.create_foreign_key(
op.f('users_tenants_tenant_id_fkey'),
'users_tenants',
'tenants', ['tenant_id'], ['id'],
ondelete='CASCADE')
op.create_foreign_key(
op.f('users_tenants_user_id_fkey'),
'users_tenants',
'users', ['user_id'], ['id'],
ondelete='CASCADE')
op.create_foreign_key(
op.f('users_tenants_role_id_fkey'),
'users_tenants',
'roles', ['role_id'], ['id'],
ondelete='CASCADE')
# set null=true
op.alter_column(
'groups_tenants', 'role_id', existing_type=sa.INTEGER(), nullable=True)
# dep_up blueprint fks
op.create_foreign_key(
op.f('deployment_updates__old_blueprint_fk_fkey'),
'deployment_updates',
'blueprints', ['_old_blueprint_fk'], ['_storage_id'],
ondelete='CASCADE')
op.create_foreign_key(
op.f('deployment_updates__new_blueprint_fk_fkey'),
'deployment_updates',
'blueprints', ['_new_blueprint_fk'], ['_storage_id'],
ondelete='CASCADE')
# adding tenant_id indexes
op.create_index(
op.f('blueprints__tenant_id_idx'),
'blueprints', ['_tenant_id'],
unique=False)
op.create_index(
op.f('deployment_modifications__tenant_id_idx'),
'deployment_modifications', ['_tenant_id'],
unique=False)
op.create_index(
op.f('deployment_update_steps__tenant_id_idx'),
'deployment_update_steps', ['_tenant_id'],
unique=False)
op.create_index(
op.f('deployment_updates__tenant_id_idx'),
'deployment_updates', ['_tenant_id'],
unique=False)
op.create_index(
op.f('deployments__tenant_id_idx'),
'deployments', ['_tenant_id'],
unique=False)
op.create_index(
op.f('events__tenant_id_idx'), 'events', ['_tenant_id'], unique=False)
op.create_index(
op.f('executions__tenant_id_idx'),
'executions', ['_tenant_id'],
unique=False)
op.create_index(
op.f('logs__tenant_id_idx'), 'logs', ['_tenant_id'], unique=False)
op.create_index(
op.f('nodes__tenant_id_idx'), 'nodes', ['_tenant_id'], unique=False)
op.create_index(
op.f('node_instances__tenant_id_idx'),
'node_instances', ['_tenant_id'],
unique=False)
op.create_index(
op.f('plugins__tenant_id_idx'),
'plugins', ['_tenant_id'],
unique=False)
op.create_index(
op.f('snapshots__tenant_id_idx'),
'snapshots', ['_tenant_id'],
unique=False)
op.create_index(
op.f('secrets__tenant_id_idx'),
'secrets', ['_tenant_id'],
unique=False)
# removing duplicated indexes
op.drop_index('ix_blueprints_created_at', table_name='blueprints')
op.drop_index('ix_blueprints_id', table_name='blueprints')
op.drop_index(
'ix_deployment_modifications_created_at',
table_name='deployment_modifications')
op.drop_index(
'ix_deployment_modifications_ended_at',
table_name='deployment_modifications')
op.drop_index(
'ix_deployment_modifications_id',
table_name='deployment_modifications')
op.drop_index(
'ix_deployment_update_steps_id', table_name='deployment_update_steps')
op.drop_index(
'ix_deployment_updates_created_at', table_name='deployment_updates')
op.drop_index('ix_deployment_updates_id', table_name='deployment_updates')
op.drop_index('ix_deployments_created_at', table_name='deployments')
op.drop_index('ix_deployments_id', table_name='deployments')
op.drop_index('ix_events_id', table_name='events')
op.drop_index('ix_logs_id', table_name='logs')
op.drop_index('ix_executions_created_at', table_name='executions')
op.drop_index('ix_executions_id', table_name='executions')
op.drop_index('ix_groups_ldap_dn', table_name='groups')
op.drop_index('ix_groups_name', table_name='groups')
op.drop_index('ix_node_instances_id', table_name='node_instances')
op.drop_index('ix_nodes_id', table_name='nodes')
op.drop_index('ix_nodes_type', table_name='nodes')
op.drop_index('ix_plugins_archive_name', table_name='plugins')
op.drop_index('ix_plugins_id', table_name='plugins')
op.drop_index('ix_plugins_package_name', table_name='plugins')
op.drop_index('ix_plugins_uploaded_at', table_name='plugins')
op.drop_index('ix_secrets_created_at', table_name='secrets')
op.drop_index('ix_secrets_id', table_name='secrets')
op.drop_index('ix_snapshots_created_at', table_name='snapshots')
op.drop_index('ix_snapshots_id', table_name='snapshots')
op.drop_index('ix_tenants_name', table_name='tenants')
op.drop_index('ix_users_username', table_name='users')
op.drop_index('ix_roles_name', table_name='roles')
def downgrade():
op.drop_column('executions', 'started_at')
# remove the 'queued' value of the execution status enum.
    # Since we are downgrading, and in older versions the `queued` status does
    # not exist, we change it to `failed`.
op.execute("""
update executions
set status='failed'
where status='queued'
""")
# unfortunately postgres doesn't directly support removing enum values,
# so we create a new type with the correct enum values and swap
# out the old one
op.execute("alter type execution_status rename to execution_status_old")
# create the new type
execution_status = sa.Enum(
'terminated',
'failed',
'cancelled',
'pending',
'started',
'cancelling',
'force_cancelling',
'kill_cancelling',
name='execution_status',
)
execution_status.create(op.get_bind())
# update executions to use the new type
op.alter_column(
'executions',
'status',
type_=execution_status,
postgresql_using='status::text::execution_status')
# remove the old type
op.execute("DROP TYPE execution_status_old;")
op.drop_index(op.f('logs__execution_fk_idx'), table_name='logs')
op.drop_index(op.f('events__execution_fk_idx'), table_name='events')
# re-make FKs without ondelete=cascade
op.drop_constraint(
op.f('users_tenants_role_id_fkey'),
'users_tenants',
type_='foreignkey')
op.drop_constraint(
op.f('users_tenants_user_id_fkey'),
'users_tenants',
type_='foreignkey')
op.drop_constraint(
op.f('users_tenants_tenant_id_fkey'),
'users_tenants',
type_='foreignkey')
op.create_foreign_key(u'users_tenants_role_id_fkey', 'users_tenants',
'roles', ['role_id'], ['id'])
op.create_foreign_key(u'users_tenants_tenant_id_fkey', 'users_tenants',
'tenants', ['tenant_id'], ['id'])
op.create_foreign_key(u'users_tenants_user_id_fkey', 'users_tenants',
'users', ['user_id'], ['id'])
op.drop_constraint(
op.f('groups_tenants_role_id_fkey'),
'groups_tenants',
type_='foreignkey')
op.drop_constraint(
op.f('groups_tenants_group_id_fkey'),
'groups_tenants',
type_='foreignkey')
op.drop_constraint(
op.f('groups_tenants_tenant_id_fkey'),
'groups_tenants',
type_='foreignkey')
op.create_foreign_key(u'groups_tenants_role_id_fkey', 'groups_tenants',
'roles', ['role_id'], ['id'])
op.create_foreign_key(u'groups_tenants_tenant_id_fkey', 'groups_tenants',
'tenants', ['tenant_id'], ['id'])
op.create_foreign_key(u'groups_tenants_group_id_fkey', 'groups_tenants',
'groups', ['group_id'], ['id'])
# set null=false
op.alter_column(
'groups_tenants',
'role_id',
existing_type=sa.INTEGER(),
nullable=False)
# dep_up blueprint fks
op.drop_constraint(
op.f('deployment_updates__new_blueprint_fk_fkey'),
'deployment_updates',
type_='foreignkey')
op.drop_constraint(
op.f('deployment_updates__old_blueprint_fk_fkey'),
'deployment_updates',
type_='foreignkey')
# tenant_id indexes
op.drop_index(op.f('blueprints__tenant_id_idx'), table_name='blueprints')
op.drop_index(
op.f('deployment_update_steps__tenant_id_idx'),
table_name='deployment_update_steps')
op.drop_index(op.f('deployments__tenant_id_idx'), table_name='deployments')
op.drop_index(op.f('events__tenant_id_idx'), table_name='events')
op.drop_index(
op.f('deployment_modifications__tenant_id_idx'),
table_name='deployment_modifications')
op.drop_index(
op.f('deployment_updates__tenant_id_idx'),
table_name='deployment_updates')
op.drop_index(op.f('logs__tenant_id_idx'), table_name='logs')
op.drop_index(
op.f('node_instances__tenant_id_idx'), table_name='node_instances')
op.drop_index(op.f('snapshots__tenant_id_idx'), table_name='snapshots')
op.drop_index(op.f('secrets__tenant_id_idx'), table_name='secrets')
op.drop_index(op.f('plugins__tenant_id_idx'), table_name='plugins')
op.drop_index(op.f('nodes__tenant_id_idx'), table_name='nodes')
op.drop_index(op.f('executions__tenant_id_idx'), table_name='executions')
# duplicated indexes in 4.4
op.create_index('ix_blueprints_id', 'blueprints', ['id'], unique=False)
op.create_index(
'ix_blueprints_created_at', 'blueprints', ['created_at'], unique=False)
op.create_index(
'ix_deployment_modifications_id',
'deployment_modifications', ['id'],
unique=False)
op.create_index(
'ix_deployment_modifications_ended_at',
'deployment_modifications', ['ended_at'],
unique=False)
op.create_index(
'ix_deployment_modifications_created_at',
'deployment_modifications', ['created_at'],
unique=False)
op.create_index(
'ix_deployment_update_steps_id',
'deployment_update_steps', ['id'],
unique=False)
op.create_index(
'ix_deployment_updates_id', 'deployment_updates', ['id'], unique=False)
op.create_index(
'ix_deployment_updates_created_at',
'deployment_updates', ['created_at'],
unique=False)
op.create_index('ix_events_id', 'events', ['id'], unique=False)
op.create_index('ix_logs_id', 'logs', ['id'], unique=False)
op.create_index('ix_deployments_id', 'deployments', ['id'], unique=False)
op.create_index(
'ix_deployments_created_at',
'deployments', ['created_at'],
unique=False)
op.create_index(
'ix_plugins_uploaded_at', 'plugins', ['uploaded_at'], unique=False)
op.create_index(
'ix_plugins_package_name', 'plugins', ['package_name'], unique=False)
op.create_index('ix_plugins_id', 'plugins', ['id'], unique=False)
op.create_index(
'ix_plugins_archive_name', 'plugins', ['archive_name'], unique=False)
op.create_index('ix_nodes_type', 'nodes', ['type'], unique=False)
op.create_index('ix_nodes_id', 'nodes', ['id'], unique=False)
op.create_index(
'ix_node_instances_id', 'node_instances', ['id'], unique=False)
op.create_index('ix_users_username', 'users', ['username'], unique=True)
op.create_index('ix_tenants_name', 'tenants', ['name'], unique=True)
op.create_index('ix_roles_name', 'roles', ['name'], unique=True)
op.create_index('ix_snapshots_id', 'snapshots', ['id'], unique=False)
op.create_index(
'ix_snapshots_created_at', 'snapshots', ['created_at'], unique=False)
op.create_index('ix_secrets_id', 'secrets', ['id'], unique=False)
op.create_index(
'ix_secrets_created_at', 'secrets', ['created_at'], unique=False)
op.create_index('ix_groups_name', 'groups', ['name'], unique=True)
op.create_index('ix_groups_ldap_dn', 'groups', ['ldap_dn'], unique=True)
op.create_index('ix_executions_id', 'executions', ['id'], unique=False)
op.create_index(
'ix_executions_created_at', 'executions', ['created_at'], unique=False)
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/a6d00b128933_4_4_to_4_5.py",
"copies": "1",
"size": "14186",
"license": "apache-2.0",
"hash": 3211950702491850000,
"line_mean": 35.3743589744,
"line_max": 79,
"alpha_frac": 0.6105315099,
"autogenerated": false,
"ratio": 3.423262548262548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4533794058162548,
"avg_score": null,
"num_lines": null
} |
"""44. Wildcard Matching
https://leetcode.com/problems/wildcard-matching/description/
Given an input string (s) and a pattern (p), implement wildcard pattern
matching with support for '?' and '*'.
'?' Matches any single character.
'*' Matches any sequence of characters (including the empty sequence).
The matching should cover the entire input string (not partial).
Note:
s could be empty and contains only lowercase letters a-z.
p could be empty and contains only lowercase letters a-z, and characters like
? or *.
Example 1:
Input:
s = "aa"
p = "a"
Output: false
Explanation: "a" does not match the entire string "aa".
Example 2:
Input:
s = "aa"
p = "*"
Output: true
Explanation: '*' matches any sequence.
Example 3:
Input:
s = "cb"
p = "?a"
Output: false
Explanation: '?' matches 'c', but the second letter is 'a', which does not
match 'b'.
Example 4:
Input:
s = "adceb"
p = "*a*b"
Output: true
Explanation: The first '*' matches the empty sequence, while the second '*'
matches the substring "dce".
Example 5:
Input:
s = "acdcb"
p = "a*c?b"
Output: false
"""
class Solution:
def is_match(self, s: str, p: str) -> bool:
s_len, p_len = len(s), len(p)
s_idx, p_idx = 0, 0
p_star_idx = -1
s_temp_idx = -1
while s_idx < s_len:
if p_idx == p_len or (
p[p_idx] not in ["?", "*"] and p[p_idx] != s[s_idx]):
if p_star_idx == -1:
return False
else:
s_idx = s_temp_idx + 1
s_temp_idx = s_idx
p_idx = p_star_idx + 1
elif p[p_idx] == s[s_idx] or p[p_idx] == "?":
p_idx += 1
s_idx += 1
else:
# "*" matches a sequence of characters increased from 0.
p_star_idx = p_idx
s_temp_idx = s_idx - 1
p_idx += 1
return all(x == "*" for x in p[p_idx:])
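# Illustrative usage (an added example; note this file defines the snake_case
# method is_match rather than LeetCode's isMatch):
if __name__ == "__main__":
    sol = Solution()
    # The second '*' starts as the empty match and is grown over "dce".
    print(sol.is_match("adceb", "*a*b"))   # True
    print(sol.is_match("acdcb", "a*c?b"))  # False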
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/wildcard_matching.py",
"copies": "1",
"size": "1986",
"license": "mit",
"hash": -1729538180068083500,
"line_mean": 19.2040816327,
"line_max": 77,
"alpha_frac": 0.5434343434,
"autogenerated": false,
"ratio": 3.230016313213703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42734506566137026,
"avg_score": null,
"num_lines": null
} |
# 451. Sort Characters By Frequency
# Given a string, sort it in decreasing order based on the frequency of characters.
#
# Example 1:
#
# Input:
# "tree"
#
# Output:
# "eert"
#
# Explanation:
# 'e' appears twice while 'r' and 't' both appear once.
# So 'e' must appear before both 'r' and 't'. Therefore "eetr" is also a valid answer.
#
# Example 2:
#
# Input:
# "cccaaa"
#
# Output:
# "cccaaa"
#
# Explanation:
# Both 'c' and 'a' appear three times, so "aaaccc" is also a valid answer.
# Note that "cacaca" is incorrect, as the same characters must be together.
#
# Example 3:
#
# Input:
# "Aabb"
#
# Output:
# "bbAa"
#
# Explanation:
# "bbaA" is also a valid answer, but "Aabb" is incorrect.
# Note that 'A' and 'a' are treated as two different characters.
#
class Solution:
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
d = {}
for c in s:
if c not in d:
d[c] = 1
else:
d[c] += 1
# print(d)
li = sorted(d.items(), key=lambda x: -x[1])
res = ''
for x in li:
res += x[0] * x[1]
return res
print(Solution().frequencySort("tree"))
print(Solution().frequencySort("Aabb"))
print(Solution().frequencySort("cccaaa"))
| {
"repo_name": "gengwg/leetcode",
"path": "451_sort_characters_by_frequency.py",
"copies": "1",
"size": "1293",
"license": "apache-2.0",
"hash": 2894370152374923300,
"line_mean": 18.5909090909,
"line_max": 86,
"alpha_frac": 0.56612529,
"autogenerated": false,
"ratio": 3.028103044496487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40942283344964875,
"avg_score": null,
"num_lines": null
} |
# 454. 4Sum II
# Difficulty:Medium
# Given four lists A, B, C, D of integer values, compute how many tuples (i,
# j, k, l) there are such that A[i] + B[j] + C[k] + D[l] is zero.
#
# To make the problem a bit easier, all A, B, C, D have the same length N,
# where 0 ≤ N ≤ 500. All integers are in the range of -2^28 to 2^28 - 1 and
# the result is guaranteed to be at most 2^31 - 1.
#
# Example:
#
# Input:
# A = [ 1, 2]
# B = [-2,-1]
# C = [-1, 2]
# D = [ 0, 2]
#
# Output:
# 2
#
# Explanation:
# The two tuples are:
# 1. (0, 0, 0, 1) -> A[0] + B[0] + C[0] + D[1] = 1 + (-2) + (-1) + 2 = 0
# 2. (1, 1, 0, 0) -> A[1] + B[1] + C[0] + D[0] = 2 + (-1) + (-1) + 0 = 0
from collections import Counter
class Solution:
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
a_b_sum_counter = Counter(
[A[i] + B[j] for i in range(len(A)) for j in range(len(B))])
count = 0
for i in range(len(C)):
for j in range(len(D)):
curr_num_negative = -(C[i] + D[j])
if curr_num_negative in a_b_sum_counter:
count += a_b_sum_counter[curr_num_negative]
return count
class RefSolution:
def fourSumCount(self, A, B, C, D):
AB = Counter(a + b for a in A for b in B)
return sum(AB[-c - d] for c in C for d in D)
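# Both versions use the same meet-in-the-middle idea: precompute all n^2 sums
# a + b in a Counter (O(n^2) time and space), then for each of the n^2 pairs
# (c, d) look up how many precomputed sums equal -(c + d) in O(1), instead of
# scanning all n^4 tuples directly.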
if __name__ == '__main__':
A = [1, 2]
B = [-2, -1]
C = [-1, 2]
D = [0, 2]
sol = Solution()
print(sol.fourSumCount(A, B, C, D))
| {
"repo_name": "kingdaa/LC-python",
"path": "lc/454_4Sum_II.py",
"copies": "1",
"size": "1606",
"license": "mit",
"hash": 4640598504896212000,
"line_mean": 24.8387096774,
"line_max": 76,
"alpha_frac": 0.4900124844,
"autogenerated": false,
"ratio": 2.6006493506493507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.359066183504935,
"avg_score": null,
"num_lines": null
} |
"""459. Repeated Substring Pattern
https://leetcode.com/problems/repeated-substring-pattern/description/
Given a non-empty string, check if it can be constructed by taking a substring of
it and appending multiple copies of the substring together. You may assume the given
string consists of lowercase English letters only and its length will not exceed 10000.
Example 1:
Input: "abab"
Output: True
Explanation: It's the substring "ab" twice.
Example 2:
Input: "aba"
Output: False
Example 3:
Input: "abcabcabcabc"
Output: True
Explanation: It's the substring "abc" four times. (And the substring "abcabc" twice.)
"""
class Solution(object):
def repeatedSubstringPattern(self, s):
n = len(s)
for i in range(1, n):
if n % (n - i) == 0:
suffix = s[i:]
if suffix * (n // len(suffix)) == s:
return True
return False
if __name__ == "__main__":
s = Solution()
test_cases = [
("abcabcabcabc", True),
("abab", True),
("abc", False),
("aaaabaaaab", True),
("aa", True),
("ab", False)
]
for pattern, expect in test_cases:
result = s.repeatedSubstringPattern(pattern)
if result == expect:
print("ok ->", result)
else:
print("nok ->", result)
break
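# A well-known alternative check (an added note, not the method above): s is
# built from a repeated proper substring iff s occurs inside (s + s) with the
# first and last characters dropped, e.g. "abab" in ("abab" + "abab")[1:-1].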
| {
"repo_name": "rcanepa/cs-fundamentals",
"path": "python/interview_questions/repeated_substring_pattern.py",
"copies": "1",
"size": "1362",
"license": "mit",
"hash": -387427203498578560,
"line_mean": 23.7636363636,
"line_max": 87,
"alpha_frac": 0.5910425844,
"autogenerated": false,
"ratio": 3.772853185595568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9859613776519651,
"avg_score": 0.0008563986951832576,
"num_lines": 55
} |
# 45
import itertools
class Solve(object):
def __init__(self):
self.pen_start_idx = 1
self.hex_start_idx = 1
def tri(self, n):
"""
        Return the nth triangle number
"""
return n*(n+1)/2
def find_pen(self, target):
"""
Find target in pentagonal numbers
"""
for n in itertools.count(start=self.pen_start_idx):
value = n*(3*n-1)/2
if value > target:
self.pen_start_idx = n
return False
if value == target:
self.pen_start_idx = n + 1
return True
def find_hex(self, target):
"""
Find target in hexagonal numbers
"""
for n in itertools.count(start=self.hex_start_idx):
value = n*(2*n-1)
if value > target:
self.hex_start_idx = n
return False
if value == target:
self.hex_start_idx = n + 1
return True
def solve(self):
        for n in itertools.count(start=286):  # T(285) = 40755 is the given solution
target = self.tri(n)
if self.find_pen(target) and self.find_hex(target):
return target
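    # Added remark: every hexagonal number is also triangular, since
    # H_n = n(2n - 1) = (2n - 1)(2n) / 2 = T_(2n - 1); iterating hexagonal
    # numbers and testing only pentagonality would therefore work as well.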
s = Solve()
print s.solve() | {
"repo_name": "daicang/Euler",
"path": "p45.py",
"copies": "1",
"size": "1244",
"license": "mit",
"hash": 7261751180357199000,
"line_mean": 24.4081632653,
"line_max": 63,
"alpha_frac": 0.481511254,
"autogenerated": false,
"ratio": 3.9871794871794872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49686907411794873,
"avg_score": null,
"num_lines": null
} |
"""4.5 to 4.5.5
- Add dry_run indication in executions table
- Add capabilities field to deployments
- Add source/target nodes instance IDs to events and logs
- Add the agents table
- Add the operations and tasks_graphs tables
Revision ID: 1fbd6bf39e84
Revises: a6d00b128933
Create Date: 2018-10-07 06:31:52.955877
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from cloudify.models_states import AgentState, VisibilityState
from manager_rest.storage.models_base import UTCDateTime, JSONString
# revision identifiers, used by Alembic.
revision = '1fbd6bf39e84'
down_revision = 'a6d00b128933'
branch_labels = None
depends_on = None
resource_tables = ['blueprints', 'plugins', 'secrets', 'snapshots', 'events',
'executions', 'logs', 'nodes', 'node_instances',
'deployments', 'deployment_modifications',
'deployment_updates', 'deployment_update_steps']
def upgrade():
    # In snapshots < 4.5.5 failed_logins_counter may be null; from 4.5.5 on
    # we make sure all null values are replaced with zeros.
op.execute("""
UPDATE users
SET failed_logins_counter = 0
WHERE failed_logins_counter IS NULL;
""")
    # Restore the NOT NULL constraint on the `failed_logins_counter` column.
op.alter_column('users',
'failed_logins_counter',
nullable=False)
# server_default accepts string or SQL element only
op.add_column('executions', sa.Column('is_dry_run',
sa.Boolean(),
nullable=False,
server_default='f'))
op.add_column('executions',
sa.Column('scheduled_for', UTCDateTime(), nullable=True))
op.execute('COMMIT')
# Add new execution status
op.execute("alter type execution_status add value 'scheduled'")
op.add_column(
'deployments',
sa.Column('capabilities', sa.PickleType(comparator=lambda *a: False))
)
op.add_column('events', sa.Column('source_id', sa.Text(), nullable=True))
op.add_column('events', sa.Column('target_id', sa.Text(), nullable=True))
op.add_column('logs', sa.Column('source_id', sa.Text(), nullable=True))
op.add_column('logs', sa.Column('target_id', sa.Text(), nullable=True))
# Create the agents table
visibility_enum = postgresql.ENUM(*VisibilityState.STATES,
name='visibility_states',
create_type=False)
agent_states_enum = postgresql.ENUM(*AgentState.STATES,
name='agent_states')
op.create_table(
'agents',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('ip', sa.Text(), nullable=True),
sa.Column('install_method', sa.Text(), nullable=False),
sa.Column('system', sa.Text(), nullable=True),
sa.Column('version', sa.Text(), nullable=False),
sa.Column('state', agent_states_enum, nullable=False),
sa.Column('visibility', visibility_enum, nullable=True),
sa.Column('rabbitmq_username', sa.Text(), nullable=True),
sa.Column('rabbitmq_password', sa.Text(), nullable=True),
sa.Column('rabbitmq_exchange', sa.Text(), nullable=False),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('updated_at', UTCDateTime(), nullable=True),
sa.Column('_node_instance_fk', sa.Integer(), nullable=False),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
ondelete='CASCADE'
),
sa.ForeignKeyConstraint(
['_node_instance_fk'],
[u'node_instances._storage_id'],
ondelete='CASCADE'
),
sa.ForeignKeyConstraint(
['_tenant_id'],
[u'tenants.id'],
ondelete='CASCADE'
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('agents__tenant_id_idx'),
'agents',
['_tenant_id'],
unique=False)
op.create_index(
op.f('agents_created_at_idx'),
'agents',
['created_at'],
unique=False
)
op.create_index(
op.f('agents_id_idx'),
'agents',
['id'],
unique=False
)
# Remove the deprecated column private_resource from all the
# resources tables
for table_name in resource_tables:
op.drop_column(table_name, 'private_resource')
op.create_table(
'tasks_graphs',
sa.Column('_storage_id', sa.Integer(),
autoincrement=True, nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('visibility', visibility_enum, nullable=True),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('_execution_fk', sa.Integer(), nullable=False),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['_creator_id'], [u'users.id'],
name=op.f('tasks_graphs__creator_id_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['_execution_fk'], [u'executions._storage_id'],
name=op.f('tasks_graphs__execution_fk_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['_tenant_id'], [u'tenants.id'],
name=op.f('tasks_graphs__tenant_id_fkey'),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('_storage_id', name=op.f('tasks_graphs_pkey'))
)
op.create_index(op.f('tasks_graphs__tenant_id_idx'), 'tasks_graphs',
['_tenant_id'], unique=False)
op.create_index(op.f('tasks_graphs_created_at_idx'), 'tasks_graphs',
['created_at'], unique=False)
op.create_index(op.f('tasks_graphs_id_idx'), 'tasks_graphs', ['id'],
unique=False)
op.create_table(
'operations',
sa.Column('_storage_id', sa.Integer(), autoincrement=True,
nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('visibility', visibility_enum, nullable=True),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('state', sa.Text(), nullable=False),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('dependencies', postgresql.ARRAY(sa.Text()), nullable=True),
sa.Column('type', sa.Text(), nullable=True),
sa.Column('parameters', JSONString(), nullable=True),
sa.Column('_tasks_graph_fk', sa.Integer(), nullable=False),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['_creator_id'], [u'users.id'],
name=op.f('operations__creator_id_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['_tasks_graph_fk'],
[u'tasks_graphs._storage_id'],
name=op.f('operations__tasks_graph_fk_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['_tenant_id'], [u'tenants.id'],
name=op.f('operations__tenant_id_fkey'),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('_storage_id', name=op.f('operations_pkey'))
)
op.create_index(op.f('operations__tenant_id_idx'), 'operations',
['_tenant_id'], unique=False)
op.create_index(op.f('operations_created_at_idx'), 'operations',
['created_at'], unique=False)
op.create_index(op.f('operations_id_idx'), 'operations', ['id'],
unique=False)
def downgrade():
    # Temporarily remove the NOT NULL constraint from `failed_logins_counter`,
    # so that restoring old snapshots with null values won't fail.
op.alter_column('users',
'failed_logins_counter',
nullable=True)
op.drop_index(op.f('operations_id_idx'), table_name='operations')
op.drop_index(op.f('operations_created_at_idx'), table_name='operations')
op.drop_index(op.f('operations__tenant_id_idx'), table_name='operations')
op.drop_table('operations')
op.drop_index(op.f('tasks_graphs_id_idx'), table_name='tasks_graphs')
op.drop_index(op.f('tasks_graphs_created_at_idx'),
table_name='tasks_graphs')
op.drop_index(op.f('tasks_graphs__tenant_id_idx'),
table_name='tasks_graphs')
op.drop_table('tasks_graphs')
op.drop_column('executions', 'is_dry_run')
op.drop_column('deployments', 'capabilities')
op.drop_column('events', 'source_id')
op.drop_column('events', 'target_id')
op.drop_column('logs', 'source_id')
op.drop_column('logs', 'target_id')
op.drop_column('executions', 'scheduled_for')
# Remove the agents table
op.drop_index(op.f('agents_id_idx'), table_name='agents')
op.drop_index(op.f('agents_created_at_idx'), table_name='agents')
op.drop_index(op.f('agents__tenant_id_idx'), table_name='agents')
op.drop_table('agents')
op.execute("DROP TYPE agent_states;")
# Add the private_resource column to all resources tables
for table_name in resource_tables:
op.add_column(
table_name,
sa.Column('private_resource', sa.Boolean(), nullable=True)
)
# remove the 'scheduled' value of the execution status enum.
    # Since we are downgrading, and in older versions the `scheduled` status
    # does not exist, we change it to `failed`.
op.execute("""
update executions
set status='failed'
where status='scheduled'
""")
# unfortunately postgres doesn't directly support removing enum values,
# so we create a new type with the correct enum values and swap
# out the old one
op.execute("alter type execution_status rename to execution_status_old")
# create the new type
execution_status = sa.Enum(
'terminated',
'failed',
'cancelled',
'pending',
'started',
'cancelling',
'force_cancelling',
'kill_cancelling',
'queued',
name='execution_status',
)
execution_status.create(op.get_bind())
# update executions to use the new type
op.alter_column(
'executions',
'status',
type_=execution_status,
postgresql_using='status::text::execution_status')
# remove the old type
op.execute("DROP TYPE execution_status_old;")
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/1fbd6bf39e84_4_5_to_4_5_5.py",
"copies": "1",
"size": "11151",
"license": "apache-2.0",
"hash": -6741896631565259000,
"line_mean": 40.4535315985,
"line_max": 79,
"alpha_frac": 0.5834454309,
"autogenerated": false,
"ratio": 3.9361101306036006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50195555615036,
"avg_score": null,
"num_lines": null
} |
# 460 - LFU Cache (Hard)
# https://leetcode.com/problems/lfu-cache/
# Implement a Least Frequently Used cache. GODDAMN I almost died.
from collections import OrderedDict, defaultdict
class LFUCache(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
# From key to value.
self.dic = {}
# Times a key has been used.
self.count = {}
# Keys grouped by amount of usage.
# e.g. from a key 2 (as in two times used), get the keys that have been
# used that much times.
self.reverse = defaultdict(lambda: OrderedDict())
# Capacity of the LFU.
self.cap = capacity
def get(self, key):
"""
:type key: int
:rtype: int
"""
# If the key exists. Make sure to put "is not None" otherwise a 0 Value
# will make the condition evaluate to False.
if self.dic.get(key) is not None:
# Update the amount of times key has been used.
prevCount = self.count[key]
newCount = prevCount + 1
self.count[key] = newCount
# Delete the key from the previous grouping of times used.
del self.reverse[prevCount][key]
# If that grouping is now empty, erase it too.
if len(self.reverse[prevCount]) == 0:
del self.reverse[prevCount]
# Insert key into the new grouping of times used.
self.reverse[newCount][key] = True
# Return the value associated to this key.
return self.dic[key]
# If the key doesn't exists, just return -1.
else:
return -1
def set(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
# Check that the value exists, so that it will be updated.
if self.dic.get(key) is not None:
# Times used previously.
prevCount = self.count[key]
# New amount of times used.
newCount = prevCount + 1
# Set the new amount.
self.count[key] = newCount
# Delete the key from the previous grouping of times used.
del self.reverse[prevCount][key]
# If that grouping is now empty, erase it too.
if len(self.reverse[prevCount]) == 0:
del self.reverse[prevCount]
# Insert key into the new grouping of times used.
self.reverse[newCount][key] = True
# Now update the value associated to this key.
self.dic[key] = value
# If the value doesn't exists...
else:
            # If capacity would be exceeded, evict the least frequently used key.
            if len(self.dic) == self.cap and len(self.reverse) > 0:
                # Because the "reverse" (count -> keys) dict groups keys by
                # amount of usage, get the smallest usage count present.
                leastAmount = sorted(self.reverse.keys())[0]
                # Because the grouping is an OrderedDict, its first key is the
                # least recently used among the least frequently used ones.
                leastKey = next(iter(self.reverse[leastAmount]))
# Delete that number from the grouping of keys used that times.
del self.reverse[leastAmount][leastKey]
# If there are no more keys for this count, delete the count.
if len(self.reverse[leastAmount]) == 0:
del self.reverse[leastAmount]
# Delete the individual amount of uses for the LFU key.
del self.count[leastKey]
# Delete the LFU key and its value.
del self.dic[leastKey]
# Now, insert the new key, with a single usage (the insertion).
if len(self.dic) + 1 <= self.cap:
self.dic[key] = value
self.count[key] = 1
self.reverse[1][key] = True
# Your LFUCache object will be instantiated and called as such:
# obj = LFUCache(capacity)
# param_1 = obj.get(key)
# obj.set(key,value) | {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/03_Hard/lc_460.py",
"copies": "1",
"size": "4146",
"license": "mit",
"hash": -7794249354339301000,
"line_mean": 40.47,
"line_max": 79,
"alpha_frac": 0.5602990835,
"autogenerated": false,
"ratio": 4.234933605720123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5295232689220123,
"avg_score": null,
"num_lines": null
} |
# 460. LFU Cache
# Design and implement a data structure for Least Frequently Used (LFU) cache. It should support the following operations: get and put.
#
# get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
# put(key, value) - Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least frequently used item before inserting a new item. For the purpose of this problem, when there is a tie (i.e., two or more keys that have the same frequency), the least recently used key would be evicted.
#
# Follow up:
# Could you do both operations in O(1) time complexity?
#
# Example:
#
# LFUCache cache = new LFUCache( 2 /* capacity */ );
#
# cache.put(1, 1);
# cache.put(2, 2);
# cache.get(1); // returns 1
# cache.put(3, 3); // evicts key 2
# cache.get(2); // returns -1 (not found)
# cache.get(3); // returns 3.
# cache.put(4, 4); // evicts key 1.
# cache.get(1); // returns -1 (not found)
# cache.get(3); // returns 3
# cache.get(4); // returns 4
#
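# Design note (added): the solution below threads every cache entry onto one
# doubly linked list ordered by use count (higher counts toward the head;
# within a count, more recently used first). self.cnt maps each count to the
# first node with that count so get/put can splice nodes in O(1), and a
# sentinel tail with count 0 keeps the edge cases uniform. Eviction takes
# tail.prev: the least frequently used, least recently used real node.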
class ListNode:
def __init__(self, key, val):
self.prev = None
self.next = None
self.key = key
self.val = val
def connect(self, nextNode):
self.next = nextNode
nextNode.prev = self
class LFUCache:
def __init__(self, capacity):
"""
:type capacity: int
"""
self.cap = capacity
self.head = ListNode(None, None)
self.tail = ListNode(None, None)
self.head.connect(self.tail)
        # maps a use count to the first (most recently touched) ListNode
        # having that count in the linked list
        self.cnt = {0: self.tail}
        # maps key -> [ListNode, visit count]; the None entry is a sentinel
        # for the tail node
        self.kv = {None: [self.tail, 0]}
def moveforward(self, key):
node, cnt = self.kv[key]
self.add('tmp', node.val, cnt + 1)
self.remove(key)
self.kv[key] = self.kv['tmp']
self.kv[key][0].key = key
del self.kv['tmp']
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key not in self.kv:
return -1
self.moveforward(key)
return self.kv[key][0].val
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if self.cap == 0:
return
if key in self.kv:
self.kv[key][0].val = value
self.moveforward(key)
return
if len(self.kv) > self.cap:
self.remove(self.tail.prev.key)
self.add(key, value, 0)
def remove(self, key):
node, cnt = self.kv[key]
if self.cnt[cnt] != node:
node.prev.connect(node.next)
elif self.kv[node.next.key][1] == cnt:
node.prev.connect(node.next)
self.cnt[cnt] = self.cnt[cnt].next
else:
node.prev.connect(node.next)
del self.cnt[cnt]
del self.kv[key]
def add(self, key, value, cnt):
if cnt in self.cnt:
loc = self.cnt[cnt]
else:
            loc = self.cnt[cnt - 1]
node = ListNode(key, value)
loc.prev.connect(node)
node.connect(loc)
self.cnt[cnt] = node
self.kv[key] = [node, cnt]
cache = LFUCache(2)
cache.put(1, 1)
cache.put(2, 2)
print(cache.get(1))
cache.put(3, 3)
print(cache.get(2))
print(cache.get(3))
cache.put(4, 4)
print(cache.get(1))
print(cache.get(3))
print(cache.get(4))
# Your LFUCache object will be instantiated and called as such:
# obj = LFUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
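# --- Added illustration (not from the original author) ---
# The same O(1) idea can be written more compactly with frequency buckets:
# a dict of OrderedDicts, one per use count, plus a running minimum count.
# This is a minimal sketch assuming only the standard library.
from collections import defaultdict, OrderedDict

class LFUCacheAlt(object):
    def __init__(self, capacity):
        self.cap = capacity
        self.vals = {}                            # key -> value
        self.freq = {}                            # key -> use count
        self.buckets = defaultdict(OrderedDict)   # count -> keys, LRU order
        self.minfreq = 0

    def _touch(self, key):
        # Move key from its current frequency bucket to the next one.
        f = self.freq[key]
        del self.buckets[f][key]
        if not self.buckets[f]:
            del self.buckets[f]
            if self.minfreq == f:
                self.minfreq = f + 1
        self.freq[key] = f + 1
        self.buckets[f + 1][key] = None

    def get(self, key):
        if key not in self.vals:
            return -1
        self._touch(key)
        return self.vals[key]

    def put(self, key, value):
        if self.cap == 0:
            return
        if key in self.vals:
            self.vals[key] = value
            self._touch(key)
            return
        if len(self.vals) >= self.cap:
            # Evict the least frequently used key; OrderedDict order breaks
            # ties in favour of the least recently used.
            old, _ = self.buckets[self.minfreq].popitem(last=False)
            if not self.buckets[self.minfreq]:
                del self.buckets[self.minfreq]
            del self.vals[old]
            del self.freq[old]
        self.vals[key] = value
        self.freq[key] = 1
        self.buckets[1][key] = None
        self.minfreq = 1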
| {
"repo_name": "gengwg/leetcode",
"path": "460_lfu_cache.py",
"copies": "1",
"size": "3669",
"license": "apache-2.0",
"hash": 8116018784754970000,
"line_mean": 27.6640625,
"line_max": 350,
"alpha_frac": 0.5660943036,
"autogenerated": false,
"ratio": 3.267141585040071,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9327197668106475,
"avg_score": 0.0012076441067190593,
"num_lines": 128
} |
# 462. Minimum Moves to Equal Array Elements II
# Given a non-empty integer array, find the minimum number of moves required to make all array elements equal, where a move is incrementing a selected element by 1 or decrementing a selected element by 1.
# You may assume the array's length is at most 10,000.
# Example:
# Input:
# [1,2,3]
# Output:
# 2
# Explanation:
# Only two moves are needed (remember each move increments or decrements one element):
# [1,2,3] => [2,2,3] => [2,2,2]
class Solution(object):
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
#wmd_val = sorted(nums)[len(nums)/2]
from numpy import median
md_val = median(nums)
#return sum(map(lambda x: abs(x - md_val), nums))
return sum(abs(num - md_val) for num in nums)
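# Why the median works (added note, not from the original author): moving all
# elements to a common target t costs sum(|x - t|), which is minimized when t
# is a median of the array. A quick check against the worked example (this
# assumes numpy is available, since the solution above imports it):
if __name__ == '__main__':
    assert Solution().minMoves2([1, 2, 3]) == 2  # [1,2,3] -> [2,2,2]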
class Solution2(object):
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
solution from kamyu
"""
def kthElement(nums, k):
def PartitionAroundPivot(left, right, pivot_idx, nums):
pivot_value = nums[pivot_idx]
new_pivot_idx = left
nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
for i in xrange(left, right):
if nums[i] > pivot_value:
nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
new_pivot_idx += 1
nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
return new_pivot_idx
left, right = 0, len(nums) - 1
while left <= right:
pivot_idx = randint(left, right)
new_pivot_idx = PartitionAroundPivot(left, right, pivot_idx, nums)
if new_pivot_idx == k - 1:
return nums[new_pivot_idx]
elif new_pivot_idx > k - 1:
right = new_pivot_idx - 1
else: # new_pivot_idx < k - 1.
left = new_pivot_idx + 1
median = kthElement(nums, len(nums)/2 + 1)
return sum(abs(num - median) for num in nums) | {
"repo_name": "aenon/OnlineJudge",
"path": "leetcode/5.BitManipulation/462.MinimumMovestoEqualArrayElementsII.py",
"copies": "1",
"size": "2223",
"license": "mit",
"hash": -5596607523155980000,
"line_mean": 33.2153846154,
"line_max": 204,
"alpha_frac": 0.5339631129,
"autogenerated": false,
"ratio": 3.579710144927536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4613673257827536,
"avg_score": null,
"num_lines": null
} |
# 463 - Island Perimeter (Easy)
# https://leetcode.com/problems/island-perimeter/
from collections import deque
class Solution(object):
def BFS(self, grid, w, h, i, j):
        # Doing [[-1] * h] * w would repeat the same inner-list pointer, bad!
visited = [[-1] * h for _ in range(w)]
dq = deque([(i, j)])
res = 0
while len(dq) > 0:
pos = dq.popleft()
x, y = pos[0], pos[1]
if visited[x][y] == 1:
continue
visited[x][y] = 1
for _i, _j in [[-1, 0], [1, 0], [0, -1], [0, 1]]:
newX = x + _i
newY = y + _j
# Increase perimeter value only if next squares are either
# boundaries, or not land.
if any([newX < 0, newX >= w, newY < 0, newY >= h]):
res += 1
continue
if grid[newX][newY] != 1:
res += 1
continue
dq.append((newX, newY))
return res
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
width = len(grid)
height = len(grid[0])
for i in range(width):
for j in range(height):
if grid[i][j] == 1:
return self.BFS(grid, width, height, i, j)
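# Quick check (added for illustration; not part of the original file): the
# sample grid from the problem statement should yield a perimeter of 16.
if __name__ == '__main__':
    sample = [[0, 1, 0, 0],
              [1, 1, 1, 0],
              [0, 1, 0, 0],
              [1, 1, 0, 0]]
    assert Solution().islandPerimeter(sample) == 16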
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_463.py",
"copies": "1",
"size": "1436",
"license": "mit",
"hash": -7995183621666085000,
"line_mean": 34.0243902439,
"line_max": 78,
"alpha_frac": 0.4129526462,
"autogenerated": false,
"ratio": 3.7395833333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4652535979533333,
"avg_score": null,
"num_lines": null
} |
# 463. Island Perimeter
# You are given a map in form of a two-dimensional integer grid
# where 1 represents land and 0 represents water.
# Grid cells are connected horizontally/vertically (not diagonally).
# The grid is completely surrounded by water, and there is exactly one island
# (i.e., one or more connected land cells). The island doesn't have "lakes"
# (water inside that isn't connected to the water around the island).
# One cell is a square with side length 1.
# The grid is rectangular, width and height don't exceed 100.
# Determine the perimeter of the island.
# Example:
# [[0,1,0,0],
# [1,1,1,0],
# [0,1,0,0],
# [1,1,0,0]]
# Answer: 16
# Explanation: The perimeter is the 16 yellow stripes shown in the problem's figure (not included here).
class Solution(object):
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
https://leetcode.com/problems/island-perimeter/discuss/95001/clear-and-easy-java-solution
1. loop over the matrix and count the number of islands;
2. if the current dot is an island, count if it has any right neighbour or down neighbour;
3. the result is islands * 4 - neighbours * 2
"""
islands = 0
neighbours = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 1:
islands += 1
if i < len(grid)-1 and grid[i+1][j] == 1:
neighbours += 1
if j < len(grid[0])-1 and grid[i][j+1] == 1:
neighbours += 1
return islands * 4 - neighbours * 2
s = Solution()
grid = [[0,1,0,0],
[1,1,1,0],
[0,1,0,0],
[1,1,0,0]]
print(s.islandPerimeter(grid))
| {
"repo_name": "gengwg/leetcode",
"path": "463_island_perimeter.py",
"copies": "1",
"size": "1752",
"license": "apache-2.0",
"hash": 4439213260097270300,
"line_mean": 32.7115384615,
"line_max": 98,
"alpha_frac": 0.5936073059,
"autogenerated": false,
"ratio": 3.208791208791209,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4302398514691209,
"avg_score": null,
"num_lines": null
} |
# 469. Convex Polygon
# Given a list of points that form a polygon when joined sequentially,
# find if this polygon is convex (Convex polygon definition).
#
# Note:
#
# There are at least 3 and at most 10,000 points.
# Coordinates are in the range -10,000 to 10,000.
# You may assume the polygon formed by given points is always a simple polygon (Simple polygon definition). In other words, we ensure that exactly two edges intersect at each vertex, and that edges otherwise don't intersect each other.
#
# Example 1:
#
# [[0,0],[0,1],[1,1],[1,0]]
#
# Answer: True
#
# Explanation: (figure in the original problem statement; omitted here)
#
# Example 2:
#
# [[0,0],[0,10],[10,10],[10,0],[5,5]]
#
# Answer: False
#
# Explanation: (figure in the original problem statement; omitted here)
class Solution(object):
def isConvex(self, points):
"""
:type points: List[List[int]]
:rtype: bool
"""
def crossProduct(p0, p1, p2):
x0, y0 = p0
x1, y1 = p1
x2, y2 = p2
return (x2-x0) * (y1-y0) - (x1-x0) * (y2-y0)
size = len(points)
last = 0
for i in range(size):
p0, p1, p2 = points[i], points[(i+1) % size], points[(i+2) % size]
p = crossProduct(p0, p1, p2)
            if p * last < 0:
                return False
            # Only remember non-zero cross products so a collinear triple
            # (p == 0) can't mask a later sign flip.
            if p != 0:
                last = p
return True
print(Solution().isConvex([[0,0],[0,1],[1,1],[1,0]]))
print(Solution().isConvex([[0,0],[0,10],[10,10],[10,0],[5,5]]))
| {
"repo_name": "gengwg/leetcode",
"path": "469_convex_polygon.py",
"copies": "1",
"size": "1405",
"license": "apache-2.0",
"hash": 2459065192681128400,
"line_mean": 26.5490196078,
"line_max": 239,
"alpha_frac": 0.5580071174,
"autogenerated": false,
"ratio": 3.002136752136752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4060143869536752,
"avg_score": null,
"num_lines": null
} |
# 474. Ones and Zeroes
# In the computer world, using restricted resources to generate maximum benefit is what we always want to pursue.
#
# For now, suppose you are a dominator of m 0s and n 1s respectively.
# On the other hand, there is an array with strings consisting of only 0s and 1s.
#
# Now your task is to find the maximum number of strings that you can form with given m 0s and n 1s.
# Each 0 and 1 can be used at most once.
#
# Note:
#
# The given numbers of 0s and 1s will both not exceed 100
# The size of given string array won't exceed 600.
#
# Example 1:
#
# Input: Array = {"10", "0001", "111001", "1", "0"}, m = 5, n = 3
# Output: 4
#
# Explanation: These are in total 4 strings that can be formed using 5 0s and 3 1s: "10", "0001", "1", "0".
#
# Example 2:
#
# Input: Array = {"10", "0", "1"}, m = 1, n = 1
# Output: 2
#
# Explanation: You could form "10", but then you'd have nothing left. Better form "0" and "1".
# https://leetcode.com/problems/ones-and-zeroes/discuss/95814/c++-DP-solution-with-comments
class Solution(object):
# https://blog.csdn.net/fuxuemingzhu/article/details/82825032
    # My first instinct on seeing this problem was greedy, but whichever way
    # you bias the greedy choice, each pick changes which strings remain
    # feasible later, so greedy doesn't work.
    # For "maximum/minimum count" questions that don't require the concrete
    # selection, DP is the usual tool.
    # Define dp[m+1][n+1], where dp[i][j] is the most strings that can be
    # formed with i zeros and j ones.
    # For each string, count its zeros and ones; then
    # dp[i][j] = max(dp[i][j], dp[i - zeros][j - ones] + 1),
    # where dp[i - zeros][j - ones] is the best we can do on the remaining
    # budget if the current string is taken.
    # Time complexity is roughly O(M*N*L), where L is the array length;
    # space complexity is O(M*N).
# TLE
def findMaxForm(self, strs, m, n):
"""
:type strs: List[str]
:type m: int
:type n: int
:rtype: int
"""
dp = [[0 for _ in range(n+1)] for _ in range(m+1)]
for s in strs:
zeros, ones = 0, 0
# count zeros and ones
for c in s:
if c == '0':
zeros += 1
elif c == '1':
ones += 1
# dp[i][j] = the max number of strings that can be formed with i 0's and j 1's
# from the first few strings up to the current string s
# Catch: have to go from bottom right to top left
# Why? If a cell in dp is updated (because s is selected),
# we should be adding 1 to dp[i][j] from the previous iteration (when we were not considering s)
# If we go from top left to bottom right, we would be using results from this iteration => overcounting
for i in range(m, zeros-1, -1):
for j in range(n, ones-1, -1):
dp[i][j] = max(dp[i][j], dp[i-zeros][j-ones] + 1)
return dp[m][n]
print(Solution().findMaxForm(["10", "0001", "111001", "1", "0"], 5, 3))
print(Solution().findMaxForm(["10", "0", "1"], 1, 1))
| {
"repo_name": "gengwg/leetcode",
"path": "474_ones_and_zeros.py",
"copies": "1",
"size": "3159",
"license": "apache-2.0",
"hash": 7661569853014096000,
"line_mean": 36.602739726,
"line_max": 119,
"alpha_frac": 0.604007286,
"autogenerated": false,
"ratio": 2.40578439964943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8495054344185335,
"avg_score": 0.0029474682928191023,
"num_lines": 73
} |
# 475. Heaters
# Winter is coming! Your first job during the contest is to design a standard heater
# with fixed warm radius to warm all the houses.
#
# Now, you are given positions of houses and heaters on a horizontal line,
# find out minimum radius of heaters so that all houses could be covered by those heaters.
#
# So, your input will be the positions of houses and heaters separately,
# and your expected output will be the minimum radius standard of heaters.
#
# Note:
#
# Numbers of houses and heaters you are given are non-negative and will not exceed 25000.
# Positions of houses and heaters you are given are non-negative and will not exceed 10^9.
# As long as a house is in the heaters' warm radius range, it can be warmed.
# All the heaters follow your radius standard and the warm radius will be the same.
#
# Example 1:
#
# Input: [1,2,3],[2]
# Output: 1
# Explanation: The only heater was placed in the position 2,
# and if we use the radius 1 standard, then all the houses can be warmed.
#
# Example 2:
#
# Input: [1,2,3,4],[1,4]
# Output: 1
# Explanation: The two heater was placed in the position 1 and 4.
# We need to use radius 1 standard, then all the houses can be warmed.
#
class Solution(object):
# https://leetcode.com/problems/heaters/discuss/95878/10-lines-python-with-easy-understanding
    # Add two imaginary heaters at infinity; then every house always lies
    # between two heaters. For each house, take the distance to the nearer of
    # its two surrounding heaters, and keep the maximum over all houses.
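    # Example trace (added): for houses [1,2,3] and heaters [-inf, 2, +inf],
    # house 1 -> min(1-(-inf), 2-1) = 1, house 2 -> 0, house 3 -> min(3-2, inf) = 1;
    # the answer is the maximum of these per-house distances, i.e. 1.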
def findRadius(self, houses, heaters):
"""
:type houses: List[int]
:type heaters: List[int]
:rtype: int
"""
houses.sort()
heaters.sort()
heaters = [float('-inf')] + heaters + [float('inf')]
ans, i = 0, 0
for house in houses:
while house > heaters[i+1]:
i += 1
dis = min(house - heaters[i], heaters[i+1] - house)
ans = max(ans, dis)
return ans
print(Solution().findRadius([1,2,3],[2]))
print(Solution().findRadius([1,2,3,4],[1,4]))
| {
"repo_name": "gengwg/leetcode",
"path": "475_heaters.py",
"copies": "1",
"size": "2085",
"license": "apache-2.0",
"hash": -8870378207636530000,
"line_mean": 34.9482758621,
"line_max": 98,
"alpha_frac": 0.6633093525,
"autogenerated": false,
"ratio": 3.2476635514018692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9390242956671995,
"avg_score": 0.0041459894459749845,
"num_lines": 58
} |
"""47. Permutations II
https://leetcode.com/problems/permutations-ii/
Given a collection of numbers that might contain duplicates,
return all possible unique permutations.
Example:
Input: [1,1,2]
Output:
[
[1,1,2],
[1,2,1],
[2,1,1]
]
Insertion idea (build each step's permutations from the previous step):
[]: []
[1]: [1]
[1,2]: [1, 2], [2, 1]
[1,2,1]: [1,1,2], [1,2,1], [2,1,1]
"""
import copy
from typing import List
class Solution:
def permute_unique_dfs(self, nums: List[int]) -> List[List[int]]:
ans = []
def dfs(permutation: List[int], nums: List[int]):
nonlocal ans
if not nums:
ans.append(permutation)
else:
used = []
for i in range(len(nums)):
if nums[i] not in used:
used.append(nums[i])
dfs(permutation + [nums[i]], nums[:i] + nums[i + 1:])
dfs([], nums)
return ans
def permute_unique(self, nums: List[int]) -> List[List[int]]:
length = len(nums)
if length == 0:
return [[]]
ans = []
pre_nums = nums[:-1]
pre_ans = self.permute_unique(pre_nums)
is_dup = nums[-1] in pre_nums
for permutation in pre_ans:
for i in range(length):
temp = copy.deepcopy(permutation)
temp.insert(i, nums[-1])
if is_dup and ans:
if temp in ans:
continue
ans.append(temp)
return ans
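# Minimal check (added; not part of the original file): both strategies
# should agree on the unique permutations of the sample input.
if __name__ == '__main__':
    s = Solution()
    expected = [[1, 1, 2], [1, 2, 1], [2, 1, 1]]
    assert sorted(s.permute_unique_dfs([1, 1, 2])) == expected
    assert sorted(s.permute_unique([1, 1, 2])) == expected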
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/permutations_ii.py",
"copies": "1",
"size": "1512",
"license": "mit",
"hash": -3907643228500821500,
"line_mean": 23,
"line_max": 77,
"alpha_frac": 0.4854497354,
"autogenerated": false,
"ratio": 3.4285714285714284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9414021163971429,
"avg_score": 0,
"num_lines": 63
} |
"""48. Rotate Image
https://leetcode.com/problems/rotate-image/
You are given an n x n 2D matrix representing an image.
Rotate the image by 90 degrees (clockwise).
Note:
You have to rotate the image in-place, which means you have to modify the input
2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.
Example 1:
Given input matrix =
[
[1,2,3],
[4,5,6],
[7,8,9]
],
rotate the input matrix in-place such that it becomes:
[
[7,4,1],
[8,5,2],
[9,6,3]
]
Example 2:
Given input matrix =
[
[ 5, 1, 9,11],
[ 2, 4, 8,10],
[13, 3, 6, 7],
[15,14,12,16]
],
rotate the input matrix in-place such that it becomes:
[
[15,13, 2, 5],
[14, 3, 4, 1],
[12, 6, 8, 9],
[16, 7,10,11]
]
"""
from typing import List
class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
n = len(matrix)
for i in range((n + 1) // 2):
for j in range(i, n - 1 - i):
temp = matrix[i][j]
matrix[i][j] = matrix[n - 1 - j][i]
matrix[n - 1 - j][i] = matrix[n - 1 - i][n - 1 - j]
matrix[n - 1 - i][n - 1 - j] = matrix[j][n - 1 - i]
matrix[j][n - 1 - i] = temp
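# Quick check (added for illustration; not part of the original file): the
# 3x3 example rotated in place should match the expected output.
if __name__ == '__main__':
    m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    Solution().rotate(m)
    assert m == [[7, 4, 1], [8, 5, 2], [9, 6, 3]]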
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/rotate_image.py",
"copies": "1",
"size": "1290",
"license": "mit",
"hash": 2743466968207842300,
"line_mean": 18.5454545455,
"line_max": 79,
"alpha_frac": 0.5356589147,
"autogenerated": false,
"ratio": 2.816593886462882,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3852252801162882,
"avg_score": null,
"num_lines": null
} |
# 4/9/2014
# Charles O. Goddard
"""
craigslist: Functions for scraping Craigslist postings.
"""
import dateutil.parser
import unicodedata
import requests
import bs4
def sanitize(text):
return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
class Posting(object):
"""
A posting on craigslist.
"""
def __init__(self, url, title=None):
self.url = url
self._title = sanitize(title).strip()
self._body = None
self._posted = None
self.id = int(url.split('/')[-1].split('.')[0])
def fetch_details(self):
"""
Fetch posting details (title, body text) from Craigslist.
"""
# Make HTTP request
r = requests.get(self.url)
if r.status_code != 200:
return False
# Parse returned HTML
soup = bs4.BeautifulSoup(r.text, 'html5lib')
if not soup.title.string:
# Post has been deleted
self._body = '<DELETED>'
self._posted = -1
return False
if self._title is None:
self._title = sanitize(soup.title.string).strip()
postingbody = soup.find('section', id='postingbody')
if postingbody:
try:
self._body = ' '.join([sanitize(unicode(x)) for x in postingbody.contents]).replace('<br/>','\n').strip()
except UnicodeEncodeError, e:
print self.url
print postingbody
raise
else:
self._body = None
posted = soup.time.get('datetime')
self._posted = dateutil.parser.parse(posted)
return True
@property
def title(self):
if self._title is None:
self.fetch_details()
return self._title
@property
def body(self):
if self._body is None:
self.fetch_details()
return self._body
@property
def posted(self):
if self._posted is None:
self.fetch_details()
return self._posted
def __repr__(self):
return '{!s}({!r})'.format(type(self).__name__, self.url)
def __str__(self):
return '{} ({})'.format(self.title, self.url)
def postings(location='boston', section='cas'):
'''
List all craigslist postings for a given location and section.
    Returns a generator yielding Posting objects.
'''
base_url = 'http://{0}.craigslist.org'.format(location)
idx = 0
while True:
# Get next hundred postings
url = '{0}/{1}/index{2:03}.html'.format(base_url, section, idx * 100)
r = requests.get(url)
if r.status_code != 200:
raise ValueError(r.status_code)
# Parse HTML
soup = bs4.BeautifulSoup(r.text, 'html5lib')
# Find and yield postings
for span in soup.find_all('span', 'pl'):
a = span.a
url = a.get('href')
if not url.startswith(base_url):
url = base_url + url
yield Posting(url, a.string)
idx += 1
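# Hedged usage sketch (added; not part of the original module): print the
# first few postings. This assumes network access and that Craigslist's
# markup still matches what this scraper expects.
if __name__ == '__main__':
    import itertools
    for posting in itertools.islice(postings('boston', 'cas'), 5):
        print posting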
| {
"repo_name": "cg123/craigslist-cupid",
"path": "craigslist.py",
"copies": "1",
"size": "2551",
"license": "bsd-3-clause",
"hash": -4500231624835522000,
"line_mean": 20.8034188034,
"line_max": 109,
"alpha_frac": 0.654253234,
"autogenerated": false,
"ratio": 2.9559675550405564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8873563887597853,
"avg_score": 0.04733138028854055,
"num_lines": 117
} |
# Project Euler, Problem 49: Prime permutations
from collections import defaultdict
from prime import prime
class Solve(object):
def __init__(self):
self.pdict = defaultdict(list)
self.primes = list(prime.primes_between(end=10000))
def solve(self):
def find_eq_diff_subseq(l):
if len(l) < 3:
return False
subseqs = []
for first_idx, first in enumerate(l):
for second in l[first_idx+1:]:
eq_diff_seq = [first, second]
delta = second - first
nxt = second + delta
while nxt in l:
eq_diff_seq.append(nxt)
nxt += delta
if len(eq_diff_seq) >= 3:
subseqs.append(eq_diff_seq)
return subseqs
for p in self.primes:
if p > 1000:
key = ''.join(sorted(str(p)))
self.pdict[key].append(p)
for k, value in self.pdict.iteritems():
eq_diff_prime = find_eq_diff_subseq(value)
if eq_diff_prime:
print eq_diff_prime
s = Solve()
s.solve()
| {
"repo_name": "daicang/Euler",
"path": "p49.py",
"copies": "1",
"size": "1168",
"license": "mit",
"hash": 1908258023264839200,
"line_mean": 26.1627906977,
"line_max": 59,
"alpha_frac": 0.4700342466,
"autogenerated": false,
"ratio": 3.986348122866894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4956382369466894,
"avg_score": null,
"num_lines": null
} |
# 4BitVerticalBargraph2x.py
#
# A DEMO 4 bit slow analogue bargraph generator in colour for STANDARD Python 2.6.x and Linux...
# This is a vertical version of the horizontal one also given away by myself.
# It is written so that anyone can understand how it works.
#
# (Original copyright, (C)2011, B.Walker, G0LCU.)
# Issued initially to LXF as Public Domain, and to other sites later.
#
# Saved as 4BitVerticalBargraph2x.py wherever you like.
#
# This DEMO goes from safe green, to warning amber, to danger red, with a critical
# error beep above 14 on the vertical scale...
# It is a slow "AT A GLANCE" display for quick assessments, not for accuracy.
#
# Two system commands are required, "clear" and "setterm", for this to work.
# I assume that these are available on all recent and current Linux distros.
# The device /dev/audio is used so this must be free also.
#
# It is useful for quick "AT A GLANCE" readings from say an 8 bit ADC used as a simple
# voltmeter, ammeter, etc. Getting a digital readout is SO simple I left it out this time...
#
# To run use the following from inside a Python prompt...
# >>> exec(open("/full/path/to/code/4BitVerticalBargraph2x.py").read())
# OR...
# >>> execfile("/full/path/to/code/4BitVerticalBargraph2x.py")
#
# This looks like an "LED" style "VU" display...
# Add the required imports for this DEMO.
import os
import random
import time
# Just for this DEMO set up variables as global...
global count
global row
global blank
global greenlines
global yellowlines
global redlines
global waveform
# Startup variable values here.
count=0
row=0
blank="(C)2011, B.Walker, G0LCU."
greenlines=blank
yellowlines=blank
redlines=blank
# This is a square wave binary for the critical error beep.
waveform=chr(15)+chr(45)+chr(63)+chr(45)+chr(15)+chr(3)+chr(0)+chr(3)
def main():
# Disable the cursor as it looks much nicer... ;o)
os.system("setterm -cursor off")
while 1:
# Run continuously and use Ctrl-C to STOP!
count=15
blank="\033[0m "
# Generate a byte value as though grabbed from a serial, parallel or USB port.
row=int(random.random()*256)
# Now divide by 16 to simulate a 4 bit value.
row=int(row/16)
# Although this should never occur, don't allow any error.
if row>=15: row=15
if row<=0: row=0
while count>=0:
# Do a full, clean, clear screen and start looping.
os.system("clear"),chr(13)," ",chr(13),
print "\033[0mFour Bit Level Vertical Analogue Bar Graph Display..."
print "Original copyright, (C)2011, B.Walker, G0LCU."
print "Issued to LXF on 24-04-2011 as Public Domain."
print
print blank+"\033[1;31m15 __ "
redlines=blank+"\033[1;31m14 __ "
if row>=15: redlines=redlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print redlines
redlines=blank+"\033[1;31m13 __ "
if row>=14: redlines=redlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print redlines
yellowlines=blank+"\033[1;33m12 __ "
if row>=13: yellowlines=yellowlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print yellowlines
yellowlines=blank+"\033[1;33m11 __ "
if row>=12: yellowlines=yellowlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print yellowlines
yellowlines=blank+"\033[1;33m10 __ "
if row>=11: yellowlines=yellowlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print yellowlines
greenlines=blank+"\033[1;32m 9 __ "
if row>=10: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print greenlines
greenlines=blank+"\033[1;32m 8 __ "
if row>=9: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print greenlines
greenlines=blank+"\033[1;32m 7 __ "
if row>=8: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print greenlines
greenlines=blank+"\033[1;32m 6 __ "
if row>=7: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print greenlines
greenlines=blank+"\033[1;32m 5 __ "
if row>=6: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print greenlines
greenlines=blank+"\033[1;32m 4 __ "
if row>=5: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print greenlines
greenlines=blank+"\033[1;32m 3 __ "
if row>=4: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print greenlines
greenlines=blank+"\033[1;32m 2 __ "
if row>=3: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print greenlines
greenlines=blank+"\033[1;32m 1 __ "
if row>=2: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
print greenlines
greenlines=blank+"\033[1;32m 0 __ "
if row>=1: greenlines=greenlines+unichr(0x2588)+unichr(0x2588)
count=count-1
if row==0: greenlines=greenlines+"__"
count=count-1
print greenlines
# Reset to default colours...
print
print "\033[0mPress Ctrl-C to stop..."
if row<=14: time.sleep(1)
if row==15:
# Set audio timing to zero, "0".
count=0
# Open up the audio device to write to.
# This could be /dev/dsp also...
audio=open("/dev/audio", "wb")
# A "count" value of 1 = 1mS, so 1000 = 1S.
while count<=1000:
# Send 8 bytes of data to the audio device 1000 times.
# This is VERY close to 1KHz and almost sinewave.
audio.write(waveform)
count=count+1
# Close the audio device access.
audio.close()
# Enable the cursor again if it ever gets here... ;oO
os.system("setterm -cursor on")
main()
# End of DEMO...
# Enjoy finding simple solutions to often very difficult problems...
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577675_Four_Bit_Vertical_Coloured_Analogue_Bar_Graph/recipe-577675.py",
"copies": "1",
"size": "5615",
"license": "mit",
"hash": -4885549676701900000,
"line_mean": 30.0220994475,
"line_max": 96,
"alpha_frac": 0.6940338379,
"autogenerated": false,
"ratio": 2.916883116883117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41109169547831165,
"avg_score": null,
"num_lines": null
} |
## 4. Column split selection ##
def find_best_column(data, target_name, columns):
# Fill in the logic here to automatically find the column in columns to split on.
# data is a dataframe.
# target_name is the name of the target variable.
# columns is a list of potential columns to split on.
information_gains = []
# Loop through and compute information gains.
for col in columns:
information_gain = calc_information_gain(data, col, "high_income")
information_gains.append(information_gain)
# Find the name of the column with the highest gain.
highest_gain_index = information_gains.index(max(information_gains))
highest_gain = columns[highest_gain_index]
return highest_gain
# A list of columns to potentially split income with.
columns = ["age", "workclass", "education_num", "marital_status", "occupation", "relationship", "race", "sex", "hours_per_week", "native_country"]
income_split = find_best_column(income,'high_income',columns)
## 5. Creating a simple recursive algorithm ##
# We'll use lists to store our labels for nodes (when we find them).
# Lists can be accessed inside our recursive function, whereas integers can't.
# Look at the python missions on scoping for more information on this.
label_1s = []
label_0s = []
def id3(data, target, columns):
# The pandas.unique method will return a list of all the unique values in a Series.
unique_targets = pandas.unique(data[target])
if len(unique_targets) == 1:
# Insert code here to append 1 to label_1s or 0 to label_0s based on what we should label the node.
# See lines 2 and 3 in the algorithm.
# Returning here is critical -- if we don't, the recursive tree will never finish, and run forever.
# See our example above for when we returned.
if unique_targets == 1:
label_1s.append(1)
else:
label_0s.append(0)
return
# Find the best column to split on in our data.
best_column = find_best_column(data, target, columns)
# Find the median of the column.
column_median = data[best_column].median()
# Create the two splits.
left_split = data[data[best_column] <= column_median]
right_split = data[data[best_column] > column_median]
# Loop through the splits and call id3 recursively.
for split in [left_split, right_split]:
# Call id3 recursively to process each branch.
id3(split, target, columns)
# Create the dataset that we used in the example in the last screen.
data = pandas.DataFrame([
[0,20,0],
[0,60,2],
[0,40,1],
[1,25,1],
[1,35,2],
[1,55,1]
])
# Assign column names to the data.
data.columns = ["high_income", "age", "marital_status"]
# Call the function on our data to set the counters properly.
id3(data, "high_income", ["age", "marital_status"])
## 6. Storing the tree ##
# Create a dictionary to hold the tree. This has to be outside the function so we can access it later.
tree = {}
# This list will let us number the nodes. It has to be a list so we can access it inside the function.
nodes = []
def id3(data, target, columns, tree):
unique_targets = pandas.unique(data[target])
# Assign the number key to the node dictionary.
nodes.append(len(nodes) + 1)
tree["number"] = nodes[-1]
if len(unique_targets) == 1:
# Insert code here to assign the "label" field to the node dictionary.
if unique_targets == 1:
tree["label"] = 1
else:
tree["label"] = 0
return
best_column = find_best_column(data, target, columns)
column_median = data[best_column].median()
# Insert code here to assign the "column" and "median" fields to the node dictionary.
tree["column"] = best_column
tree["median"] = column_median
left_split = data[data[best_column] <= column_median]
right_split = data[data[best_column] > column_median]
split_dict = [["left", left_split], ["right", right_split]]
for name, split in split_dict:
tree[name] = {}
id3(split, target, columns, tree[name])
# Call the function on our data to set the counters properly.
id3(data, "high_income", ["age", "marital_status"], tree)
## 7. A prettier tree ##
def print_with_depth(string, depth):
# Add space before a string.
prefix = " " * depth
# Print a string, appropriately indented.
print("{0}{1}".format(prefix, string))
def print_node(tree, depth):
# Check for the presence of label in the tree.
if "label" in tree:
# If there's a label, then this is a leaf, so print it and return.
print_with_depth("Leaf: Label {0}".format(tree["label"]), depth)
# This is critical -- without it, you'll get infinite recursion.
return
# Print information about what the node is splitting on.
print_with_depth("{0} > {1}".format(tree["column"], tree["median"]), depth)
# Create a list of tree branches.
branches = [tree["left"], tree["right"]]
# Insert code here to recursively call print_node on each branch.
# Don't forget to increment depth when you pass it in!
for b in branches:
print_node(b, depth+1)
print_node(tree, 0)
## 9. Automatic predictions ##
def predict(tree, row):
if "label" in tree:
return tree["label"]
column = tree["column"]
median = tree["median"]
# Insert code here to check if row[column] is less than or equal to median
# If it's less than or equal, return the result of predicting on the left branch of the tree
# If it's greater, return the result of predicting on the right branch of the tree
# Remember to use the return statement to return the result!
if row[column] <= median:
return predict(tree["left"], row)
else:
return predict(tree["right"],row)
# Print the prediction for the first row in our data.
print(predict(tree, data.iloc[0]))
## 10. Making multiple predictions ##
new_data = pandas.DataFrame([
[40,0],
[20,2],
[80,1],
[15,1],
[27,2],
[38,1]
])
# Assign column names to the data.
new_data.columns = ["age", "marital_status"]
def batch_predict(tree, df):
return df.apply(lambda y: predict(tree,y), axis = 1)
predictions = batch_predict(tree, new_data) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Decision Trees/Building a decision tree-141.py",
"copies": "1",
"size": "6391",
"license": "mit",
"hash": -3360286781119124000,
"line_mean": 33.7391304348,
"line_max": 146,
"alpha_frac": 0.6446565483,
"autogenerated": false,
"ratio": 3.6899538106235568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4834610358923557,
"avg_score": null,
"num_lines": null
} |
# 4 December 2015
import argparse
import os
import re
import shlex
import subprocess
import sys
def error(*msg, code=1):
print("error:", *msg, file=sys.stderr)
exit(code)
extensions = {"mp3", "aac", "mka", "dts", "flac", "ogg", "m4a", "ac3", "opus", "wav", "aiff", "aif"}
_re_ext = r'(' + r'|'.join(extensions) + r')'
def _patgen_title(query):
query = r'[^/]*'.join(re.escape(p) for p in query.split())
return query + r'[^/]*\.' + _re_ext + r'$'
def _patgen_album(query):
query = r'[^/]*'.join(re.escape(p) for p in query.split())
return query + r'[^/]*/.*\.' + _re_ext + r'$'
def _patgen_playlist(query):
gen_query = r'[^/]*'.join(re.escape(p) for p in query.split())
if not query.endswith(".txt"):
gen_query += r'[^/]*\.txt$'
else:
gen_query += r'[^/]*$'
return gen_query
def _patgen_general(query):
# all slashes must be explicit, but spaces still count as wildcard
segs = (p.split() for p in query.split('/'))
query = r'.*/.*'.join(r'[^/]*'.join(re.escape(p) for p in s) for s in segs)
return query + r'.*\.' + _re_ext + r'$'
pattern_generators = {
'@': _patgen_title,
'@@': _patgen_album,
'%': _patgen_playlist,
'$': _patgen_general,
}
sorted_patterns = list(pattern_generators.items())
sorted_patterns.sort(key=lambda pair: len(pair[0]), reverse=True)
loading_sentinel = object()
class Searcher:
def __init__(self, *, music_dir=None, playlist_dir=None, debug=False, quiet=False):
self.debug_flag = debug
self.quiet = quiet
if music_dir is None:
music_dir = os.environ.get('MUSPLAY_MUSIC')
if music_dir is None:
error("missing environment variable MUSPLAY_MUSIC", code=2)
if playlist_dir is None:
playlist_dir = os.environ.get('MUSPLAY_PLAYLISTS')
if playlist_dir is None:
playlist_dir = os.path.join(music_dir, 'Playlists')
if not os.path.exists(playlist_dir):
playlist_dir = music_dir
elif not os.path.exists(playlist_dir):
self.warn("MUSPLAY_PLAYLISTS folder doesn't exist {!r}".format(playlist_dir))
self.music_dir = music_dir
self.playlist_dir = playlist_dir
self.loaded_playlists = {}
self.paths = []
def debug(self, *msg):
if self.debug_flag:
print("debug:", *msg, file=sys.stderr)
def warn(self, *msg):
if not self.quiet:
print("warning:", *msg, file=sys.stderr)
def call_searcher(self, pattern, folder):
cmd = ['find', '-Ef', folder, '--', '-iregex', '.*' + pattern + '.*']
self.debug(' '.join(shlex.quote(arg) for arg in cmd))
result = subprocess.run(cmd, stdout=subprocess.PIPE)
if result.returncode == 0:
return result.stdout.decode('utf-8').strip().split('\n')
else:
return None
def find_tracks(self, patterns):
"""Attempts to find the music tracks by the given patterns"""
paths = []
for pattern in patterns:
if not pattern:
continue
# See if pattern matches one of the prefixes
match = None
for prefix, gen in sorted_patterns:
if pattern.startswith(prefix):
match = (prefix, gen)
break
if match:
prefix, gen = match
pat = gen(pattern[len(prefix):].lstrip())
self.debug("match {} => {!r} ({!r})".format(prefix, pattern, pat))
if prefix == '%':
# special playlist search
result = self.call_searcher(pat, self.playlist_dir)
self.debug("playlist search result: {!r}".format(result))
if result:
res = []
for playlist in result:
res += self.parse_playlist(playlist)
result = res
else:
result = self.call_searcher(pat, self.music_dir)
if result:
paths += result
else:
self.warn("no tracks found for pattern {!r}".format(pattern))
continue
# Otherwise it must be a simple path
ext = os.path.splitext(pattern)[1]
if ext == '.txt':
pattern = os.path.join(self.playlist_dir, pattern)
paths += self.parse_playlist(pattern)
continue
if ext[1:] in extensions:
pattern = os.path.join(self.music_dir, pattern)
paths.append(pattern)
continue
self.warn("ignoring unknown extension {!r} for pattern {!r}".format(ext, pattern))
return paths
def parse_playlist(self, playlist):
playlist = os.path.realpath(playlist)
cached = self.loaded_playlists.get(playlist)
        if cached is loading_sentinel:
self.warn("recursive playlists are not supported")
return []
if cached:
self.debug("using cache for {!r}".format(playlist))
return self.loaded_playlists[playlist]
        self.loaded_playlists[playlist] = loading_sentinel
self.debug("trying to parse {!r}".format(playlist))
try:
with open(playlist, 'r') as f:
data = f.read()
except IOError:
self.warn("could not read playlist file {!r}".format(playlist))
return []
patterns = []
for line in data.split('\n'):
line = line.strip()
if line.startswith('#'):
continue
patterns.append(line)
if not patterns:
self.warn("no patterns in playlist file: {!r}".format(playlist))
paths = self.find_tracks(patterns)
self.loaded_playlists[playlist] = paths
return paths
description = """
Find music tracks by track and album titles.
environment variables:
MUSPLAY_MUSIC where to find music tracks (required)
MUSPLAY_PLAYLISTS where to find playlists
(default: $MUSPLAY_MUSIC/Playlists)
"""
epilog="""
pattern prefixes:
@ search by track title (filename minus extension)
@@ search by album title (directory name)
% search for playlists in the playlist directory (see above)
$ search by the entire path to the file
no prefix use pattern as a literal path to a file or playlist
"""
def main(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description=description, epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("pattern", nargs="+",
help="the patterns to search with (see pattern prefixes below)")
parser.add_argument("-d", "--debug", action="store_true", default=False,
help="print extra information for debugging")
parser.add_argument("-q", "--quiet", action="store_true", default=False,
help="suppress non-fatal warnings")
parser.add_argument("--exclude", metavar="pattern", nargs="+",
help="exclude anything matched by the given patterns")
parsed = parser.parse_args(args)
searcher = Searcher(debug=parsed.debug, quiet=parsed.quiet)
paths = searcher.find_tracks(parsed.pattern)
if parsed.exclude:
excluded = set(searcher.find_tracks(parsed.exclude))
paths = (p for p in paths if not p in excluded)
for path in paths:
print(path)
if __name__ == '__main__':
main()
| {
"repo_name": "Machtan/musplay",
"path": "search.py",
"copies": "1",
"size": "7668",
"license": "mit",
"hash": -1483195589935731700,
"line_mean": 31.218487395,
"line_max": 100,
"alpha_frac": 0.5594679186,
"autogenerated": false,
"ratio": 3.9709994821336094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030467400733609,
"avg_score": null,
"num_lines": null
} |
''' 4-error_evolution.py
=========================
AIM: Similarly to 1-orbits_computed.py, checks the error evolution of the computed orbits
INPUT: files:
- all flux_*.dat files in <orbit_id>_flux/
- <orbit_id>_misc/orbits.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_misc/ : one file 'error_evolution.dat' containing the error evolution data
CMD: python 4-error_evolution.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: Takes a long time to compute; see resources/routines.py for more explanations on compare_two_orbits().
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import time
from resources.routines import *
from resources.TimeStepping import *
###########################################################################
### PARAMETERS
# Orbit id
orbit_id = 1001
# Error threshold
p = 0.1
# Show plots and detailled analysis ?
show = False
###########################################################################
### INITIALISATION
# File name for the computed orbit file
orbits_file = 'orbits.dat'
index_file = 'index.dat'
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
# setup a few variables
shift = 0
###########################################################################
### Load which orbits were computed
start = time.time()
orbits = np.loadtxt(folder_misc+orbits_file,dtype='i4')
data = np.zeros([np.shape(orbits)[0]-1,6])
previous_part = -1
for ii, orbit_current in enumerate(orbits[:,0]):
if ii == 0: continue
preceeding = orbits[ii-1,0]
shift = 1
pp, sl = compare_two_orbits(preceeding, orbit_current, orbit_id, folder=folder_flux,shift=shift,return_max=True)
pp_old = pp
if pp > p :
shift = 2
pp, sl = compare_two_orbits(preceeding, orbit_current, orbit_id, folder=folder_flux, shift = shift,return_max=True)
if pp > p :
shift = 0
pp, sl = compare_two_orbits(preceeding, orbit_current, orbit_id, folder=folder_flux, shift = shift,return_max=True)
if pp > p :
pp = pp_old
shift = 1
data[ii-1] = preceeding, orbit_current, orbits[ii,1], pp, sl, shift
percentage_done = round(float(ii)/float(np.shape(orbits)[0])*100,1)
if show: print percentage_done, preceeding, orbit_current, pp, sl, shift
else:
sys.stdout.write( '\r%3.1f%%' % (percentage_done) )
sys.stdout.flush()
header = 'ref,val,step,error,max_sl,shift'
fname = 'error_evolution.dat'
np.savetxt(folder_misc+fname,data,header=header, fmt='%4d,%4d,%2d,%1.8f,%2.8f,%d')
end = time.time()
elapsed_time = round((end-start)/60.,1)
print 'Done. Time needed : %3.1f minutes,' % elapsed_time
| {
"repo_name": "kuntzer/SALSA-public",
"path": "4_error_evolution.py",
"copies": "1",
"size": "2915",
"license": "bsd-3-clause",
"hash": -6990649964198226000,
"line_mean": 28.15,
"line_max": 117,
"alpha_frac": 0.6202401372,
"autogenerated": false,
"ratio": 3.09447983014862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9090934448884584,
"avg_score": 0.024757103692807057,
"num_lines": 100
} |
## 4. Implementing Binary Search: Part 1 ##
# A function to extract a player's last name
def format_name(name):
return name.split(" ")[1] + ", " + name.split(" ")[0]
# The length of the data set
length = len(nba)
# Implement the player_age function. For now, just return what the instructions specify
def player_age(name):
# We need to format our name appropriately for successful comparison
name = format_name(name)
# First guess halfway through the list
first_guess_index = math.floor(length/2)
first_guess = format_name(nba[first_guess_index][0])
# Check where we should continue searching
if name < first_guess:
return "earlier"
elif name > first_guess:
return "later"
else:
return "found"
johnson_odom_age = player_age("Darius Johnson-Odom")
young_age = player_age("Nick Young")
adrien_age = player_age("Jeff Adrien")
## 5. Implementing Binary Search: Part 2 ##
# A function to extract a player's last name
def format_name(name):
return name.split(" ")[1] + ", " + name.split(" ")[0]
# The length of the data set
length = len(nba)
# Implement the player_age function. For now, just return what the instructions specify
def player_age(name):
# We need to format our name appropriately for successful comparison
name = format_name(name)
# Initial bounds of the search
upper_bound = length - 1
lower_bound = 0
# Index of first split
first_guess_index = math.floor(length/2)
first_guess = format_name(nba[first_guess_index][0])
# If the name comes before our guess
# Adjust the bounds as needed
# Else if the name comes after our guess
# Adjust the bounds as needed
# Else
# Player found, so return first guess
# Set the index of the second split
# Find and format the second guess
# Return the second guess
if name < first_guess:
upper_bound = first_guess_index - 1
        # lower_bound = 0
elif name > first_guess:
lower_bound = first_guess_index + 1
        # upper_bound = length - 1
else:
return first_guess
second_guess_index = math.floor((upper_bound + lower_bound) / 2)
second_guess = format_name(nba[second_guess_index][0])
return second_guess
gasol_age = player_age("Pau Gasol")
pierce_age = player_age("Paul Pierce")
## 7. Implementing Binary Search: Part 3 ##
# A function to extract a player's last name
def format_name(name):
return name.split(" ")[1] + ", " + name.split(" ")[0]
# The length of the data set
length = len(nba)
# Implement the player_age function. For now, just return what the instructions specify
def player_age(name):
# We need to format our name appropriately for successful comparison
name = format_name(name)
# Bounds of the search
upper_bound = length - 1
lower_bound = 0
# Index of first split. It's important to understand how we compute this
index = math.floor((upper_bound + lower_bound) / 2)
# First, guess halfway through the list
guess = format_name(nba[index][0])
# Keep guessing until it finds the name. Use a while loop here.
# Check where our guess is in relation to the name we're requesting,
# and adjust our bounds as necessary (multiple lines here).
# If we have found the name, we wouldn't be in this loop, so
# we shouldn't worry about that case
# Find the new index of our guess
# Find and format the new guess value
# When our loop terminates, we have found the right NBA player's name
while(guess != name):
if guess < name:
lower_bound = index + 1
elif guess > name:
upper_bound = index - 1
index = math.floor((upper_bound + lower_bound ) / 2)
guess = format_name(nba[index][0])
return "found"
carmelo_age = player_age("Carmelo Anthony")
## 8. Implementing Binary Search: Part 4 ##
# A function to extract a player's last name
def format_name(name):
return name.split(" ")[1] + ", " + name.split(" ")[0]
# The length of the data set
length = len(nba)
# Implement the player_age function. For now, just return what the instructions specify
def player_age(name):
name = format_name(name)
# Set the initial upper bound of the search
# Set the initial lower bound of the search
# Set the index of the first split (remember to use math.floor)
# First guess at index (remember to format the guess)
# Run search code until the name is equal to the guess, or upper bound is less than lower bound
# If name comes before the guess
# Change the appropriate bound
# Else (name comes after the guess)
# Change the appropriate bound
# Set the index of our next guess (remember to use math.floor)
# Retrieve and format our next guess
### Now that our loop has terminated, we must find out why ###
# If the name is equal to the guess
# Return the age of the player at index (column index 2 in data set)
# Else
# Return -1, because the function didn't find our player
def player_age(name):
name = format_name(name)
upper_bound = length - 1
lower_bound = 0
index = math.floor((upper_bound + lower_bound) / 2)
guess = format_name(nba[index][0])
while name != guess and upper_bound >= lower_bound:
if name < guess:
upper_bound = index - 1
else:
lower_bound = index + 1
index = math.floor((lower_bound + upper_bound) / 2)
guess = format_name(nba[index][0])
if name == guess:
return nba[index][2]
else:
return -1
curry_age = player_age("Stephen Curry")
griffin_age = player_age("Blake Griffin")
jordan_age = player_age("Michael Jordan") | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Structures & Algorithms/Binary Search-93.py",
"copies": "1",
"size": "5812",
"license": "mit",
"hash": -4533982484505742000,
"line_mean": 35.33125,
"line_max": 99,
"alpha_frac": 0.6459050241,
"autogenerated": false,
"ratio": 3.7066326530612246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4852537677161225,
"avg_score": null,
"num_lines": null
} |
from __future__ import with_statement
import os
import logging
import pprint
import time
import sys
import datetime
import itertools
import math
import collections
import random
import traceback
import calendar
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from django.utils import simplejson
from google.appengine.ext import db
from google.appengine.api import users as gaeusers
import oauth
import gmemsess
import foursquare
# ------------------------------------------------------------
# Data model.
# ------------------------------------------------------------
# Our one database model. One record per user. Stores user history
# and other user info.
class History(db.Model):
uid = db.IntegerProperty(required=True)
history = db.TextProperty()
history_date = db.DateTimeProperty()
public = db.BooleanProperty()
name = db.StringProperty()
picture = db.StringProperty()
def get_user_record(uid):
uid = int(uid)
user_q = History.gql('WHERE uid = :1', uid)
users = user_q.fetch(2)
if len(users) > 0:
if len(users) > 1:
logging.error('Multiple records for uid %s: %s' % (uid, users))
return users[0]
else:
logging.warn('User %s not in database.' % (uid,))
return None
def make_user_record(uid, name, picture):
return History(uid=uid, public=False, name=name, picture=picture)
# ------------------------------------------------------------
# Exceptions.
# ------------------------------------------------------------
class FourMapperException(Exception):
def __init__(self, http_status, msg):
Exception.__init__(self, msg)
self.http_status = http_status
# ------------------------------------------------------------
# Keep templates in the 'templates' subdirectory.
# ------------------------------------------------------------
TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), 'templates')
def render_template(name, values):
return template.render(os.path.join(TEMPLATE_PATH, name), values)
# ------------------------------------------------------------
# API keys, etc. Stored in the keys subdirectory. Key files contain
# a single value--the contents of the file are evaled and the result
# is the value.
# ------------------------------------------------------------
key_cache = {}
def get_key(name, secret=False):
if name in key_cache:
return key_cache[name]
if secret:
extension = 'secret'
else:
extension = 'key'
path = os.path.join('keys', '%s.%s' % (name, extension))
with open(path, 'r') as f:
value = safer_eval(f.read())
key_cache[name] = value
return value
# ------------------------------------------------------------
# Request handlers.
# ------------------------------------------------------------
# Our request handler superclass. We implement our app-level
# exception handling strategy here.
class FourMapperRequestHandler(webapp.RequestHandler):
def handle_exception(self, exception, debug_mode):
logging.error('exception: %s\n%s' % (repr(exception), str(exception)))
logging.error('stack trace: %s' % (traceback.format_exc()))
if debug_mode or not isinstance(exception, FourMapperException):
self.error(500)
self.response.out.write(str(exception))
else:
self.error(exception.http_status)
self.response.out.write(str(exception))
# Admin page Doesn't really do anything.
class AdminPage(FourMapperRequestHandler):
def get(self):
# Make sure only I can access this.
user = gaeusers.get_current_user()
if not user:
self.redirect(gaeusers.create_login_url(self.request.uri))
else:
self.response.out.write('Hi, %s\n\n' % (user.nickname(),))
if not gaeusers.is_current_user_admin():
self.response.out.write('Sorry, you need to be an administrator to view this page.\n')
else:
self.response.out.write('Cool, you are an administrator.\n')
# Logged in user is an admin user.
# Dump all uids.
users = list(History.all())
self.response.out.write(' '.join([str(u.uid) for u in users]))
# The main page.
class MainPage(FourMapperRequestHandler):
"This is the main app page."
def get(self):
session = gmemsess.Session(self)
# These are the template variables we'll be filling.
session_user = None
    gmaps_api_key = ""
public_users = []
map_user = None
# Have we authorized this user?
if 'user_token' in session:
session_user = get_user_record(session['uid'])
# Get the appropriate google maps API key; there's one for
# 4mapper.appspot.com and one for localhost (for testing).
host = self.request.headers['Host'].split(':')[0]
gmaps_api_key = get_key('gmaps-api-key-%s' % (host,))
# Which user are we mapping (if any)?
if 'uid' in self.request.arguments():
map_user = get_user_record(self.request.get('uid'))
if not map_user:
raise FourMapperException(400, 'No such user %s' % (self.request.get('uid'),))
else:
map_user = session_user
template_values = {'gmaps_api_key': gmaps_api_key,
'session_user': session_user,
'map_user': map_user}
self.response.out.write(render_template('index.html', template_values))
# Public users page.
class PublicUsersPage(FourMapperRequestHandler):
"This page displays all users with public histories."
def get(self):
# Figure out which users have made their histories public.
public_user_q = History.gql('WHERE public = :1', True)
public_users = list(public_user_q)
logging.info('Displaying %s users with public histories.' % (len(public_users,)))
# Randomize the order
random.shuffle(public_users)
template_values = {'public_users': public_users}
self.response.out.write(render_template('users.html', template_values))
# ------------------------------------------------------------
# OAuth request handlers.
# ------------------------------------------------------------
class Authorize(FourMapperRequestHandler):
"""This page is used to do the oauth dance. It gets an app token
from foursquare, saves it in the session, then redirects to the
foursquare authorization page. That authorization page then
redirects to /oauth_callback.
"""
def get(self):
return self.run()
def post(self):
return self.run()
def run(self):
session = gmemsess.Session(self)
fs = get_foursquare(session)
app_token = fs.request_token()
auth_url = fs.authorize(app_token)
session['app_token'] = app_token.to_string()
session.save()
self.redirect(auth_url)
class OAuthCallback(FourMapperRequestHandler):
"""This is our oauth callback, which the foursquare authorization
page will redirect to. It gets the user token from foursquare,
saves it in the session, and redirects to the main page.
"""
def get(self):
session = gmemsess.Session(self)
fs = get_foursquare(session)
app_token = oauth.OAuthToken.from_string(session['app_token'])
user_token = fs.access_token(app_token)
session['user_token'] = user_token.to_string()
fs.credentials.set_access_token(user_token)
user = fs.user()['user']
uid = user['id']
session['uid'] = uid
# Make sure this user is in our DB and we save his most up-to-date
# name and photo.
user_record = get_user_record(uid)
if not user_record:
user_record = make_user_record(uid, user['firstname'], user['photo'])
user_record.put()
else:
user_record.name = user['firstname']
user_record.picture = user['photo']
user_record.put()
session.save()
self.redirect('/?uid=%s' % (uid,))
class FourUser(FourMapperRequestHandler):
def get(self):
session = gmemsess.Session(self)
fs = get_foursquare(session)
start_time = time.time()
user = fs.user()
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(simplejson.dumps(user))
class ToggleHistoryAccess(FourMapperRequestHandler):
def get(self):
session = gmemsess.Session(self)
user_record = get_user_record(session['uid'])
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(simplejson.dumps(user_record.public))
def post(self):
session = gmemsess.Session(self)
fs = get_foursquare(session)
user = fs.user()['user']
uid = user['id']
user_record = get_user_record(uid)
logging.info('Toggling public for uid %s from %s to %s' %
(uid, user_record.public, not user_record.public))
user_record.public = not user_record.public
user_record.put()
if 'r' in self.request.arguments():
self.redirect(self.request.get('r'))
class Logout(FourMapperRequestHandler):
def get(self):
session = gmemsess.Session(self)
session.invalidate()
self.redirect('/')
HTML_404 = '404 Error'
class PageNotFound(webapp.RequestHandler):
def get(self):
self.error(404)
self.response.out.write(HTML_404)
# ------------------------------------------------------------
# AJAX endpoint request handlers.
# ------------------------------------------------------------
class FourHistory(FourMapperRequestHandler):
"""This is an Ajax endpoint that returns a user's checkin history.
Requires Foursquare authorization.
"""
def get(self):
session = gmemsess.Session(self)
fs = get_foursquare(session)
start_time = time.time()
# Are we getting the current user's history, in which case we'll
# ask foursquare so as to get the latest info, or are we
# retrieving someone else's history?
if 'uid' in self.request.arguments() and \
       (('uid' not in session) or (int(self.request.get('uid')) != session['uid'])):
#
# We're getting someone else's history.
#
uid = int(self.request.get('uid'))
user_record = get_user_record(uid)
if not user_record:
logging.error('User %s has no history record.' % (uid,))
raise FourMapperException(400, 'No history for user.')
if not user_record.public:
logging.error('User %s has a private history.' % (uid,))
raise FourMapperException(403, 'No public history for user.')
history = simplejson.loads(user_record.history)
history = massage_history(history)
history_date = calendar.timegm(user_record.history_date.timetuple())
else:
#
# Get latest history for current user.
#
history = get_entire_history(fs)
history = massage_history(history)
# Store the history.
store_user_history(session['uid'], history)
history_date = time.time()
logging.info('history took %.3f s' % (time.time() - start_time,))
self.response.headers['Content-Type'] = 'text/plain'
result = {'checkins': history,
'history_date': history_date,
'statistics': generate_history_stats(history)}
self.response.out.write(simplejson.dumps(result))
# ------------------------------------------------------------
# Checkin statistics utilities.
# ------------------------------------------------------------
def generate_history_stats(history):
fn_start_time = time.time()
day_groups = []
history = sorted(history, key=lambda c: c['created_epoch'])
  # Group consecutive checkins by calendar date rather than day-of-month,
  # which would wrongly merge e.g. Jan 5 and Feb 5 if nothing fell between.
  for k, g in itertools.groupby(history,
                                lambda c: datetime.datetime.fromtimestamp(c['created_epoch']).date()):
day_group = list(g)
day_groups.append((datetime.datetime.fromtimestamp(day_group[0]['created_epoch']),
day_group))
# We'll return checkin counts and distances for the last 365 days.
checkin_counts = [0] * 365
distance_traveled = [0.0] * 365
# Limit checkins to the last 365 days.
now = datetime.datetime.now()
cutoff_date = now - datetime.timedelta(days=365)
day_groups = [(s, g) for (s, g) in day_groups if s >= cutoff_date]
# Compute checkin counts and distances for each day, and total
# number of checkins along with number of days with a checkin.
total_checkin_count = 0
day_count = len(day_groups)
for start_time, group in day_groups:
total_checkin_count += len(group)
assert start_time >= cutoff_date
    for checkin in group:
      time_delta = now - datetime.datetime.fromtimestamp(checkin['created_epoch'])
      assert time_delta.days < 365
      index = 364 - time_delta.days
      checkin_counts[index] += 1
    # Every checkin in a group falls on the same day, so compute the day's
    # distance once rather than once per checkin.
    distance_traveled[index] = distance_between_checkins(group)
# Compute favorites.
all_favorites, venue_names = favorites_from_last_n_days(day_groups, now, 10000) # heh, i rebel.
# Recent favorites are the top venues from the last 30 days.
recent_favorites, venue_names_2 = favorites_from_last_n_days(day_groups, now, 30)
venue_names = merge_dicts(venue_names, venue_names_2)
# New Favorites are anything in the top 5 recent favorites that
# aren't in the top 20 all-time favorites.
new_favorites = set_difference(recent_favorites[-5:], all_favorites[-20:], key=FavoriteVenue.vid)
new_favorites = sorted(new_favorites, key=FavoriteVenue.count)
# Forgotten favorites are all-time favorites that aren't recent or new favorites.
forgotten_favorites = set_difference(all_favorites,
set_union(recent_favorites[-10:], new_favorites[-10:], key=FavoriteVenue.vid),
key=FavoriteVenue.vid)
forgotten_favorites = sorted(forgotten_favorites, key=FavoriteVenue.count)
recent_favorites = [venue_names[fave.vid()] for fave in recent_favorites[-3:]]
new_favorites = [venue_names[fave.vid()] for fave in new_favorites][-3:]
forgotten_favorites = [venue_names[fave.vid()] for fave in forgotten_favorites[-3:]]
logging.info('statistics took %.3f s' % (time.time() - fn_start_time,))
return {'total_checkins': total_checkin_count,
'checkin_days': day_count,
'checkin_counts': checkin_counts,
'distances': distance_traveled,
'recent_favorites': recent_favorites,
'new_favorites': new_favorites,
'forgotten_favorites': forgotten_favorites,
'blurb': ''}
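# Worked illustration of the favorites set algebra above (hypothetical data,
# not in the original file): if the all-time top 20 is {A, B, C, ...} and the
# 30-day top 5 is {A, X}, then X becomes a "new favorite" (recent but not an
# all-time favorite), while an all-time favorite appearing in neither the
# recent top 10 nor the new top 10 ends up a "forgotten favorite".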
def set_difference(a, b, key=lambda x: x):
a_map = {}
for e in a:
a_map[key(e)] = e
for e in b:
if key(e) in a_map:
del a_map[key(e)]
return a_map.values()
def set_union(a, b, key=lambda x: x):
result = {}
for e in a:
result[key(e)] = e
for e in b:
result[key(e)] = e
return result.values()
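# Quick illustrations for the two helpers above (added, not in the original
# file); with key=str.lower the keys collide case-insensitively:
#   set_difference(['A', 'b'], ['a'], key=str.lower) -> ['b']
#   set_union(['A', 'b'], ['a'], key=str.lower)      -> ['a', 'b'] in some order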
class FavoriteVenue:
def __init__(self, vid, count):
self.venue_id = vid
self.checkin_count = count
def count(self):
return self.checkin_count
def vid(self):
return self.venue_id
def favorites_from_last_n_days(day_groups, now, n):
recent_day_groups = last_n_days(day_groups, now, n)
recent_venue_counts = collections.defaultdict(int)
venue_names = {}
for s, g in recent_day_groups:
for checkin in g:
if 'venue' in checkin and 'id' in checkin['venue']:
venue = checkin['venue']
vid = venue['id']
venue_names[vid] = venue['name']
recent_venue_counts[vid] += 1
recent_favorites = [FavoriteVenue(vid, count) for vid, count in recent_venue_counts.items()]
recent_favorites = sorted(recent_favorites, key=FavoriteVenue.count)
return recent_favorites, venue_names
def last_n_days(day_groups, now, n):
cutoff_date = now - datetime.timedelta(days=n)
day_groups = [(s, g) for (s, g) in day_groups if s > cutoff_date]
return day_groups
def all_but_last_n_days(day_groups, now, n):
cutoff_date = now - datetime.timedelta(days=n)
day_groups = [(s, g) for (s, g) in day_groups if s <= cutoff_date]
return day_groups
def distance_between_checkins(checkins):
# Filter out checkins that don't have venues or that don't have geo
# coordinates.
checkins = [c for c in checkins if 'venue' in c and 'geolat' in c['venue']]
distance = 0.0
for a, b in window(checkins, 2):
d = distance_between(a, b)
# logging.info('pair: %s %s' % (d, (a, b),))
distance += d
assert distance >= 0.0 and distance < 999999999.0, 'Bad distance %s for these checkins: %s' % (d, (a, b))
return distance
def distance_between(c1, c2):
def to_rad(d):
return d * math.pi / 180.0
v1 = c1['venue']
v2 = c2['venue']
lat1 = to_rad(v1['geolat'])
lon1 = to_rad(v1['geolong'])
lat2 = to_rad(v2['geolat'])
lon2 = to_rad(v2['geolong'])
r = 6371
p = math.sin(lat1) * math.sin(lat2) + \
math.cos(lat1) * math.cos(lat2) * \
math.cos(lon2 - lon1)
  if p >= 1.0:
    d = 0.0
  else:
    # p already holds the spherical law of cosines term computed above.
    d = math.acos(p) * r
return d
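# Sanity check (illustrative, not in the original file): two venues on the
# equator one degree of longitude apart give d = acos(cos(1 deg)) * 6371,
# roughly 0.017453 * 6371, about 111.2 km, the familiar "one degree is about
# 111 km" figure. The p >= 1.0 guard keeps acos in range when the two points
# coincide, where floating-point rounding can push p slightly above 1.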
def window(seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(itertools.islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
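# Doctest-style example (added for illustration):
#   >>> list(window([1, 2, 3, 4], 2))
#   [(1, 2), (2, 3), (3, 4)]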
# ------------------------------------------------------------
# Misc. utilities
# ------------------------------------------------------------
def massage_history(history):
# Massage the history a bit.
if 'checkins' in history:
history = history['checkins']
  if history is None:
history = []
# Now add a seconds-since-epoch version of each checkin
# timestamp.
history = add_created_epoch(history)
return history
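# Example shapes (illustrative, not in the original file): a payload of
# {'checkins': None} comes back as [], while {'checkins': [c]} returns [c]
# with a 'created_epoch' key added, assuming each checkin carries a
# parseable 'created' timestamp.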
def add_created_epoch(history):
# Now add a seconds-since-epoch version of each checkin
# timestamp.
h = []
for c in history:
    c['created_epoch'] = seconds_since_epoch_of_checkin(c)
h.append(c)
return h
def store_user_history(uid, history):
history_s = simplejson.dumps(history)
logging.info('Storing history for user %s (%s bytes)' % (uid, len(history_s)))
user_record = get_user_record(uid)
user_record.history = history_s
user_record.history_date = datetime.datetime.now()
user_record.put()
def get_entire_history(fs):
history = []
logging.info('Getting all checkins for user')
for h in foursquare.history_generator(fs):
# Annoying that Foursquare uses null/None to indicate zero
# checkins.
logging.info(' Getting more checkins...')
if h['checkins']:
history += h['checkins']
return history
def merge_dicts(a, b):
  if a is None:
    return b
  if b is None:
    return a
r = {}
for key, value in a.items():
r[key] = value
for key, value in b.items():
r[key] = value
return r
def safer_eval(s):
  # Blanking __builtins__ narrows the attack surface but does not make eval
  # safe for untrusted input; only use this on data we generated ourselves.
  return eval(s, {'__builtins__': None}, {})
def get_foursquare(session):
"""Returns an instance of the foursquare API initialized with our
oauth info.
"""
oauth_consumer_key = get_key('foursquare-oauth-consumer-key', secret=True)
oauth_consumer_secret = get_key('foursquare-oauth-consumer-secret', secret=True)
fs = foursquare.Foursquare(foursquare.OAuthCredentials(oauth_consumer_key, oauth_consumer_secret))
if 'user_token' in session:
user_token = oauth.OAuthToken.from_string(session['user_token'])
fs.credentials.set_access_token(user_token)
return fs
# ------------------------------------------------------------
# Start of some recommendation stuff.
# ------------------------------------------------------------
def seconds_since_epoch_of_checkin(c):
import rfc822
try:
checkin_ts = time.mktime(rfc822.parsedate(c['created']))
except Exception, e:
logging.error("Unable to parse date of checkin %s: %s" % (`c`, e))
raise FourMapperException(500, 'Unable to parse date in checkin')
return checkin_ts
def venues_in_common(p1, p2):
p1 = [v for v in p1 if p1[v] > 0]
p2 = [v for v in p2 if p2[v] > 0]
return list(set(p1).intersection(set(p2)))
class RecommendationsPage(FourMapperRequestHandler):
def get(self):
prefs = get_preferences()
all_venues = set()
for user in prefs:
for venue in prefs[user]:
all_venues.add(venue)
similarities = []
USER = 760
for user in prefs:
if user != USER and len(prefs[user]) > 0:
similarities.append((user,
sim_pearson(all_venues, prefs, USER, user),
venues_in_common(prefs[USER], prefs[user])))
similarities = sorted(similarities, key=lambda e: e[1], reverse=True)
sims = []
for sim in similarities[0:10]:
sims.append({'uid': sim[0], 'score': sim[1], 'venues': sim[2]})
self.response.out.write(render_template('recs.html', {'similarities': sims}))
g_prefs = None
def get_preferences():
global g_prefs
if g_prefs:
return g_prefs
else:
user_data = {}
users = list(History.all())
start_time = time.time()
for user in users:
history = massage_history(simplejson.loads(user.history))
checkins = collections.defaultdict(int)
for checkin in history:
if 'venue' in checkin and 'id' in checkin['venue']:
venue_id = checkin['venue']['id']
checkins[venue_id] += 1
user_data[user.uid] = checkins
g_prefs = user_data
return user_data
import math
def simplified_sim_pearson(p1, p2):
n = len(p1)
assert (n != 0)
sum1 = sum(p1)
sum2 = sum(p2)
m1 = float(sum1) / n
m2 = float(sum2) / n
p1mean = [(x - m1) for x in p1]
p2mean = [(y - m2) for y in p2]
numerator = sum(x * y for x, y in zip(p1mean, p2mean))
denominator = math.sqrt(sum(x * x for x in p1mean) * sum(y * y for y in p2mean))
return numerator / denominator if denominator else 0
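# Illustration (added, not in the original file): identical vectors correlate
# perfectly, reversed ones anti-correlate:
#   simplified_sim_pearson([1, 2, 3], [1, 2, 3]) -> 1.0
#   simplified_sim_pearson([1, 2, 3], [3, 2, 1]) -> -1.0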
def sim_pearson(all_items, prefs, p1, p2):
p1 = prefs[p1]
p2 = prefs[p2]
p1_x = [p1[k] for k in all_items]
p2_x = [p2[k] for k in all_items]
if len(p1_x) > 0 and len(p2_x) > 0:
return simplified_sim_pearson(p1_x, p2_x)
else:
return 0.0
# ------------------------------------------------------------
# Application URL routing.
# ------------------------------------------------------------
application = webapp.WSGIApplication([('/authorize', Authorize),
('/oauth_callback', OAuthCallback),
('/logout', Logout),
('/toggle_public', ToggleHistoryAccess),
('/4/history', FourHistory),
('/4/user', FourUser),
('/recommend', RecommendationsPage),
('/', MainPage),
('/users', PublicUsersPage),
('/admin', AdminPage),
('/.*', PageNotFound)],
#debug=True
)
def real_main():
run_wsgi_app(application)
def profile_main():
# This is the main function for profiling
# We've renamed our original main() above to real_main()
import cProfile, pstats
prof = cProfile.Profile()
prof = prof.runctx("real_main()", globals(), locals())
print "<pre>"
stats = pstats.Stats(prof)
stats.sort_stats("time") # Or cumulative
stats.print_stats(80) # 80 = how many to print
# The rest is optional.
# stats.print_callees()
# stats.print_callers()
print "</pre>"
main = real_main
if __name__ == "__main__":
main()
| {
"repo_name": "wiseman/4mapper",
"path": "code.py",
"copies": "1",
"size": "23753",
"license": "mit",
"hash": -7906417336536080000,
"line_mean": 30.9690444145,
"line_max": 117,
"alpha_frac": 0.601524018,
"autogenerated": false,
"ratio": 3.54046802802206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9530527190741434,
"avg_score": 0.02229297105612524,
"num_lines": 743
} |
# 4. Median of Two Sorted Arrays - LeetCode
# https://leetcode.com/problems/median-of-two-sorted-arrays/description/
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
left1 = 0
left2 = 0
right1 = len(nums1) - 1
right2 = len(nums2) - 1
total = len(nums2) + len(nums1)
        odd = total % 2
while total > 2:
# Pop Left
if left1 <= right1 and left2 <= right2:
if nums1[left1] < nums2[left2]:
left1 += 1
else:
left2 += 1
elif left1 > right1: # num1 null
left2 += 1
elif left2 > right2:
left1 += 1
# Pop Right
if right1 >= left1 and right2 >= left2:
if nums1[right1] > nums2[right2]:
right1 -= 1
else:
right2 -= 1
elif right2 < left2: # num2 null
right1 -= 1
elif right1 < left1:
right2 -= 1
total -= 2
if odd:
if (left2 - 1) == right2:
return nums1[left1]
elif (left1 - 1) == right1:
return nums2[left2]
else:
            if (left1 - 1) == right1: # nums1 exhausted
                return (nums2[left2] + nums2[right2]) / 2.0
            elif (left2 - 1) == right2: # nums2 exhausted
                return (nums1[left1] + nums1[right1]) / 2.0
            else:
                return (nums1[left1] + nums2[left2]) / 2.0
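# Complexity sketch (added note): each pass of the while loop above discards
# one element from each end, so this variant runs in O(m + n) time; it is
# simpler than, but slower than, the O(log(m + n)) partition-based approach
# this problem usually asks for.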
ans = [
([1],[1],1),
([1,2],[3],2),
([1,2,3],[4],2.5),
([4],[1,2,3],2.5),
]
s = Solution()
for i in ans:
r = s.findMedianSortedArrays(i[0],i[1])
print r, r == i[2] | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/004_median-of-two-sorted-arrays.py",
"copies": "1",
"size": "1884",
"license": "mit",
"hash": 6445847296338236000,
"line_mean": 29.4032258065,
"line_max": 72,
"alpha_frac": 0.4336518047,
"autogenerated": false,
"ratio": 3.4824399260628467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44160917307628467,
"avg_score": null,
"num_lines": null
} |
"""4. Median of Two Sorted Arrays
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if len(nums1) > len(nums2):
nums1, nums2 = nums2, nums1
cur1 = step = len(nums1) // 2
cur2 = len(nums2) // 2
while 0 <= cur1 < len(nums1) and 0 <= cur2 < len(nums2):
isok1 = cur2 < 1 or nums1[cur1] >= nums2[cur2 - 1]
isok2 = cur1 < 1 or nums2[cur2] >= nums1[cur1 - 1]
step //= 2
if not isok1:
next_step = max(step, 1)
elif not isok2:
next_step = -max(step, 1)
else:
break
cur1 += next_step
cur2 -= next_step
if (len(nums1) + len(nums2)) % 2 == 1:
if 0 <= cur1 < len(nums1) and 0 <= cur2 < len(nums2):
return min(nums1[cur1], nums2[cur2])
elif 0 <= cur1 < len(nums1):
return nums1[cur1]
else:
return nums2[cur2]
else:
if len(nums1) % 2 == 1:
return sum(sorted(nums1[cur1:cur1 + 2] + nums2[cur2:cur2 + 2])[:2]) * 0.5
else:
return (max(nums1[cur1 - 1:cur1] + nums2[cur2 - 1:cur2]) +
min(nums1[cur1:cur1 + 1] + nums2[cur2:cur2 + 1])) * 0.5
| {
"repo_name": "nadesico19/nadepy",
"path": "leetcode/algo_4_median_of_two_sorted_arrays.py",
"copies": "1",
"size": "1748",
"license": "mit",
"hash": -4272894268155824000,
"line_mean": 28.6271186441,
"line_max": 97,
"alpha_frac": 0.4862700229,
"autogenerated": false,
"ratio": 3.1158645276292334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9100328178262813,
"avg_score": 0.0003612744532841385,
"num_lines": 59
} |
# 4. More Control Flow Tools
# -*- coding: utf-8 -*-
# 4.1 The if statement
"""
x = int(input("Please enter an integer: "))
if x < 0:
x = 0
print('Negative changed to zero')
elif x == 0:
print('Zero')
elif x == 1:
print('Single')
else:
print('More')
# 4.2。for声明
words = ['cat', 'window', 'defenestrate']
for w in words:
print(w, len(w))
for w in words[:]:  # Loop over a slice copy of the entire list.
if len(w) > 6:
words.insert(0, w)
print(words)
# 4.3 The range() function
for i in range(5):
print(i)
#0 1 2 3 4
for i in range(5, 10):
print(i)
# 5 6 7 8 9
for i in range(0, 10, 3):
print(i)
# 0 3 6 9
for i in range(-10, -100, -30):
print(i)
# -10 -40 -70
a = ['Mary', 'had', 'a', 'little', 'lamb']
for i in range(len(a)):
print(i, a[i])
print(range(10))
# range(0, 10)
# In many ways the object returned by range() behaves as if it were a list,
# but in fact it isn't. It is an object which returns the successive items of
# the desired sequence when you iterate over it, but it doesn't really make
# the list, thus saving space.
# We say such an object is iterable, that is, suitable as a target for
# functions and constructs that expect something from which they can obtain
# successive items until the supply is exhausted. We have seen that the for
# statement is such a construct. The function list() is another; it creates
# lists from iterables:
print(list(range(5)))
# [0, 1, 2, 3, 4]
# 4.4 The break and continue statements, and else clauses on loops
# The following loop, which searches for prime numbers, is an example:
for n in range(2, 10):
for x in range(2, n):
if n % x == 0:
print(n, 'equals', x, '*', n // x)
break
    else:  # The loop fell through without finding a factor.
print(n, 'is a prime number')
for num in range(2, 10):
if num % 2 == 0:
print("Found an even number", num)
continue
print("Found a number", num)
# 4.5 The pass statement
while True:
pass # Busy-wait for keyboard interrupt (Ctrl+C)
"""
# 4.6 Defining functions
# We can create a function that writes the Fibonacci series up to an arbitrary boundary:
def fib(n):
    """Print a Fibonacci series up to n."""
    a, b = 0, 1
    while a < n:
        print(a, end=' ')
        a, b = b, a + b
    print()
# Now call the function we just defined:
fib(2000)
| {
"repo_name": "taoshujian/taoshujian",
"path": "Python/Tutorial/4.ControlFlow.py",
"copies": "1",
"size": "2326",
"license": "apache-2.0",
"hash": -8008732345775252000,
"line_mean": 18.311827957,
"line_max": 104,
"alpha_frac": 0.5723830735,
"autogenerated": false,
"ratio": 1.7995991983967936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7861017708095746,
"avg_score": 0.0021929127602097567,
"num_lines": 93
} |
# 4
# version number needs to be incremented each time the file changes!
import math
import mathutils
import struct
import bpy
import json
writePackedBinary = True
filepathPRFX = bpy.data.filepath.rsplit('.', 1)[0]
meshfile = open(filepathPRFX + ".mesh.bin", "wb") if writePackedBinary else open(filepathPRFX + ".mesh.json", "w")
matfile = open(filepathPRFX + ".mat.json", "w")
camfile = open(filepathPRFX + ".cam.json", "w")
objList = bpy.data.objects
matList = bpy.data.materials
scene = bpy.context.scene
materialDict = {}
matIdx = 0
materialExport = []
for mat in matList:
print('Exporting material: ' + mat.name)
material = {}
material["name"] = mat.name
material["specHardness"] = mat.specular_hardness
material["emits"] = mat.emit
material["ior"] = mat.raytrace_transparency.ior
materialType = "NULL"
if mat.use_transparency is True:
materialType = "GLASS"
# small hack because there is no definition for absorbtion color for dielectrics
material["color"] = [mat.diffuse_color[0], mat.diffuse_color[1], mat.diffuse_color[2]]
if mat.specular_intensity > 0 or mat.specular_hardness > 1:
        if materialType != "NULL":
print("WARNING: Non-unique material definition! Was [" + materialType + "], gets [PLASTIC]!")
materialType = "PLASTIC"
material["color"] = [mat.diffuse_color[0], mat.diffuse_color[1], mat.diffuse_color[2]]
if mat.raytrace_mirror.use is True:
        if materialType != "NULL":
print("WARNING: Non-unique material definition! Was [" + materialType + "], gets [MIRROR]!")
materialType = "MIRROR"
material["color"] = [mat.diffuse_color[0], mat.diffuse_color[1], mat.diffuse_color[2]]
if mat.emit > 0:
        if materialType != "NULL":
            print("WARNING: Non-unique material definition! Was [" + materialType + "], gets [EMITTING]!")
materialType = "EMITTING"
material["color"] = [mat.diffuse_color[0], mat.diffuse_color[1], mat.diffuse_color[2]]
if materialType is "NULL":
#fallback to DIFFUSE
materialType = "DIFFUSE"
material["color"] = [mat.diffuse_color[0], mat.diffuse_color[1], mat.diffuse_color[2]]
print("Identified " + mat.name + " as " + materialType+"\n")
material["type"] = materialType
materialExport.append(material)
materialDict[mat.name] = matIdx
matIdx += 1
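# Classification order (derived from the branches above): GLASS, then
# PLASTIC, then MIRROR, then EMITTING; each later match overrides the earlier
# one and logs a non-unique-definition warning, so e.g. a transparent
# material that also emits is exported as EMITTING.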
matfile.write(json.dumps(materialExport))
# --------------------- Object Geometry export -----------------------------
if writePackedBinary is False:
meshfile.write("[") # manual json wrapper to save memory while exporting very large scenes
exportedMeshes = 0
polyCount = 0
for obj in objList:
if obj.type == "CAMERA":
cam = obj.data
if cam.type != "PERSP":
print('no support for camera models other than \'perspective\'. Ignoring ' + cam.name)
continue
else:
print("Exporting PERSP Camera")
focalLength = (cam.lens/cam.sensor_width)*36.0
objmatrix = obj.matrix_world
eyeV = mathutils.Vector([0, 0, 0, 1])
targetV = mathutils.Vector([0, 0, 1, 0])
upV = mathutils.Vector([0, 1, 0, 0])
eyeV = eyeV * objmatrix
dirV = targetV * objmatrix
upV = upV * objmatrix
camExport = {}
camExport["position"] = [obj.location[0], obj.location[1], obj.location[2]]
camExport["rotation"] = [obj.rotation_euler[0], obj.rotation_euler[1], obj.rotation_euler[2]]
camExport["viewDirection"] = [dirV[0], dirV[1], dirV[2]]
camExport["upVector"] = [upV[0], upV[1], upV[2]]
camExport["focalLength"] = focalLength
camfile.write(json.dumps(camExport))
if obj.type == "MESH":
print('Exporting a mesh object: ' + obj.name + '(' + obj.data.name + ')')
objMesh = obj.to_mesh(scene, True, 'RENDER')
objMesh.transform(obj.matrix_world, True)
if writePackedBinary:
for face in objMesh.polygons:
p0 = objMesh.vertices[face.vertices[0]].co
p1 = objMesh.vertices[face.vertices[1]].co
p2 = objMesh.vertices[face.vertices[2]].co
meshfile.write(struct.pack("fff", p0.x, p0.y, p0.z))
meshfile.write(struct.pack("fff", p1.x, p1.y, p1.z))
meshfile.write(struct.pack("fff", p2.x, p2.y, p2.z))
meshfile.write(struct.pack("B", materialDict[objMesh.materials[face.material_index].name]))
polyCount += 1
else:
if exportedMeshes > 0:
meshfile.write(", ")
mesh = {}
mesh["name"] = obj.name
mesh["type"] = "TRIANGULAR_MESH"
mesh["triangles"] = []
for face in objMesh.polygons:
p0 = objMesh.vertices[face.vertices[0]].co
p1 = objMesh.vertices[face.vertices[1]].co
p2 = objMesh.vertices[face.vertices[2]].co
mesh["triangles"].append({"p0": [p0.x, p0.y, p0.z], "p1": [p1.x, p1.y, p1.z], "p2": [p2.x, p2.y, p2.z],
"m": materialDict[objMesh.materials[face.material_index].name]})
polyCount += 1
meshfile.write(json.dumps(mesh))
exportedMeshes += 1
if exportedMeshes > 0 and writePackedBinary is False:
meshfile.write("]\n")
meshfile.close()
matfile.close()
camfile.close()
print("---------Statistics---------")
print("Nr. of Materials: " + str(matIdx))
print("Nr. of Meshes: " + str(exportedMeshes))
print("Nr. of Polygons: " + str(polyCount))
print("Nr. of Cameras: 1")
print("----------------------------")
print("Have fun!")
| {
"repo_name": "bensteinert/chromarenderer-java",
"path": "chroma-java-core/src/main/resources/blenderToChroma.py",
"copies": "1",
"size": "5789",
"license": "mit",
"hash": 132603487217403360,
"line_mean": 36.5909090909,
"line_max": 119,
"alpha_frac": 0.5868025566,
"autogenerated": false,
"ratio": 3.397300469483568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9444626192863371,
"avg_score": 0.007895366644039363,
"num_lines": 154
} |
## 4. Querying a normalized database ##
portman_query = '''select ceremonies.year, nominations.movie from nominations
inner join ceremonies
on nominations.ceremony_id == ceremonies.id
where nominations.nominee == "Natalie Portman";
'''
portman_movies = conn.execute(portman_query).fetchall()
for p in portman_movies:
print(p)
## 6. Join table ##
five_join_table = conn.cursor().execute('select * from movies_actors limit 5;').fetchall()
five_movies = conn.cursor().execute('select * from movies limit 5;').fetchall()
five_actors = conn.cursor().execute('select * from actors limit 5;').fetchall()
print(five_join_table)
print(five_actors)
print(five_movies)
## 7. Querying a many-to-many relation ##
q = '''
SELECT actors.actor,movies.movie FROM movies
INNER JOIN movies_actors ON movies.id == movies_actors.movie_id
INNER JOIN actors ON movies_actors.actor_id == actors.id
WHERE movies.movie == "The King's Speech";
'''
kings_actors = conn.execute(q).fetchall()
print(kings_actors)
## 8. Practice: querying a many-to-many relation ##
q = '''
select movies.movie, actors.actor from movies
inner join movies_actors on movies.id == movies_actors.movie_id
inner join actors on actors.id == movies_actors.actor_id
where actors.actor == "Natalie Portman";
'''
portman_joins = conn.execute(q).fetchall()
print(portman_joins) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "SQL and Databases Intermediate/Database Normalization and Relations-180.py",
"copies": "1",
"size": "1331",
"license": "mit",
"hash": 2025189934928575500,
"line_mean": 31.487804878,
"line_max": 90,
"alpha_frac": 0.7325319309,
"autogenerated": false,
"ratio": 3.038812785388128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4271344716288128,
"avg_score": null,
"num_lines": null
} |
## 4. Reading in the data ##
import pandas as pd
data_files = [
"ap_2010.csv",
"class_size.csv",
"demographics.csv",
"graduation.csv",
"hs_directory.csv",
"sat_results.csv"
]
data = {}
for f in data_files:
d = pd.read_csv("schools/{0}".format(f))
key_name = f.replace(".csv", "")
data[key_name] = d
## 5. Exploring the SAT data ##
print(data['sat_results'].head(5))
## 6. Exploring the other data ##
for key in data:
print(data[key].head(5))
## 7. Reading in the survey data ##
all_survey = pd.read_csv('schools/survey_all.txt',delimiter='\t',encoding='windows-1252')
d75_survey = pd.read_csv('schools/survey_d75.txt',delimiter='\t',encoding='windows-1252')
survey = pd.concat([all_survey,d75_survey],axis=0)
survey.head(5)
## 8. Cleaning up the surveys ##
survey['DBN']=survey['dbn']
cols = ["DBN", "rr_s", "rr_t", "rr_p", "N_s", "N_t", "N_p", "saf_p_11", "com_p_11", "eng_p_11", "aca_p_11", "saf_t_11", "com_t_11", "eng_t_11", "aca_t_11", "saf_s_11", "com_s_11", "eng_s_11", "aca_s_11", "saf_tot_11", "com_tot_11", "eng_tot_11", "aca_tot_11"]
survey = survey[cols]
data['survey']=survey
data['survey'].shape
## 9. Inserting DBN fields ##
data["hs_directory"]["DBN"] = data["hs_directory"]["dbn"]
def pad_csd(num):
string_representation = str(num)
if len(string_representation) > 1:
return string_representation
else:
return string_representation.zfill(2)
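# Illustration of the zero-padding: pad_csd(1) -> "01", pad_csd(12) -> "12";
# the two-character CSD is prefixed to SCHOOL CODE below to build the DBN.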
data["class_size"]["padded_csd"] = data["class_size"]["CSD"].apply(pad_csd)
data["class_size"]["DBN"] = data["class_size"]["padded_csd"] + data["class_size"]["SCHOOL CODE"]
print(data["class_size"].head())
## 10. Combining the SAT scores ##
cols = ['SAT Math Avg. Score', 'SAT Critical Reading Avg. Score', 'SAT Writing Avg. Score']
for c in cols:
    data["sat_results"][c] = pd.to_numeric(data["sat_results"][c], errors="coerce")

data['sat_results']['sat_score'] = data['sat_results'][cols[0]] + data['sat_results'][cols[1]] + data['sat_results'][cols[2]]
print(data['sat_results']['sat_score'].head())
## 12. Extracting the longitude ##
import re
def find_long(loc):
    coords = re.findall(r"\(.+, .+\)", loc)
long = coords[0].split(",")[1].replace(")", "")
return long
data['hs_directory']['lon'] = data['hs_directory']['Location 1'].apply(find_long)
cols = ['lon','lat']
for c in cols:
data["hs_directory"][c] = pd.to_numeric(data["hs_directory"][c], errors="coerce")
print(data['hs_directory'].head(5)) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Exploration/Data Cleaning Walkthrough_ Cleaning The Data-208.py",
"copies": "1",
"size": "2496",
"license": "mit",
"hash": -4656309392994924000,
"line_mean": 33.6805555556,
"line_max": 393,
"alpha_frac": 0.6137820513,
"autogenerated": false,
"ratio": 2.689655172413793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38034372237137926,
"avg_score": null,
"num_lines": null
} |
# 4
# add hack
# add hackerrank
# find hac
# find hak
#
# 2
# 0
answer = ''
dictionary_head = {}
def add(value, dictionary):
first_char = value[0]
dict_next = dictionary.get(first_char)
if dict_next is None:
dict_next = {'_': 0}
dictionary[first_char] = dict_next
dict_next['_'] += 1
if len(value) > 1:
add(value[1:], dict_next)
else:
dict_next[None] = 1
def count_leafs(dictionary):
return dictionary['_']
# counter = 0
# for key in dictionary:
# if key is None:
# counter += 1
# else:
# counter += count_leafs(dictionary[key])
# return counter
def find(value, dictionary):
if len(value) > 0:
first_char = value[0]
dict_next = dictionary.get(first_char)
if dict_next is not None:
return find(value[1:], dict_next)
else:
return 0
else:
return count_leafs(dictionary)
def find_print(value):
global answer
answer += (str(find(value, dictionary_head)) + '\n')
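# Trace of the sample input in the header (illustration): after add('hack')
# and add('hackerrank'), every node along the shared prefix 'hac' has
# '_' == 2, so find('hac') returns 2; find('hak') dead-ends at 'k' and
# returns 0.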
n = int(input().strip())
for a0 in range(n):
op, contact = input().strip().split(' ')
if op == 'add':
add(contact, dictionary_head)
else:
find_print(contact)
print(answer)
# print(dictionary_head)
# add('hack', dictionary_head)
# add('hackerrank', dictionary_head)
# find_print('hak')
# find_print('hac')
| {
"repo_name": "eugenj/global-notes",
"path": "python_learn/contacts.py",
"copies": "1",
"size": "1466",
"license": "mit",
"hash": 4715599396825900000,
"line_mean": 18.9428571429,
"line_max": 56,
"alpha_frac": 0.5375170532,
"autogenerated": false,
"ratio": 3.316742081447964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9354259134647964,
"avg_score": 0,
"num_lines": 70
} |
# 4. SNMP Basics
#
# a. Create an 'SNMP' directory in your home directory.
#
# $ mkdir SNMP
# $ cd SNMP
#
# b. Verify that you can import the snmp_helper library. This is a small library that I created to simplify aspects of PySNMP.
#
# $ python
# Python 2.7.5 (default, Feb 11 2014, 07:46:25)
# [GCC 4.8.2 20140120 (Red Hat 4.8.2-13)] on linux2
# Type "help", "copyright", "credits" or "license" for more information.
# >>>
# >>> import snmp_helper
#
# c. Create a script that connects to both routers (pynet-rtr1 and pynet-rtr2) and prints out both the MIB2 sysName and sysDescr.
import snmp_helper
COMMUNITY_STRING = "galileo"
RTR1_SNMP_PORT = 7961
RTR2_SNMP_PORT = 8061
IP = "50.76.53.27"
RTR1 = (IP, COMMUNITY_STRING, RTR1_SNMP_PORT)
RTR2 = (IP, COMMUNITY_STRING, RTR2_SNMP_PORT)
sysName = "1.3.6.1.2.1.1.5.0"
sysDescr = "1.3.6.1.2.1.1.1.0"
## Getting snmp info
snmp_SysName_rtr1 = snmp_helper.snmp_get_oid(RTR1, oid=sysName)
snmp_SysDescr_rtr1 = snmp_helper.snmp_get_oid(RTR1, oid=sysDescr)
snmp_SysName_rtr2 = snmp_helper.snmp_get_oid(RTR2, oid=sysName)
snmp_SysDescr_rtr2 = snmp_helper.snmp_get_oid(RTR2, oid=sysDescr)
## Saving SNMP info in a variable
snmp_RTR1_output_SysName = snmp_helper.snmp_extract(snmp_SysName_rtr1)
snmp_RTR1_output_SysDescr = snmp_helper.snmp_extract(snmp_SysDescr_rtr1)
snmp_RTR2_output_SysName = snmp_helper.snmp_extract(snmp_SysName_rtr2)
snmp_RTR2_output_SysDescr = snmp_helper.snmp_extract(snmp_SysDescr_rtr2)
print "******************* " + snmp_RTR1_output_SysName + " *********************"
print snmp_RTR1_output_SysDescr
print "******************* " + snmp_RTR2_output_SysName + " *********************"
print snmp_RTR2_output_SysDescr
| {
"repo_name": "linkdebian/pynet_course",
"path": "class2/exercise4.py",
"copies": "1",
"size": "1708",
"license": "apache-2.0",
"hash": -3101316275643214000,
"line_mean": 29.5,
"line_max": 131,
"alpha_frac": 0.6797423888,
"autogenerated": false,
"ratio": 2.5454545454545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8625569408419036,
"avg_score": 0.019925505167101808,
"num_lines": 56
} |
"""4Sum
Given an array `nums` of n integers and an integer `target`, are there elements a, b, c and d in
`nums` such that a + b + c + d = target ? Find all unique quadruplets in the array which gives the
sum of target.
Note:
The solution set must not contain duplicate quadruplets.
Example:
Given array nums = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is :
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
Refer https://leetcode.com/problems/4sum
"""
class Solution:
"""二分法思想"""
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
results = []
if len(nums) < 4:
return results
nums.sort()
for i1 in range(len(nums) - 3):
left = target - nums[i1]
            # Note the right boundary: two more elements still have to be chosen.
for i2 in range(i1 + 1, len(nums) - 2):
l = i2 + 1
r = len(nums) - 1
while l < r:
s = nums[i2] + nums[l] + nums[r]
if s == left:
item = [nums[i1], nums[i2], nums[l], nums[r]]
                    # Deduplicate: record each quadruplet only once.
if item not in results:
results.append(item)
l += 1
r -= 1
elif s < left:
l += 1
else:
r -= 1
return results
class SolutionRecursive:
"""递归解法
基于递归思想缩小问题规模,两个要点:
1. 如何缩小问题规模?
2. 何时停止递归?
"""
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums.sort()
results = set()
def find_nsum(low, high, target, n, subresult):
if n < 2:
return
if high - low + 1 < n:
return
if target < nums[low] * n:
return
if target > nums[high] * n:
return
if n == 2:
# two sum
while low < high:
s = nums[low] + nums[high]
if s < target:
low += 1
elif s > target:
high -= 1
else:
results.add((*subresult, nums[low], nums[high]))
low += 1
high -= 1
else:
for i in range(low, high + 1):
                    # Recurse only on the first occurrence of each value at
                    # this depth so duplicate quadruplets are never produced.
                    if i == low or (i > low and nums[i - 1] != nums[i]):
find_nsum(i + 1, high, target - nums[i], n - 1, (*subresult, nums[i]))
find_nsum(0, len(nums) - 1, target, 4, [])
return list(results)
if __name__ == '__main__':
cases = [([1, 0, -1, 0, -2, 2], 0, [[-1, 0, 0, 1], [-2, -1, 1, 2], [-2, 0, 0, 2]]),
([0, 0, 0, 0], 0, [[0, 0, 0, 0]]),
([-3, -2, -1, 0, 0, 1, 2, 3], 0, [[-3, -2, 2, 3], [-3, -1, 1, 3], [-3, 0, 0, 3],
[-3, 0, 1, 2], [-2, -1, 0, 3], [-2, -1, 1, 2],
[-2, 0, 0, 2], [-1, 0, 0, 1]])]
solutions = [Solution, SolutionRecursive]
for case in cases:
target_set = set()
for item in case[2]:
target_set.add(tuple(item))
for solution in solutions:
result_set = set()
for item in solution().fourSum(case[0], case[1]):
result_set.add(tuple(item))
assert target_set == result_set
| {
"repo_name": "aiden0z/snippets",
"path": "leetcode/018_4sum.py",
"copies": "1",
"size": "3820",
"license": "mit",
"hash": -9187323078683178000,
"line_mean": 27.796875,
"line_max": 98,
"alpha_frac": 0.3906673901,
"autogenerated": false,
"ratio": 3.3267148014440435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42173821915440435,
"avg_score": null,
"num_lines": null
} |
"""4th update that adds users and roles
Revision ID: 32a8ac91d28d
Revises: 37a45401820b
Create Date: 2015-11-08 16:56:55.335926
"""
# revision identifiers, used by Alembic.
revision = '32a8ac91d28d'
down_revision = '37a45401820b'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('password', sa.String(length=128), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles')
op.drop_table('users')
### end Alembic commands ###
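# Typical Alembic workflow (assumed, not part of the generated file):
# `alembic upgrade head` applies this revision, and
# `alembic downgrade 37a45401820b` reverts it.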
| {
"repo_name": "Rdbaker/GameCenter",
"path": "migrations/versions/32a8ac91d28d_.py",
"copies": "2",
"size": "1317",
"license": "mit",
"hash": -6200086952976656000,
"line_mean": 29.6279069767,
"line_max": 64,
"alpha_frac": 0.6659073652,
"autogenerated": false,
"ratio": 3.4031007751937983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5069008140393798,
"avg_score": null,
"num_lines": null
} |
# decode a frame
def readFrame(cyclecount0, cyclecount1):
if cyclecount0 < cyclecount1 :
if cyclecount1 < 2 * cyclecount0 :
return 0 # c0 < c1 < 2.c0 : 1100
return 1 # 2.c0 < c1 : 1110
else :
if cyclecount0 < 2 * cyclecount1 :
return 0 # c1 < c0 < 2.c1 : 1100
return CARRIER # 2.c1 < c0 : 1000
return ERROR # never reach
# receive a frame
def getFrame(pin):
if not GPIO.input(pin):
GPIO.wait_for_edge(pin, GPIO.RISING)
start1 = time.time()
GPIO.wait_for_edge(pin, GPIO.FALLING)
start0 = time.time()
cyclecount1 = start0 - start1
GPIO.wait_for_edge(pin, GPIO.RISING)
cyclecount0 = time.time() - start0
return readFrame(cyclecount0, cyclecount1)
#### #### #### ####
def sendCarrier(pin, sleepms):
GPIO.output(pin, True)
time.sleep(0.001 * sleepms)
GPIO.output(pin, False)
time.sleep(0.003 * sleepms)
def send0(pin, sleepms):
GPIO.output(pin, True)
time.sleep(0.002 * sleepms)
GPIO.output(pin, False)
time.sleep(0.002 * sleepms)
def send1(pin, sleepms):
GPIO.output(pin, True)
time.sleep(0.003 * sleepms)
GPIO.output(pin, False)
time.sleep(0.001 * sleepms)
#### #### #### ####
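# Timing summary (derived from the functions above): with sleepms == 1 each
# symbol spans 4 ms. send1 is 3 ms high / 1 ms low ("1110"), send0 is
# 2 ms / 2 ms ("1100"), and the carrier is 1 ms high / 3 ms low ("1000"),
# exactly the high/low ratios that readFrame() classifies back into 1, 0
# and CARRIER.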
| {
"repo_name": "lyriarte/dummycomm",
"path": "gpio_comm.py",
"copies": "1",
"size": "1302",
"license": "bsd-2-clause",
"hash": 9105609945551670000,
"line_mean": 19.34375,
"line_max": 43,
"alpha_frac": 0.6466973886,
"autogenerated": false,
"ratio": 2.547945205479452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3694642594079452,
"avg_score": null,
"num_lines": null
} |
# # 50.012 Networks Lab 2 code skeleton
# Based on old code from Kurose & Ross: Computer Networks
# Streamlined, threaded, ported to HTTP/1.1
from socket import *
from thread import *
import sys,os
BUFFER_SIZE = 4096
PROXY_PORT = 8080
def clientthread(tcpCliSock):
message = tcpCliSock.recv(BUFFER_SIZE)
print message
# Extract the parameters from the given message
# we need to fill "host": the host name in the request
# we need to fill "resource": the resource requested on the target system
# we need to fill "filetouse": an escaped valid path to the cache file. could be hash value as well
host = None
resource = None
import re
    m = re.search(r'GET https?://([^/]*)(.*) HTTP/1.1',message)
if m:
print "host from first line: "+m.group(1)
print "resource from first line: "+m.group(2)
host=m.group(1)
resource=m.group(2)
# Extract Host
    m = re.search(r'Host: (\S+)\r?\n',message)
if m:
print "host from Host:"+m.group(1)
host=m.group(1)
if host==None or resource==None:
print "ERROR: no host found"
return
# Extract Accept
accept=""
    m = re.search(r'Accept: (\S+)\r?\n',message)
if m:
print "accept: "+m.group(1)
accept=m.group(1)
# lets not do connection-alive
message=message.replace("Connection: keep-alive","Connection: close")
# generate our cache file name
import hashlib
m = hashlib.md5()
m.update(host+resource)
filetouse=m.hexdigest()
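    # Hashing host+resource yields a fixed-length, filesystem-safe cache key;
    # the tradeoff is that the cached URL cannot be recovered from the name.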
fileExist = False
print "Host: "+host
print "Resource: "+resource
try:
# Check wether the file exist in the cache
f = open(filetouse, "r")
outputdata = f.readlines()
fileExist = True
f.close()
# send out the cached file content here
for i in outputdata:
tcpCliSock.send(i)
tcpCliSock.close()
print 'Read from cache'
# Error handling for file not found in cache
except IOError:
if fileExist == False:
c = socket(AF_INET, SOCK_STREAM) # Create a new client socket here
c.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
os.environ['http_proxy']=''
            print 'host is ' + host + ', resource is ' + resource
try:
# Connect to the socket to port 80
c.connect((host, 80))
# Create a temporary file on this socket and ask port 80 for the file requested by the client
fileobj = c.makefile()
fileobj.write("GET "+ resource + " HTTP/1.1\r\n")
fileobj.write("Host: "+host+"\r\n")
if accept:
fileobj.write("Accept: "+accept+"\r\n")
fileobj.write("Connection: close\r\n")
fileobj.write("\r\n")
fileobj.flush()
# Read the response into buffer
outputdata = fileobj.readlines()
fileobj.close()
c.close()
# Create a new file in the cache for the requested file.
# Also send the response in the buffer to client socket and the corresponding file in the cache
print "received response, saving in "+filetouse
tmpFile = open("./" + filetouse,"wb")
# save returned data into cache file
for i in outputdata:
tmpFile.write(i)
tmpFile.close()
# also send returned data to original client
for i in outputdata:
tcpCliSock.send(i)
except gaierror as e:
print e
except:
print "Illegal request"+str(sys.exc_info()[0])
else:
# HTTP response message for file not found
tcpCliSock.send("404 Not Found\n")
# Close the client and the server sockets
tcpCliSock.close()
# Create a server socket, bind it to a port and start listening
tcpSerSock = socket(AF_INET, SOCK_STREAM)
tcpSerSock.bind(('localhost', PROXY_PORT))
tcpSerSock.listen(1)
while 1:
# Start receiving data from the client
print 'Ready to serve...'
tcpCliSock, addr = tcpSerSock.accept()
print 'Received a connection from:', addr
#start new thread takes 1st argument as a function name to be run, second is the tuple of arguments to the function.
start_new_thread(clientthread ,(tcpCliSock,))
#clientthread(tcpCliSock)
| {
"repo_name": "LYZhelloworld/Courses",
"path": "50.012/02/proxyServer.py",
"copies": "1",
"size": "4605",
"license": "mit",
"hash": 1967903279069412900,
"line_mean": 36.1370967742,
"line_max": 120,
"alpha_frac": 0.5711183496,
"autogenerated": false,
"ratio": 4.093333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5164451682933333,
"avg_score": null,
"num_lines": null
} |
""" 500Hz Volta Box
"""
import logging
import queue as q
import time
from volta.common.interfaces import VoltaBox
from volta.common.util import TimeChopper
from netort.data_processing import Drain
logger = logging.getLogger(__name__)
class VoltaBox500Hz(VoltaBox):
""" VoltaBox500Hz - works with plain-text 500hz box, grabs data and stores data to queue """
def __init__(self, config, core):
VoltaBox.__init__(self, config, core)
self.sample_rate = config.get_option('volta', 'sample_rate', 500)
self.baud_rate = config.get_option('volta', 'baud_rate', 115200)
# initialize data source
self.source_opener.baud_rate = self.baud_rate
self.source_opener.read_timeout = self.grab_timeout
self.data_source = self.source_opener()
logger.debug('Data source initialized: %s', self.data_source)
        self.my_metrics = {}
        # Populate the metrics dict up front; start_test() reads
        # self.my_metrics['currents'] and would otherwise raise a KeyError.
        self.__create_my_metrics()
def __create_my_metrics(self):
self.my_metrics['currents'] = self.core.data_session.new_metric()
def start_test(self, results):
""" Grab stage - starts grabber thread and puts data to results queue
+clean up dirty buffer
pipeline
read source data ->
chop by samplerate w/ ratio ->
make pandas DataFrame ->
drain DataFrame to queue `results`
"""
logger.info('volta start test')
self.grabber_q = results
# clean up dirty buffer
for _ in range(self.sample_rate):
self.data_source.readline()
logger.info('reader init?')
self.reader = BoxPlainTextReader(
self.data_source, self.sample_rate
)
logger.info('reader init!')
self.pipeline = Drain(
TimeChopper(
self.reader, self.sample_rate, self.chop_ratio
),
self.my_metrics['currents']
)
logger.info('Starting grab thread...')
self.pipeline.start()
        logger.debug('Waiting for grabber thread to finish...')
def end_test(self):
self.reader.close()
self.pipeline.close()
self.pipeline.join(10)
self.data_source.close()
def get_info(self):
data = {}
if self.pipeline:
data['grabber_alive'] = self.pipeline.isAlive()
if self.grabber_q:
data['grabber_queue_size'] = self.grabber_q.qsize()
return data
class BoxPlainTextReader(object):
"""
Read chunks from source, convert and return numpy.array
"""
def __init__(self, source, cache_size=1024 * 1024 * 10):
self.closed = False
self.cache_size = cache_size
self.source = source
self.buffer = ""
def _read_chunk(self):
data = self.source.read(self.cache_size)
logger.info('Data:%s', data)
if data:
parts = data.rsplit('\n', 1)
if len(parts) > 1:
ready_chunk = self.buffer + parts[0] + '\n'
self.buffer = parts[1]
# FIXME string_to_np(ready_chunk, type=float, sep='\n')
#return string_to_np(ready_chunk, type=float, sep='\n')
else:
self.buffer += parts[0]
else:
self.buffer += self.source.readline()
return None
def __iter__(self):
while not self.closed:
yield self._read_chunk()
yield self._read_chunk()
def close(self):
self.closed = True
# ==================================================
def main():
logging.basicConfig(
level="DEBUG",
format='%(asctime)s [%(levelname)s] [Volta 500hz] %(filename)s:%(lineno)d %(message)s')
logger.info("Volta 500 hz box ")
cfg = {
'source': '/dev/cu.wchusbserial1420'
# 'source': '/Users/netort/output.bin'
}
worker = VoltaBox500Hz(cfg)
logger.info('worker args: %s', worker.__dict__)
grabber_q = q.Queue()
worker.start_test(grabber_q)
time.sleep(10)
logger.info('test finishing...')
worker.end_test()
logger.info('Queue size after test: %s', grabber_q.qsize())
logger.info('1st sample:\n %s', grabber_q.get())
logger.info('2nd sample:\n %s', grabber_q.get())
logger.info('3rd sample:\n %s', grabber_q.get())
logger.info('test finished')
if __name__ == "__main__":
main() | {
"repo_name": "yandex-load/volta",
"path": "volta/providers/boxes/box500hz.py",
"copies": "1",
"size": "4353",
"license": "mpl-2.0",
"hash": -255142881213687840,
"line_mean": 29.8794326241,
"line_max": 96,
"alpha_frac": 0.5688031243,
"autogenerated": false,
"ratio": 3.7429062768701633,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9806935825499735,
"avg_score": 0.00095471513408561,
"num_lines": 141
} |
# 50m maps of all states (10m Alaska shape)
import os
import sys
import argparse
from mapnik_xml import *
# Global variables
xd_width, xd_height = 1200, 900
base_dir = '/Users/monad/Work/data'
cult10m_dir = os.path.join(base_dir, '10m_cultural', '10m_cultural')
phys10m_dir = os.path.join(base_dir, '10m_physical')
cult50m_dir = os.path.join(base_dir, '50m_cultural')
phys50m_dir = os.path.join(base_dir, '50m_physical')
land_file = os.path.join(phys50m_dir, 'ne_50m_land.shp')
land_boundaries_file = os.path.join(phys50m_dir, 'ne_50m_land.shp')
countries_file = os.path.join(cult50m_dir, 'ne_50m_admin_0_countries.shp')
state_boundaries_file = os.path.join(cult50m_dir, 'ne_50m_admin_1_states_provinces_lines.shp')
lakes_file = os.path.join(phys50m_dir, 'ne_50m_lakes.shp')
country_boundaries_file = os.path.join(cult50m_dir, 'ne_50m_admin_0_boundary_lines_land.shp')
states10m_file = os.path.join(cult10m_dir, 'ne_10m_admin_1_states_provinces_shp.shp')
states50m_file = os.path.join(cult50m_dir, 'ne_50m_admin_1_states_provinces_shp.shp')
#land_file = os.path.join(phys10m_dir, 'ne_10m_land.shp')
#land_file = os.path.join(cult10m_dir, 'ne_10m_admin_0_sovereignty.shp')
#land_boundaries_file = os.path.join(phys10m_dir, 'ne_10m_land.shp')
#boundaries_file = os.path.join(cult10m_dir, 'ne_10m_admin_0_boundary_lines_land.shp')
#boundaries_file = os.path.join(edited50m_dir, 'ne_50m_admin_0_boundary_lines_land.shp')
#countries_file = os.path.join(cult10m_dir, 'ne_10m_admin_0_countries.shp')
#lakes_file = os.path.join(phys10m_dir, 'ne_10m_lakes.shp')
lakes_size = 3
proj4_usa = '+proj=lcc +lat_1=33 +lat_2=45 +lat_0=39 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs'
coords_48 = (-2507299.94,-1797501.58,2291598.94,1801672.58)
coords_ne = (230348, -289948,2331777,1276571)
coords_all = (-6377775,-2106929,2244702,4808764)
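# Projection note (read off the proj4 string above): this is a Lambert
# conformal conic centered on the lower 48 (standard parallels 33/45,
# origin 39N/96W), so the coords_* bounding boxes are in projected meters,
# not degrees.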
def report_error(msg):
sys.stderr.write("**ERROR**: {0}\n".format(msg))
parser = argparse.ArgumentParser(description="Creates maps for all 50 states")
size_group = parser.add_mutually_exclusive_group()
size_group.add_argument('--size', nargs=2, metavar=('W', 'H'),
type=int, default=(xd_width, xd_height),
help="the size of output images")
size_group.add_argument('--xd', action='store_true',
help="equivalent to --size {0} {1}".format(xd_width, xd_height))
size_group.add_argument('--hd', action='store_true',
help="0.5 * (xd size)")
size_group.add_argument('--sd', action='store_true',
help="0.25 * (xd size)")
parser.add_argument('--png8', action='store_true',
help="8-bit PNG images")
parser.add_argument('--suffix', default="-map",
help="the suffix for output image names")
parser.add_argument('--out', dest="out_dir", metavar='DIR',
help="the output directory")
parser.add_argument('--color', default='red',
help="polygon fill color (use 'none' for no color)")
parser.add_argument('--no-border', action='store_true',
help="do no render borders and the background")
#parser.add_argument('--line-color', default='black',
# help="border line color (use 'none' for no borders)")
parser.add_argument('--scale', type=float, default=1.0,
help="scale for lines")
parser.add_argument('states', nargs='*',
help="create images for given states only")
parser.add_argument('--debug', action='store_true',
help="debug mode")
# Parse and validate arguments
args = parser.parse_args()
if args.sd:
width, height = int(xd_width * 0.25), int(xd_height * 0.25)
args.scale *= 0.25
elif args.hd:
width, height = int(xd_width * 0.5), int(xd_height * 0.5)
args.scale *= 0.5
elif args.xd:
width, height = xd_width, xd_height
elif args.size:
width, height = args.size
else:
width, height = xd_width, xd_height
if args.color == 'none':
args.color = None
#if args.line_color == 'none':
# args.line_color = None
if args.scale < 0.01 or args.scale > 10:
report_error("Bad scale: {0}".format(args.scale))
sys.exit(1)
if width < 1 or height < 1 or width > 10000 or height > 10000:
report_error("Bad image size: {0} x {1}".format(width, height))
sys.exit(1)
if not args.out_dir:
args.out_dir = "out_maps_of_states_{0}_{1}".format(width, height)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
out_format = 'png256' if args.png8 else 'png'
print("Size: {0} x {1}, scale: {2}".format(width, height, args.scale))
# Land
def land_layer():
s = Style("Land")
s.symbols.append(PolygonSymbolizer('#f1daa0'))
return Layer("Land", land_file, s)
# USA
def usa_layer():
s = Style("USA")
s.filter = "[admin] = 'United States of America'"
s.symbols.append(PolygonSymbolizer('white'))
return Layer("USA", countries_file, s)
# Land boundaries
def land_boundaries_layer():
s = Style("Land Boundaries")
s.symbols.append(LineSymbolizer('#4fadc2', 1.5))
return Layer("Land Boundaries", land_boundaries_file, s)
# State boundaries
def state_boundaries_layer():
s = Style("State Boundaries")
s.filter = "[adm0_name] = 'United States of America'"
s.symbols.append(LineSymbolizer('#808080', 1.5))
return Layer("State Boundaries", state_boundaries_file, s)
# Lakes
def lakes_layer():
s = Style("Lakes")
s.filter = "[scalerank] <= {0}".format(lakes_size)
s.symbols.append(PolygonSymbolizer('#b3e2ee'))
s.symbols.append(LineSymbolizer('#4fadc2', 1.5))
return Layer("Lakes", lakes_file, s)
# Boundaries of countries
def country_boundaries_layer():
s = Style("Country Boundaries")
ls = LineSymbolizer('black', 3.0)
ls.add_dash(8, 4)
ls.add_dash(2, 2)
ls.add_dash(2, 2)
s.symbols.append(ls)
return Layer("Country Boundaries", country_boundaries_file, s)
# A state
def state_layer(state_abbrev, flag10=False):
s = Style(state_abbrev)
s.filter = "[iso_3166_2] = 'US-{0}'".format(state_abbrev.upper())
if args.color:
ps = PolygonSymbolizer(args.color)
s.symbols.append(ps)
ds = states10m_file if flag10 else states50m_file
return Layer(state_abbrev, ds, s)
# Main map functions
def create_map(proj, coords, width, height):
m = Map(width, height, proj, coords)
if args.no_border:
m.layers.append(land_layer())
m.layers.append(usa_layer())
m.layers.append(lakes_layer())
else:
#m.background = '#b3e2ee80'
m.background = '#b3e2ee'
m.layers.append(land_layer())
m.layers.append(usa_layer())
m.layers.append(land_boundaries_layer())
m.layers.append(state_boundaries_layer())
m.layers.append(lakes_layer())
m.layers.append(country_boundaries_layer())
return m
def render_states(m, states, subdir=None, suffix=""):
for state in states:
print("Processing: {0}".format(state))
if args.no_border:
pos = 2
while len(m.layers) > 3:
del m.layers[pos]
else:
pos = 3
while len(m.layers) > 6:
del m.layers[pos]
layer = state_layer(state, flag10=(state == 'AK'))
m.layers[pos:pos] = [layer]
out_name = '{0}{1}.png'.format(state.lower(), suffix)
out_path = os.path.join(subdir, out_name) if subdir else out_name
render_map(m, os.path.join(args.out_dir, out_path),
out_format=out_format, scale=args.scale, debug=args.debug)
# The main script
map_48 = create_map(proj4_usa, coords_48, width, height)
map_ne = create_map(proj4_usa, coords_ne, width, height)
map_all = create_map(proj4_usa, coords_all, width, height)
render_map(map_48, os.path.join(args.out_dir, '48', 'a.png'),
out_format=out_format, scale=args.scale, debug=args.debug)
render_map(map_ne, os.path.join(args.out_dir, 'ne', 'a.png'),
out_format=out_format, scale=args.scale, debug=args.debug)
render_map(map_all, os.path.join(args.out_dir, 'all', 'a.png'),
out_format=out_format, scale=args.scale, debug=args.debug)
# Render states
states = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD',
'ME', 'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH',
'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
'SD', 'TN', 'TX', 'UT', 'VA', 'VT', 'WA', 'WI', 'WV', 'WY']
ne_states = ['CT', 'DE', 'MA', 'MD', 'NH', 'NJ', 'RI', 'VT']
far_states = ['AK', 'HI']
if args.states:
    test = lambda s: s in args.states
    # list() keeps this working on Python 3, where filter() returns a lazy
    # iterator that cannot be concatenated or reused below.
    states = list(filter(test, states))
    ne_states = list(filter(test, ne_states))
    far_states = list(filter(test, far_states))
# Render states
render_states(map_48, set(states) - set(ne_states + far_states), subdir="48", suffix=args.suffix)
# Render northeastern states
print("\nRendering northeastern states")
render_states(map_ne, ne_states, subdir="ne", suffix=args.suffix)
# Render Alaska and Hawaii
print("\nRendering Alaska and Hawaii")
render_states(map_all, far_states, subdir="all", suffix=args.suffix)
print("done")
| {
"repo_name": "monadius/mapnik2_maps",
"path": "usa50_50m_xml.py",
"copies": "1",
"size": "9239",
"license": "mit",
"hash": -8565631197804279000,
"line_mean": 32.7189781022,
"line_max": 109,
"alpha_frac": 0.6274488581,
"autogenerated": false,
"ratio": 2.8462723351817623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3973721193281762,
"avg_score": null,
"num_lines": null
} |
# 50m maps of all states (10m Alaska shape)
import os
import sys
import argparse
# Global variables
xd_width, xd_height = 1200, 900
base_dir = '/Users/monad/Work/data'
cult10m_dir = os.path.join(base_dir, '10m_cultural', '10m_cultural')
phys10m_dir = os.path.join(base_dir, '10m_physical')
cult50m_dir = os.path.join(base_dir, '50m_cultural')
phys50m_dir = os.path.join(base_dir, '50m_physical')
land_file = os.path.join(phys50m_dir, 'ne_50m_land.shp')
land_boundaries_file = os.path.join(phys50m_dir, 'ne_50m_land.shp')
countries_file = os.path.join(cult50m_dir, 'ne_50m_admin_0_countries.shp')
state_boundaries_file = os.path.join(cult50m_dir, 'ne_50m_admin_1_states_provinces_lines.shp')
lakes_file = os.path.join(phys50m_dir, 'ne_50m_lakes.shp')
country_boundaries_file = os.path.join(cult50m_dir, 'ne_50m_admin_0_boundary_lines_land.shp')
states10m_file = os.path.join(cult10m_dir, 'ne_10m_admin_1_states_provinces_shp.shp')
states50m_file = os.path.join(cult50m_dir, 'ne_50m_admin_1_states_provinces_shp.shp')
#land_file = os.path.join(phys10m_dir, 'ne_10m_land.shp')
#land_file = os.path.join(cult10m_dir, 'ne_10m_admin_0_sovereignty.shp')
#land_boundaries_file = os.path.join(phys10m_dir, 'ne_10m_land.shp')
#boundaries_file = os.path.join(cult10m_dir, 'ne_10m_admin_0_boundary_lines_land.shp')
#boundaries_file = os.path.join(edited50m_dir, 'ne_50m_admin_0_boundary_lines_land.shp')
#countries_file = os.path.join(cult10m_dir, 'ne_10m_admin_0_countries.shp')
#lakes_file = os.path.join(phys10m_dir, 'ne_10m_lakes.shp')
lakes_size = 3
proj4_usa = '+proj=lcc +lat_1=33 +lat_2=45 +lat_0=39 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs'
coords_48 = (-2607277,-1554066,2391576,1558237)
zoom_48 = 0.96
coords_ne = (230348, -289948,2331777,1276571)
coords_all = (-6377775,-2106929,2244702,4808764)
def report_error(msg):
sys.stderr.write("**ERROR**: {0}\n".format(msg))
parser = argparse.ArgumentParser(description="Creates maps for all 50 states")
size_group = parser.add_mutually_exclusive_group()
size_group.add_argument('--size', nargs=2, metavar=('W', 'H'),
type=int, default=(xd_width, xd_height),
help="the size of output images")
size_group.add_argument('--xd', action='store_true',
help="equivalent to --size {0} {1}".format(xd_width, xd_height))
size_group.add_argument('--hd', action='store_true',
help="0.5 * (xd size)")
size_group.add_argument('--sd', action='store_true',
help="0.25 * (xd size)")
parser.add_argument('--png8', action='store_true',
help="8-bit PNG images")
parser.add_argument('--suffix', default="-map",
help="the suffix for output image names")
parser.add_argument('--out', dest="out_dir", metavar='DIR',
help="the output directory")
parser.add_argument('--color', default='red',
help="polygon fill color (use 'none' for no color)")
parser.add_argument('--no-border', action='store_true',
help="do no render borders and the background")
#parser.add_argument('--line-color', default='black',
# help="border line color (use 'none' for no borders)")
parser.add_argument('--scale', type=float, default=1.0,
help="scale for lines")
parser.add_argument('states', nargs='*',
help="create images for given states only")
# Parse and validate arguments
args = parser.parse_args()
if args.sd:
width, height = int(xd_width * 0.25), int(xd_height * 0.25)
args.scale *= 0.25
elif args.hd:
width, height = int(xd_width * 0.5), int(xd_height * 0.5)
args.scale *= 0.5
elif args.xd:
width, height = xd_width, xd_height
elif args.size:
width, height = args.size
else:
width, height = xd_width, xd_height
if args.color == 'none':
args.color = None
#if args.line_color == 'none':
# args.line_color = None
if args.scale < 0.01 or args.scale > 10:
report_error("Bad scale: {0}".format(args.scale))
sys.exit(1)
if width < 1 or height < 1 or width > 10000 or height > 10000:
report_error("Bad image size: {0} x {1}".format(width, height))
sys.exit(1)
if not args.out_dir:
args.out_dir = "out_maps_of_states_{0}_{1}".format(width, height)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
out_format = 'png256' if args.png8 else 'png'
print("Size: {0} x {1}, scale: {2}".format(width, height, args.scale))
# Styles and layers
def add_layer_with_style(m, layer, style, style_name=None):
if not style_name:
style_name = layer.name + 'Style'
m.append_style(style_name, style)
layer.styles.append(style_name)
m.layers.append(layer)
# Land
def land_style():
s = Style()
r = Rule()
ps = PolygonSymbolizer()
ps.fill = Color('#f1daa0')
r.symbols.append(ps)
# stk = Stroke(Color('#4fadc2'), 5.0)
# ls = LineSymbolizer(stk)
# r.symbols.append(ls)
s.rules.append(r)
return s
def land_layer():
ds = Shapefile(file=land_file)
layer = Layer('Land')
layer.datasource = ds
return layer
# USA
def usa_style():
s = Style()
r = Rule()
r.filter = Expression("[admin] = 'United States of America'")
ps = PolygonSymbolizer()
ps.fill = Color('white')
r.symbols.append(ps)
s.rules.append(r)
return s
def usa_layer():
ds = Shapefile(file=countries_file)
layer = Layer('USA')
layer.datasource = ds
return layer
# Land boundaries
def land_boundaries_style():
s = Style()
r = Rule()
# r.filter = Expression("[admin] = 'United States of America'")
ls = LineSymbolizer()
ls.stroke = Color('#4fadc2')
ls.stroke_width = 1.5
r.symbols.append(ls)
# ps = PolygonSymbolizer()
# ps.fill = Color('red')
# r.symbols.append(ps)
s.rules.append(r)
return s
def land_boundaries_layer():
ds = Shapefile(file=land_boundaries_file)
layer = Layer('Land Boundaries')
layer.datasource = ds
return layer
# State boundaries
def state_boundaries_style():
s = Style()
r = Rule()
r.filter = Expression("[adm0_name] = 'United States of America'")
ls = LineSymbolizer()
ls.stroke = Color('#808080')
ls.stroke_width = 1.5
r.symbols.append(ls)
s.rules.append(r)
return s
def state_boundaries_layer():
ds = Shapefile(file=state_boundaries_file)
layer = Layer('State Boundaries')
layer.datasource = ds
return layer
# Lakes
def lakes_style():
s = Style()
r = Rule()
r.filter = Expression("[scalerank] <= {0}".format(lakes_size))
ps = PolygonSymbolizer()
ps.fill = Color('#b3e2ee')
r.symbols.append(ps)
ls = LineSymbolizer()
ls.stroke = Color('#4fadc2')
ls.stroke_width = 1.5
r.symbols.append(ls)
s.rules.append(r)
return s
def lakes_layer():
ds = Shapefile(file=lakes_file)
layer = Layer('Lakes')
layer.datasource = ds
return layer
# Boundaries of countries
def country_boundaries_style():
s = Style()
r = Rule()
# stk = Stroke()
# stk.add_dash(8, 4)
# stk.add_dash(2, 2)
# stk.add_dash(2, 2)
# stk.color = Color('black')
# stk.width = 3.0
ls = LineSymbolizer()
ls.stroke = Color('black')
ls.stroke_width = 3.0
# TODO: does it work?
ls.stroke_dasharray = "[(8,4),(2,2),(2,2)]"
r.symbols.append(ls)
s.rules.append(r)
return s
def country_boundaries_layer():
ds = Shapefile(file=country_boundaries_file)
layer = Layer('Country Boundaries')
layer.datasource = ds
return layer
# A state
def state_style(state_abbrev):
s = Style()
r = Rule()
r.filter = Expression("[iso_3166_2] = 'US-{0}'".format(state_abbrev.upper()))
if args.color:
ps = PolygonSymbolizer()
ps.fill = Color(args.color)
r.symbols.append(ps)
s.rules.append(r)
return s
def state_layer(state_abbrev, flag10=False):
if flag10:
ds = Shapefile(file=states10m_file)
else:
ds = Shapefile(file=states50m_file)
layer = Layer(state_abbrev)
layer.datasource = ds
return layer
# Main map functions
def create_map(proj, coords, width, height, zoom=None):
m = Map(width, height, proj)
if args.no_border:
add_layer_with_style(m, land_layer(), land_style())
add_layer_with_style(m, usa_layer(), usa_style())
add_layer_with_style(m, lakes_layer(), lakes_style())
else:
#m.background = Color('#b3e2ee80')
m.background = Color('#b3e2ee')
add_layer_with_style(m, land_layer(), land_style())
add_layer_with_style(m, usa_layer(), usa_style())
add_layer_with_style(m, land_boundaries_layer(), land_boundaries_style())
add_layer_with_style(m, state_boundaries_layer(), state_boundaries_style())
add_layer_with_style(m, lakes_layer(), lakes_style())
add_layer_with_style(m, country_boundaries_layer(), country_boundaries_style())
m.zoom_to_box(Box2d(*coords))
if zoom:
m.zoom(zoom)
return m
def render_states(m, states, prefix="", suffix=""):
for state in states:
print("Processing: {0}".format(state))
if args.no_border:
pos = 2
while len(m.layers) > 3:
del m.layers[pos]
else:
pos = 3
while len(m.layers) > 6:
del m.layers[pos]
style = state_style(state)
layer = state_layer(state, flag10=(state == 'AK'))
style_name = layer.name + 'Style'
m.append_style(style_name, style)
layer.styles.append(style_name)
        m.layers[pos:pos] = [layer]
out_name = '{0}{1}{2}.png'.format(prefix, state.lower(), suffix)
render_to_file(m, os.path.join(args.out_dir, out_name),
out_format, args.scale)
# The main script
from mapnik import *
map_48 = create_map(proj4_usa, coords_48, width, height, zoom=zoom_48)
map_ne = create_map(proj4_usa, coords_ne, width, height)
map_all = create_map(proj4_usa, coords_all, width, height)
render_to_file(map_48, os.path.join(args.out_dir, 'a_48.png'),
out_format, args.scale)
render_to_file(map_ne, os.path.join(args.out_dir, 'a_ne.png'),
out_format, args.scale)
render_to_file(map_all, os.path.join(args.out_dir, 'a_all.png'),
out_format, args.scale)
# Render states
states = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD',
'ME', 'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH',
'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
'SD', 'TN', 'TX', 'UT', 'VA', 'VT', 'WA', 'WI', 'WV', 'WY']
ne_states = ['CT', 'DE', 'MA', 'MD', 'NH', 'NJ', 'RI', 'VT']
far_states = ['AK', 'HI']
if args.states:
test = lambda s: s in args.states
states = filter(test, states)
ne_states = filter(test, ne_states)
far_states = filter(test, far_states)
# Render all states (48 visible)
render_states(map_48, states, suffix=args.suffix)
# Render northeastern states
print("\nRendering northeastern states")
render_states(map_ne, ne_states, prefix="ne_", suffix=args.suffix)
# Render Alaska and Hawaii
print("\nRendering Alaska and Hawaii")
render_states(map_all, far_states, prefix="all_", suffix=args.suffix)
print("done")
| {
"repo_name": "monadius/mapnik2_maps",
"path": "usa50_50m.py",
"copies": "1",
"size": "11452",
"license": "mit",
"hash": -1064825273717549400,
"line_mean": 27.7738693467,
"line_max": 109,
"alpha_frac": 0.6139538945,
"autogenerated": false,
"ratio": 2.9014441347859132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8991047357975077,
"avg_score": 0.0048701342621670415,
"num_lines": 398
} |
# 50
# Find the prime below 1000000 that can be written as the sum of the most consecutive primes
# 997651
from datetime import datetime
from sympy import sieve
from prime import prime
class Solve(object):
def __init__(self):
pass
def solve(self):
# primes under 1000000: 78498
# prime.prime costs 3min 56sec
# sympy.sieve: 0.6sec, 100x faster
# primes = list(prime.primes_between(end=1000000))
primes = list(sieve.primerange(2, 1000000))
lenp = len(primes)
csums = [0]*(lenp+1)
max_length = 1
result = 0
for i in range(lenp):
csums[i+1] = csums[i] + primes[i]
def bisearch_prime(x, primes):
lo, hi = 0, len(primes)
while lo < hi:
mid = (lo+hi)//2
if x < primes[mid]:
                    # hi is exclusive here, so use mid (mid - 1 could skip
                    # the target element)
                    hi = mid
elif x > primes[mid]:
lo = mid + 1
else:
return True
return False
for end_idx in range(lenp+1):
# start_idx: inclusive, end_idx: exclusive
# 42sec
for start_idx in reversed(range(end_idx-max_length)):
target = csums[end_idx] - csums[start_idx]
if target > 1000000:
break
if bisearch_prime(target, primes):
max_length = end_idx - start_idx
result = target
# for start_idx in range(lenp):
# # As for double-loop infinite sequence, loop end index in outer loop is better.
# # 40sec
# for end_idx in range(start_idx+max_length+1, lenp+1):
# target = csums[end_idx] - csums[start_idx]
# if target > 1000000:
# break
# if bisearch_prime(target, primes):
# max_length = end_idx - start_idx
# result = target
return result
start = datetime.now()
s = Solve()
print s.solve()
print datetime.now() - start
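# A standard-library alternative to the hand-rolled bisearch_prime above,
# sketched as a comment (assumes `primes` is the same sorted list built in
# Solve.solve):
#   from bisect import bisect_left
#   def is_listed_prime(x, primes):
#       i = bisect_left(primes, x)
#       return i < len(primes) and primes[i] == x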
| {
"repo_name": "daicang/Euler",
"path": "p50.py",
"copies": "1",
"size": "2073",
"license": "mit",
"hash": -7824557918621743000,
"line_mean": 27.7916666667,
"line_max": 93,
"alpha_frac": 0.5012059817,
"autogenerated": false,
"ratio": 3.8247232472324724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4825929228932472,
"avg_score": null,
"num_lines": null
} |
"""5_1_1 to 5_2
- Add columns to blueprints table for the async. blueprints upload
Revision ID: 9d261e90b1f3
Revises: 5ce2b0cbb6f3
Create Date: 2020-11-26 14:07:36.053518
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
from cloudify.models_states import VisibilityState, BlueprintUploadState
from manager_rest.storage.models_base import JSONString, UTCDateTime
# revision identifiers, used by Alembic.
revision = '9d261e90b1f3'
down_revision = '5ce2b0cbb6f3'
branch_labels = None
depends_on = None
VISIBILITY_ENUM = postgresql.ENUM(*VisibilityState.STATES,
name='visibility_states',
create_type=False)
config_table = table(
'config',
column('name', sa.Text),
column('value', JSONString()),
column('schema', JSONString()),
column('is_editable', sa.Boolean),
column('updated_at', UTCDateTime()),
column('scope', sa.Text),
)
NEW_LDAP_CONFIG_ENTRIES = [
'ldap_group_members_filter',
'ldap_attribute_group_membership',
'ldap_base_dn',
'ldap_group_dn',
'ldap_bind_format',
'ldap_user_filter',
'ldap_group_member_filter',
'ldap_attribute_email',
'ldap_attribute_first_name',
'ldap_attribute_last_name',
'ldap_attribute_uid',
]
def upgrade():
upgrade_blueprints_table()
create_filters_table()
create_deployment_groups_table()
create_execution_schedules_table()
fix_previous_versions()
create_execution_groups_table()
add_new_config_entries()
set_null_on_maintenance_mode_cascade()
def downgrade():
revert_set_null_on_maintenance_mode_cascade()
remove_new_config_entries()
drop_execution_groups_table()
revert_fixes()
drop_execution_schedules_table()
drop_deployment_groups_table()
downgrade_blueprints_table()
drop_filters_table()
def add_new_config_entries():
op.bulk_insert(
config_table,
[
dict(
name=name,
value=None,
scope='rest',
schema={'type': 'string'},
is_editable=True,
)
for name in NEW_LDAP_CONFIG_ENTRIES
]
)
def remove_new_config_entries():
op.execute(
config_table.delete().where(
config_table.c.name.in_(NEW_LDAP_CONFIG_ENTRIES)
& (config_table.c.scope == op.inline_literal('rest'))
)
)
def upgrade_blueprints_table():
op.add_column('blueprints', sa.Column('state', sa.Text(), nullable=True))
op.add_column('blueprints', sa.Column('error', sa.Text(), nullable=True))
op.add_column('blueprints', sa.Column('error_traceback',
sa.Text(),
nullable=True))
op.alter_column('blueprints', 'main_file_name',
existing_type=sa.TEXT(),
nullable=True)
op.alter_column('blueprints', 'plan',
existing_type=postgresql.BYTEA(),
nullable=True)
op.execute(
f"update blueprints set state='{BlueprintUploadState.UPLOADED}'")
def downgrade_blueprints_table():
op.alter_column('blueprints', 'plan',
existing_type=postgresql.BYTEA(),
nullable=False)
op.alter_column('blueprints', 'main_file_name',
existing_type=sa.TEXT(),
nullable=False)
op.drop_column('blueprints', 'state')
op.drop_column('blueprints', 'error')
op.drop_column('blueprints', 'error_traceback')
def create_filters_table():
op.create_table(
'filters',
sa.Column('_storage_id',
sa.Integer(),
autoincrement=True,
nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('value', JSONString(), nullable=True),
sa.Column('visibility', VISIBILITY_ENUM, nullable=True),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('updated_at', UTCDateTime(), nullable=True),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
name=op.f('filters__creator_id_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['_tenant_id'],
[u'tenants.id'],
name=op.f('filters__tenant_id_fkey'),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint(
'_storage_id',
name=op.f('filters_pkey')),
)
op.create_index(op.f('filters__tenant_id_idx'),
'filters',
['_tenant_id'],
unique=False)
op.create_index(op.f('filters_created_at_idx'),
'filters',
['created_at'],
unique=False)
op.create_index(op.f('filters_id_idx'),
'filters',
['id'],
unique=False)
op.create_index(op.f('filters__creator_id_idx'),
'filters',
['_creator_id'],
unique=False)
op.create_index(op.f('filters_visibility_idx'),
'filters',
['visibility'],
unique=False)
op.create_index('filters_id__tenant_id_idx',
'filters',
['id', '_tenant_id'],
unique=True)
def drop_filters_table():
op.drop_index(op.f('filters__tenant_id_idx'),
table_name='filters')
op.drop_index(op.f('filters_created_at_idx'),
table_name='filters')
op.drop_index(op.f('filters_id_idx'),
table_name='filters')
op.drop_index(op.f('filters__creator_id_idx'),
table_name='filters')
op.drop_index(op.f('filters_visibility_idx'),
table_name='filters')
op.drop_index('filters_id__tenant_id_idx',
table_name='filters')
op.drop_table('filters')
def create_deployment_groups_table():
op.create_table(
'deployment_groups',
sa.Column(
'_storage_id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column(
'visibility',
postgresql.ENUM(
'private', 'tenant', 'global', name='visibility_states',
create_type=False),
nullable=True
),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('_default_blueprint_fk', sa.Integer(), nullable=True),
sa.Column('default_inputs', JSONString(), nullable=True),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_default_blueprint_fk'], ['blueprints._storage_id'],
name=op.f('deployment_groups__default_blueprint_fk_fkey'),
ondelete='SET NULL'
),
sa.ForeignKeyConstraint(
['_creator_id'], ['users.id'],
name=op.f('deployment_groups__creator_id_fkey'),
ondelete='CASCADE'
),
sa.ForeignKeyConstraint(
['_tenant_id'], ['tenants.id'],
name=op.f('deployment_groups__tenant_id_fkey'),
ondelete='CASCADE'
),
sa.PrimaryKeyConstraint(
'_storage_id', name=op.f('deployment_groups_pkey'))
)
op.create_index(
op.f('deployment_groups__default_blueprint_fk_idx'),
'deployment_groups',
['_default_blueprint_fk'],
unique=False
)
op.create_index(
op.f('deployment_groups__creator_id_idx'),
'deployment_groups',
['_creator_id'],
unique=False
)
op.create_index(
op.f('deployment_groups__tenant_id_idx'),
'deployment_groups',
['_tenant_id'],
unique=False
)
op.create_index(
op.f('deployment_groups_created_at_idx'),
'deployment_groups',
['created_at'],
unique=False
)
op.create_index(
op.f('deployment_groups_id_idx'),
'deployment_groups',
['id'],
unique=False
)
op.create_index(
op.f('deployment_groups_visibility_idx'),
'deployment_groups',
['visibility'],
unique=False
)
op.create_table(
'deployment_groups_deployments',
sa.Column('deployment_group_id', sa.Integer(), nullable=True),
sa.Column('deployment_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['deployment_group_id'],
['deployment_groups._storage_id'],
name=op.f('deployment_groups_deployments_deployment_grou_id_fkey')
),
sa.ForeignKeyConstraint(
['deployment_id'],
['deployments._storage_id'],
name=op.f('deployment_groups_deployments_deployment_id_fkey')
)
)
def drop_deployment_groups_table():
op.drop_table('deployment_groups_deployments')
op.drop_index(
op.f('deployment_groups__default_blueprint_fk_idx'),
table_name='deployment_groups')
op.drop_index(
op.f('deployment_groups_visibility_idx'),
table_name='deployment_groups')
op.drop_index(
op.f('deployment_groups_id_idx'), table_name='deployment_groups')
op.drop_index(
op.f('deployment_groups_created_at_idx'),
table_name='deployment_groups')
op.drop_index(
op.f('deployment_groups__tenant_id_idx'),
table_name='deployment_groups')
op.drop_index(
op.f('deployment_groups__creator_id_idx'),
table_name='deployment_groups')
op.drop_table('deployment_groups')
def create_execution_schedules_table():
op.create_table(
'execution_schedules',
sa.Column('_storage_id',
sa.Integer(),
autoincrement=True,
nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('visibility', VISIBILITY_ENUM, nullable=True),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('next_occurrence', UTCDateTime(), nullable=True),
sa.Column('since', UTCDateTime(), nullable=True),
sa.Column('until', UTCDateTime(), nullable=True),
sa.Column('rule', JSONString(), nullable=False),
sa.Column('slip', sa.Integer(), nullable=False),
sa.Column('workflow_id', sa.Text(), nullable=False),
sa.Column('parameters', JSONString(), nullable=True),
sa.Column('execution_arguments', JSONString(), nullable=True),
sa.Column('stop_on_fail',
sa.Boolean(),
nullable=False,
server_default='f'),
sa.Column('enabled',
sa.Boolean(),
nullable=False,
server_default='t'),
sa.Column('_deployment_fk', sa.Integer(), nullable=False),
sa.Column('_latest_execution_fk', sa.Integer(), nullable=True),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
name=op.f('execution_schedules__creator_id_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['_tenant_id'],
[u'tenants.id'],
name=op.f('execution_schedules__tenant_id_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['_deployment_fk'],
[u'deployments._storage_id'],
name=op.f('execution_schedules__deployment_fkey'),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint(
'_storage_id',
name=op.f('execution_schedules_pkey')),
)
op.create_foreign_key(
op.f('execution_schedules__latest_execution_fk_fkey'),
'execution_schedules',
'executions',
['_latest_execution_fk'],
['_storage_id'],
ondelete='CASCADE'
)
op.create_index(op.f('execution_schedules_created_at_idx'),
'execution_schedules',
['created_at'],
unique=False)
op.create_index(op.f('execution_schedules_id_idx'),
'execution_schedules',
['id'],
unique=False)
op.create_index(op.f('execution_schedules__creator_id_idx'),
'execution_schedules',
['_creator_id'],
unique=False)
op.create_index(op.f('execution_schedules__tenant_id_idx'),
'execution_schedules',
['_tenant_id'],
unique=False)
op.create_index(op.f('execution_schedules_visibility_idx'),
'execution_schedules',
['visibility'],
unique=False)
op.create_index(op.f('execution_schedules_next_occurrence_idx'),
'execution_schedules',
['next_occurrence'],
unique=False)
op.create_index(op.f('execution_schedules__deployment_fk_idx'),
'execution_schedules',
['_deployment_fk'],
unique=False)
op.create_index(op.f('execution_schedules__latest_execution_fk_idx'),
'execution_schedules',
['_latest_execution_fk'],
unique=False)
def drop_execution_schedules_table():
op.drop_index(
op.f('execution_schedules_next_occurrence_idx'),
table_name='execution_schedules')
op.drop_index(
op.f('execution_schedules_visibility_idx'),
table_name='execution_schedules')
op.drop_index(
op.f('execution_schedules_id_idx'),
table_name='execution_schedules')
op.drop_index(
op.f('execution_schedules_created_at_idx'),
table_name='execution_schedules')
op.drop_index(
op.f('execution_schedules__tenant_id_idx'),
table_name='execution_schedules')
op.drop_index(
op.f('execution_schedules__creator_id_idx'),
table_name='execution_schedules')
op.drop_index(op.f('execution_schedules__latest_execution_fk_idx'),
table_name='execution_schedules')
op.drop_index(op.f('execution_schedules__deployment_fk_idx'),
table_name='execution_schedules')
op.drop_table('execution_schedules')
def fix_previous_versions():
    # The literal '{0}' below is intentional: the 5_1-to-5_1_1 migration
    # created this unique constraint with an unformatted '{0}_key_value_key'
    # name, so that is the name being renamed here.
    op.execute('alter table deployments_labels rename CONSTRAINT '
               '"{0}_key_value_key" to "deployments_labels_key_key";')
op.execute('alter INDEX deployments_labels__deployment_idx RENAME TO '
'deployments_labels__deployment_fk_idx')
op.create_index(op.f('deployments_labels_value_idx'),
'deployments_labels',
['value'],
unique=False)
op.create_index(op.f('permissions_role_id_idx'),
'permissions',
['role_id'],
unique=False)
op.drop_index('inter_deployment_dependencies_id_idx',
table_name='inter_deployment_dependencies')
op.create_index(op.f('inter_deployment_dependencies_id_idx'),
'inter_deployment_dependencies',
['id'],
unique=False)
def revert_fixes():
op.execute('alter table deployments_labels rename CONSTRAINT '
'"deployments_labels_key_key" to "{0}_key_value_key";')
op.execute('alter INDEX deployments_labels__deployment_fk_idx RENAME TO '
'deployments_labels__deployment_idx')
op.drop_index(op.f('deployments_labels_value_idx'),
table_name='deployments_labels')
op.drop_index(op.f('permissions_role_id_idx'), table_name='permissions')
op.drop_index(op.f('inter_deployment_dependencies_id_idx'),
table_name='inter_deployment_dependencies')
op.create_index('inter_deployment_dependencies_id_idx',
'inter_deployment_dependencies',
['id'],
unique=True)
def create_execution_groups_table():
op.create_table(
'execution_groups',
sa.Column(
'_storage_id',
sa.Integer(),
autoincrement=True,
nullable=False
),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('visibility', VISIBILITY_ENUM, nullable=True),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('_deployment_group_fk', sa.Integer(), nullable=True),
sa.Column('workflow_id', sa.Text(), nullable=False),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
['users.id'],
name=op.f('execution_groups__creator_id_fkey'),
ondelete='CASCADE'
),
sa.ForeignKeyConstraint(
['_deployment_group_fk'],
['deployment_groups._storage_id'],
name=op.f('execution_groups__deployment_group_fk_fkey'),
ondelete='CASCADE'
),
sa.ForeignKeyConstraint(
['_tenant_id'],
['tenants.id'],
name=op.f('execution_groups__tenant_id_fkey'),
ondelete='CASCADE'
),
sa.PrimaryKeyConstraint(
'_storage_id',
name=op.f('execution_groups_pkey')
)
)
op.create_index(
op.f('execution_groups__creator_id_idx'),
'execution_groups',
['_creator_id'],
unique=False
)
op.create_index(
op.f('execution_groups__deployment_group_fk_idx'),
'execution_groups',
['_deployment_group_fk'],
unique=False
)
op.create_index(
op.f('execution_groups__tenant_id_idx'),
'execution_groups',
['_tenant_id'],
unique=False
)
op.create_index(
op.f('execution_groups_created_at_idx'),
'execution_groups',
['created_at'],
unique=False
)
op.create_index(
op.f('execution_groups_id_idx'),
'execution_groups',
['id'],
unique=False
)
op.create_index(
op.f('execution_groups_visibility_idx'),
'execution_groups',
['visibility'],
unique=False
)
op.create_table(
'execution_groups_executions',
sa.Column('execution_group_id', sa.Integer(), nullable=True),
sa.Column('execution_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['execution_group_id'],
['execution_groups._storage_id'],
name=op.f('execution_groups_executions_execution_grou_id_fkey'),
ondelete='CASCADE'
),
sa.ForeignKeyConstraint(
['execution_id'],
['executions._storage_id'],
name=op.f('execution_groups_executions_execution_id_fkey'),
ondelete='CASCADE'
)
)
def drop_execution_groups_table():
op.drop_table('execution_groups_executions')
op.drop_index(
op.f('execution_groups_visibility_idx'), table_name='execution_groups')
op.drop_index(
op.f('execution_groups_id_idx'), table_name='execution_groups')
op.drop_index(
op.f('execution_groups_created_at_idx'), table_name='execution_groups')
op.drop_index(
op.f('execution_groups__tenant_id_idx'), table_name='execution_groups')
op.drop_index(
op.f('execution_groups__deployment_group_fk_idx'),
table_name='execution_groups')
op.drop_index(
op.f('execution_groups__creator_id_idx'),
table_name='execution_groups')
op.drop_table('execution_groups')
def set_null_on_maintenance_mode_cascade():
"""Make maintenance_mode.requested_by a cascade=SET NULL"""
op.drop_constraint(
'maintenance_mode__requested_by_fkey',
'maintenance_mode',
type_='foreignkey'
)
op.create_foreign_key(
op.f('maintenance_mode__requested_by_fkey'),
'maintenance_mode',
'users',
['_requested_by'],
['id'],
ondelete='SET NULL'
)
def revert_set_null_on_maintenance_mode_cascade():
"""Make maintenance_mode.requested_by a cascade=DELETE
This reverts set_null_on_maintenance_mode_cascade
"""
op.drop_constraint(
op.f('maintenance_mode__requested_by_fkey'),
'maintenance_mode',
type_='foreignkey'
)
op.create_foreign_key(
'maintenance_mode__requested_by_fkey',
'maintenance_mode',
'users',
['_requested_by'],
['id'],
ondelete='CASCADE'
)
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/9d261e90b1f3_5_1_1_to_5_2.py",
"copies": "1",
"size": "21192",
"license": "apache-2.0",
"hash": 8801369302997262000,
"line_mean": 33.6274509804,
"line_max": 79,
"alpha_frac": 0.5592676482,
"autogenerated": false,
"ratio": 3.9589015505324117,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 612
} |
# 5-1. Conditional Tests: Write a series of conditional tests. Print a statement describing each test
# and your prediction for the results of each test. Your code should look something like this:
# car = 'subaru'
# print("Is car == 'subaru'? I predict True.")
# print(car == 'subaru')
# print("\nIs car == 'audi'? I predict False.")
# print(car == 'audi')
# • Look closely at your results, and make sure you understand why each line evaluates to True or False.
# • Create at least 10 tests. Have at least 5 tests evaluate to True and another 5 tests evaluate to False.
bike = 'Mongoose'
print("Is bike == 'Mercurio'? I predict False.")
print(bike == 'Mercurio')
# ----------------------------------------------------------------------
dog = "black"
print("\nIs Daniel's dog == 'white'? I predict False. ")
print(dog == 'white')
# ----------------------------------------------------------------------
truck = "Chevrolet"
print("\nIs that truck == 'Ford'? I predict False. ")
print(truck == 'Ford')
# ----------------------------------------------------------------------
tennis = "Adidas"
print("\nAre these tennis == 'Nike'? I predict False. ")
print(tennis == 'Nike')
# ----------------------------------------------------------------------
sister = 'dentist'
print("\nIs Ariel's sister a == 'engineer'? I predict False. ")
print(sister == 'engineer')
# ----------------------------------------------------------------------------------------------------------------------
shirt = 'large'
print("\nIs that shirt == 'large'? I predict True. ")
print(shirt == 'large')
# ----------------------------------------------------------------------
pizza = 'pepperoni'
print("\nDo those pizzas have == 'pepperoni'? I predict True. ")
print(pizza == 'pepperoni')
# ----------------------------------------------------------------------
cellphone = 'Samsung'
print("\nIs that cellphone == 'Samsung'? I predict True. ")
print(cellphone == 'Samsung')
# ----------------------------------------------------------------------
cap = 'green'
print("\nIs that cap == 'green'? I predict True. ")
print(cap == 'green')
# ----------------------------------------------------------------------
headphones = 'Panasonic'
print("\nAre these headphones == 'Panasonic'? I predict True. ")
print(headphones == 'Panasonic') | {
"repo_name": "AnhellO/DAS_Sistemas",
"path": "Ago-Dic-2019/DanielM/PracticaUno/5.1_ConditionalTests.py",
"copies": "1",
"size": "2313",
"license": "mit",
"hash": -1948489859523368400,
"line_mean": 34.5384615385,
"line_max": 120,
"alpha_frac": 0.4664356864,
"autogenerated": false,
"ratio": 4.079505300353357,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5045940986753357,
"avg_score": null,
"num_lines": null
} |
# 5.1 create a list of your favorite musicians
# musicians = ['Eminem', 'Björns vänner', 'Solen']
# print(musicians)
#
# # 5.2 Create a list of tuples with each yuple containing a coordinate
# cord_bro = ('55.7N', '14.1E')
# cord_lun = ('55.6N', '13.1E')
#
# places = [cord_bro, cord_lun]
# 5.3/4 Create a dictionary with facts
# personal_facts = {'height':'2m','weight':'94kg'}
#
# print(personal_facts)
#
# q = input("What do you want to know? ")
# if q in personal_facts:
# ans = personal_facts[q]
# else:
# ans = 'Not found'
#
# print(ans)
# 5.5 Map your favorite musicians to your favorite songs
# musicians = ['Eminem', 'Björns vänner', 'Solen']
#
# e_songs = ['lose yourself', 'the real slim shady', 'fuck you']
# b_songs = ['Snart kommer floden', 'Åkrar och himmel']
# s_songs = ['Stäng igen stan']
#
# playlist = {musicians[0]:e_songs, musicians[1]:b_songs, musicians[2]:s_songs}
#
# q = input('Which artist? ')
# if q in playlist:
# print(playlist[q])
# else:
# print('Not found')
"""
Explanation
:param name: type.
:return output:
"""
| {
"repo_name": "Frikeer/LearnPython",
"path": "exc5.py",
"copies": "1",
"size": "1072",
"license": "unlicense",
"hash": -3760388473789911600,
"line_mean": 23.2272727273,
"line_max": 79,
"alpha_frac": 0.6332082552,
"autogenerated": false,
"ratio": 2.4063205417607225,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8539528796960723,
"avg_score": 0,
"num_lines": 44
} |
"""51. N-Queens
https://leetcode.com/problems/n-queens/description/
The n-queens puzzle is the problem of placing n queens on an n×n chessboard
such that no two queens attack each other.

Given an integer n, return all distinct solutions to the n-queens puzzle.
Each solution contains a distinct board configuration of the n-queens'
placement, where 'Q' and '.' both indicate a queen and an empty space
respectively.
Example:
Input: 4
Output: [
[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]
]
Explanation: There exist two distinct solutions to the 4-queens puzzle as
shown above.
"""
from typing import List
class Solution:
def solve_n_queens(self, n: int) -> List[List[str]]:
ans = []
def output(pos: int):
"""
Output the string format of row.
:param pos: the queen's position in current row.
:return:
"""
return "." * pos + "Q" + "." * (n - pos - 1)
def validate(row: int, col: int, queens: List[int]) -> bool:
"""
Validate current pos[row, col] if is safe.
:param row: current row index.
:param col: current column index.
:param queens: the columns of previous queens.
:return:
"""
for i in range(row):
cur_queen_col = queens[i]
if row == i:
return False
if col == cur_queen_col:
return False
if abs(cur_queen_col - col) == abs(i - row):
return False
return True
def backtrack(row: int, queens: List[int], string: List[str]) -> bool:
"""
Put queen to the safe position of current row.
:param row: current row.
:param queens: positions of previous queens.
:param string: string format of previous queens
:return:
"""
if row == n:
# all n queens have been put.
ans.append(string)
return True
success = False
for col in range(n):
if validate(row, col, queens):
next_queen_safe = backtrack(row + 1, queens + [col],
string + [output(col)])
if next_queen_safe:
success = True
return success
for i in range(n):
backtrack(1, [i], [output(i)])
return ans
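# Quick usage sketch (solution order follows this implementation's
# first-queen column loop, matching the docstring example):
# >>> Solution().solve_n_queens(4)
# [['.Q..', '...Q', 'Q...', '..Q.'], ['..Q.', 'Q...', '...Q', '.Q..']]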
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/n_queens.py",
"copies": "1",
"size": "2689",
"license": "mit",
"hash": 6734350502449306000,
"line_mean": 27.7311827957,
"line_max": 78,
"alpha_frac": 0.5078592814,
"autogenerated": false,
"ratio": 3.8335724533715925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48414317347715924,
"avg_score": null,
"num_lines": null
} |
"""5_1 to 5_1_1
- Add column topology_order to deployment_update_steps table
Revision ID: 5ce2b0cbb6f3
Revises: 387fcd049efb
Create Date: 2020-11-09 15:12:12.055532
"""
import yaml
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column, select
from manager_rest.storage.models_base import UTCDateTime
# revision identifiers, used by Alembic.
revision = '5ce2b0cbb6f3'
down_revision = '387fcd049efb'
branch_labels = None
depends_on = None
def upgrade():
op.add_column(
'deployment_update_steps',
sa.Column('topology_order',
sa.Integer(),
nullable=False,
server_default="0"))
create_deployments_labels_table()
permissions_table = _create_permissions_table()
_load_permissions(permissions_table)
_create_maintenance_mode_table()
op.add_column(
'roles',
sa.Column('type',
sa.Text(),
nullable=False,
server_default='tenant_role'))
def downgrade():
op.drop_column('deployment_update_steps', 'topology_order')
drop_deployments_labels_table()
op.drop_table('permissions')
op.drop_column('roles', 'type')
op.drop_index(op.f('maintenance_mode__requested_by_idx'),
table_name='maintenance_mode')
op.drop_table('maintenance_mode')
def create_deployments_labels_table():
_create_labels_table('deployments_labels',
'_deployment_fk',
u'deployments._storage_id',
'_deployment_idx')
def drop_deployments_labels_table():
op.drop_table('deployments_labels')
def _create_labels_table(table_name, fk_column, fk_refcolumn, fk_index):
"""
This is an auxiliary function to create an object's labels table.
:param table_name: The table name. E.g. deployments_labels
:param fk_column: The object's foreign key column name. E.g. _deployment_fk
:param fk_refcolumn: The object's foreign key reference column. E.g.
u'deployments._storage_id'
:param fk_index: The object's foreign key index name. E.g. _deployment_idx
"""
op.create_table(
table_name,
sa.Column('id',
sa.Integer(),
autoincrement=True,
nullable=False),
sa.Column('key', sa.Text(), nullable=False),
sa.Column('value', sa.Text(), nullable=False),
sa.Column(fk_column, sa.Integer(), nullable=False),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
[fk_column],
[fk_refcolumn],
name=op.f('{0}_{1}'.format(table_name, fk_column)),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
name=op.f('{0}__creator_id_fkey'.format(table_name)),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint(
'id',
name=op.f('{0}_pkey'.format(table_name))),
        sa.UniqueConstraint(
            # NOTE: '{0}' is left unformatted (as in the original migration),
            # so the constraint is literally named '{0}_key_value_key'; the
            # 5_1_1-to-5_2 migration's fix_previous_versions() renames it.
            'key', 'value', fk_column, name=op.f('{0}_key_value_key'))
)
op.create_index(op.f('{0}_created_at_idx'.format(table_name)),
table_name,
['created_at'],
unique=False)
op.create_index(op.f('{0}__creator_id_idx'.format(table_name)),
table_name,
['_creator_id'],
unique=False)
op.create_index(op.f('{0}_key_idx'.format(table_name)),
table_name,
['key'],
unique=False)
op.create_index(op.f('{0}_{1}'.format(table_name, fk_index)),
table_name,
[fk_column],
unique=False)
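# For example, a hypothetical blueprints labels table (not created by this
# migration) would be:
#   _create_labels_table('blueprints_labels',
#                        '_blueprint_fk',
#                        u'blueprints._storage_id',
#                        '_blueprint_idx')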
def _create_permissions_table():
return op.create_table(
'permissions',
sa.Column('id', sa.Integer(), nullable=False, autoincrement=True),
sa.Column('role_id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(
['role_id'],
[u'roles.id'],
ondelete='CASCADE',
),
)
def _create_maintenance_mode_table():
op.create_table(
'maintenance_mode',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('status', sa.Text(), nullable=False),
sa.Column('activation_requested_at', UTCDateTime(), nullable=False),
sa.Column('activated_at', UTCDateTime(), nullable=True),
sa.Column('_requested_by', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['_requested_by'],
['users.id'],
name=op.f('maintenance_mode__requested_by_fkey'),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name=op.f('maintenance_mode_pkey'))
)
op.create_index(
op.f('maintenance_mode__requested_by_idx'),
'maintenance_mode',
['_requested_by'],
unique=False)
def _load_permissions(permissions_table):
"""Load permissions from the conf file, if it exists."""
try:
with open('/opt/manager/authorization.conf') as f:
data = yaml.safe_load(f)
permissions = data['permissions']
except (IOError, KeyError):
return
roles_table = table('roles', column('id'), column('name'))
for permission, roles in permissions.items():
for role in roles:
op.execute(
permissions_table.insert()
.from_select(
['name', 'role_id'],
select([
op.inline_literal(permission).label('name'),
roles_table.c.id
])
.where(roles_table.c.name == op.inline_literal(role))
.limit(op.inline_literal(1))
)
)
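# The authorization.conf consumed above is expected to look roughly like
# this (an illustrative sketch, not taken from a real manager install):
#   permissions:
#     blueprint_upload: [sys_admin, manager]
#     deployment_create: [sys_admin, manager, user]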
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/5ce2b0cbb6f3_5_1_to_5_1_1.py",
"copies": "1",
"size": "6081",
"license": "apache-2.0",
"hash": 3645664928210240000,
"line_mean": 32.9720670391,
"line_max": 79,
"alpha_frac": 0.5558296333,
"autogenerated": false,
"ratio": 3.958984375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5014814008299999,
"avg_score": null,
"num_lines": null
} |
#5/23/17
#GOAL: parse data from USGS water resources software list into something
#uploadable to the ontosoft portal for USGS. Use this formatted data and upload
#to USGS Ontosoft portal
from urllib.request import urlopen
import requests
import json
def main():
#readURL(url, fileName)
usgsData = getInfo('usgsData')
loginUrl = 'http://usgs.ontosoft.org/repository/login'
ID = authenticateUser(loginUrl, 'mknoblock','testPassword')
for software in usgsData:
postSoftware(software, ID)
def authenticateUser(url, username, password):
'''Authenticates user and password on usgs portal of ontosoft
input: string username, password
output: string session id'''
cred = {'name': username, 'password': password}
headers = {'content-type':'application/json'}
response = requests.post(url, data = json.dumps(cred), headers = headers)
content = json.loads(response.content.decode('utf-8'))
return content['sessionString']
def postSoftware(softwareInfo, sessionID):
'''Adds software to USGS Ontosoft portal with softwareInfo given in the format
    [name, description, [os], version]. If the software name already exists in
    the portal, nothing new is posted, but the request still succeeds.
Input: list softwareInfo, string sessionID
Output: none'''
osVals = []
url = 'http://usgs.ontosoft.org/repository/software'
headers = {'content-type': 'application/json','X-Ontosoft-Session':sessionID}
nameInfo = {"@type":"TextEntity", "type":"http://ontosoft.org/software#TextEntity"}
descInfo = {"@type":"TextEntity", "type":"http://ontosoft.org/software#TextEntity"}
for os in softwareInfo[2]:
osVals += [{"@type":"EnumerationEntity", "type":"http://ontosoft.org/software#OperatingSystem", "value": os}]
values = {}
nameInfo['value'] = softwareInfo[0]
descInfo['value'] = softwareInfo[1]
values["http://ontosoft.org/software#hasName"] = [nameInfo]
values["http://ontosoft.org/software#hasShortDescription"] = [descInfo]
values["http://ontosoft.org/software#supportsOperatingSystem"] = osVals
data = {"@type":"Software","value":values,"label":softwareInfo[0]}
response = requests.post(url, data = json.dumps(data), headers = headers)
return response
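# Hypothetical call (the software entry below is made up for illustration):
#   postSoftware(['MODFLOW', 'Modular groundwater flow model',
#                 ['Microsoft Windows'], 'Version 6'], ID)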
def readURL(url, fileName):
''' Reads a given URL into a text file.
    Input: String url, String fileName
Output: Saves text file in folder with given name '''
page = urlopen(url)
html_content = page.read()
    file = open(fileName, 'wb')
file.write(html_content)
file.close()
def getInfo(fileName):
''' Reads and formats information from text file according to style
of USGS website
Input: String file name
Output: list of software informations where each software has a list of information '''
file = open(fileName, 'r', encoding='utf-8')
atList = False
allSoftwares = []
for line in file:
skipThisLine = False
if 'Alphabetical list' in line:
skipThisLine = True
atList = True #this is the indicator that all softwares will be listed
elif 'Abbreviations' in line: atList = False #this is the point in the page where softwares are no longer listed
if atList and not skipThisLine and '•' in line:
allSoftwares += [formatLine(line)]
return allSoftwares
def formatLine(line):
''' Takes in a string of information about a software and creates a list with
the information organized in the following format:
[name, description, [os], version]
Input: string line
Output: list '''
software = ['','','','']
version = False
formattedOS = []
versionNum = ''
separated = line.split('\u2028') #splits information by identifier for new line
if 'Version' in separated[0]: #if there is a version number
version = True
versionInd = separated[0].index('Version')
if '(' in separated[0]: #if there is an os
osInd = separated[0].index('(') #starting index for os
        title = separated[0][3:osInd] #starts at 3 to skip the bullet and spacing
os = separated[0][osInd+1:line.index(')')]
formattedOS = formatOS(os)
elif version:
title = separated[0][3:versionInd]
elif ',' in separated[0]: #if there is a date
title = separated[0][3:separated[0].index(',')]
else:
title = separated[0][3:]
if version and ',' in separated[0]: #if there is a date
dateInd = line.index(',')
versionNum = line[versionInd:dateInd]
elif version:
versionNum = separated[0][versionInd:]
return [title, separated[1].strip('\n'), formattedOS, versionNum]
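# Shape sketch (values illustrative; the exact slicing depends on the page's
# '  • Name (OS) Version N' layout):
#   formatLine(line) -> [title, description, [os_name, ...], 'Version N']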
def formatOS(osString):
''' This function takes in an osString which may have multiple OSes separated
by '/' and puts them into the list. Also changes OS abbreviations as specified
on USGS website and puts them into full form
Input: string of OS abbreviations
Output: list of OS full names'''
if osString == '':
numOS = 0
else:
numOS = 1 + osString.count('/')
os = []
while numOS > 0:
if numOS == 1:
osAbbrev = osString
else:
ind = osString.index('/')
osAbbrev = osString[0:ind]
osString = osString[ind+1:]
osName = fullName(osAbbrev)
os += [osName]
numOS -= 1
return os
def fullName(osAbbrev):
''' Given an OS abbreviation, returns the full OS name. OS names and abbreviations
given by https://water.usgs.gov/software/lists/alphabetical under 'Abbreviations for OSs section'
Input: string osAbbrev
Output: string osName'''
osName = osAbbrev
OSs = {'DOS':'IBM-compatible PC', 'DG':'Data General AViiON DG/UX', 'Mac':'Macintosh',\
'SGI': 'Silicon Graphics Indigo', 'Sun': 'Sun SPARCstation Solaris', 'Win': 'Microsoft Windows'}
if osAbbrev in OSs:
osName = OSs[osAbbrev]
return osName
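# e.g. formatOS('DOS/Win') -> ['IBM-compatible PC', 'Microsoft Windows'],
# and fullName('Mac') -> 'Macintosh'; unknown abbreviations pass through
# unchanged.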
| {
"repo_name": "mrk7217/usgsOntosoft",
"path": "USGSProjectAnalysis.py",
"copies": "1",
"size": "5992",
"license": "apache-2.0",
"hash": -2646248797247152000,
"line_mean": 38.1503267974,
"line_max": 120,
"alpha_frac": 0.6550918197,
"autogenerated": false,
"ratio": 3.779179810725552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4934271630425552,
"avg_score": null,
"num_lines": null
} |
"""52. N-Queens II
https://leetcode.com/problems/n-queens-ii/description/
The n-queens puzzle is the problem of placing n queens on an n×n chessboard
such that no two queens attack each other.
Given an integer n, return the number of distinct solutions to the n-queens
puzzle.
Example:
Input: 4
Output: 2
Explanation: There are two distinct solutions to the 4-queens puzzle as shown
below.
[
[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]
]
"""
from typing import List
class Solution:
def total_n_queens(self, n: int) -> int:
ans = 0
def validate(row: int, col: int, queens: List[int]) -> bool:
"""
Validate current pos[row, col] if is safe.
:param row: current row.
:param col: current column.
:param queens: the columns of previous queens.
:return:
"""
for i in range(row):
cur_queen_col = queens[i]
if row == i or col == cur_queen_col \
or abs(cur_queen_col - col) == abs(i - row):
return False
return True
        def backtrack(row: int, queens: List[int]):
"""
Put queen to the safe position of current row.
:param row: current row.
:param queens: positions of previous queens.
:return:
"""
if row == n:
nonlocal ans
ans += 1
return
for col in range(n):
if validate(row, col, queens):
backtrack(row + 1, queens + [col])
for i in range(n):
backtrack(1, [i])
return ans
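# Quick check against the docstring example:
# >>> Solution().total_n_queens(4)
# 2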
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/n_queens_ii.py",
"copies": "1",
"size": "1759",
"license": "mit",
"hash": -1569239248940749800,
"line_mean": 23.0136986301,
"line_max": 77,
"alpha_frac": 0.5099828865,
"autogenerated": false,
"ratio": 3.6597077244258873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9669690610925887,
"avg_score": 0,
"num_lines": 73
} |
"""5_2 to 5_3
- Create blueprints_labels table
- Create deployment labels dependencies table
- Apply some modifications to the deployments labels table
- Split the `filters` table to `deployments_filters` and `blueprints_filters`
- Add installation_status to the deployment table
- Add deployment_status to the deployment table
- Add latest execution FK to the deployment table
- Add statuses and counters for sub-services and sub-environments
Revision ID: 396303c07e35
Revises: 9d261e90b1f3
Create Date: 2021-02-15 12:02:22.089135
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from cloudify.models_states import VisibilityState
from manager_rest.storage.models_base import UTCDateTime, JSONString
# revision identifiers, used by Alembic.
revision = '396303c07e35'
down_revision = '9d261e90b1f3'
branch_labels = None
depends_on = None
installation_status = sa.Enum(
'active',
'inactive',
name='installation_status'
)
deployment_status = sa.Enum(
'good',
'in_progress',
'requires_attention',
name='deployment_status'
)
VISIBILITY_ENUM = postgresql.ENUM(
*VisibilityState.STATES,
name='visibility_states',
create_type=False
)
def upgrade():
_create_blueprints_labels_table()
_modify_deployments_labels_table()
_modify_execution_schedules_table()
_add_specialized_execution_fk()
_create_filters_tables()
_add_deployment_statuses()
_add_execgroups_concurrency()
_add_executions_columns()
_create_deployment_labels_dependencies_table()
_add_deployment_sub_statuses_and_counters()
_create_depgroups_labels_table()
_modify_users_table()
def downgrade():
_revert_changes_to_users_table()
_drop_depgroups_labels_table()
_drop_deployment_sub_statuses_and_counters()
_drop_deployment_labels_dependencies_table()
_drop_execgroups_concurrency()
_drop_deployment_statuses()
_revert_filters_modifications()
_drop_specialized_execution_fk()
_revert_changes_to_execution_schedules_table()
_revert_changes_to_deployments_labels_table()
_drop_blueprints_labels_table()
_drop_execution_columns()
_drop_deployment_statuses_enum_types()
def _add_deployment_sub_statuses_and_counters():
op.add_column(
'deployments',
sa.Column(
'sub_environments_count',
sa.Integer(),
nullable=False,
server_default='0',
)
)
op.add_column(
'deployments',
sa.Column(
'sub_environments_status',
sa.Enum(
'good',
'in_progress',
'require_attention',
name='deployment_status'
),
nullable=True
)
)
op.add_column(
'deployments',
sa.Column(
'sub_services_count',
sa.Integer(),
nullable=False,
server_default='0',
)
)
op.add_column(
'deployments',
sa.Column(
'sub_services_status',
sa.Enum('good',
'in_progress',
'require_attention',
name='deployment_status'
),
nullable=True
)
)
def _create_deployment_labels_dependencies_table():
op.create_table(
'deployment_labels_dependencies',
sa.Column('_storage_id', sa.Integer(), autoincrement=True,
nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('visibility', VISIBILITY_ENUM, nullable=True),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('_source_deployment', sa.Integer(),
nullable=False),
sa.Column('_target_deployment', sa.Integer(),
nullable=False),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'], ['users.id'],
name=op.f('deployment_labels_dependencies__creator_id_fkey'),
ondelete='CASCADE'
),
sa.ForeignKeyConstraint(
['_tenant_id'], ['tenants.id'],
name=op.f('deployment_labels_dependencies__tenant_id_fkey'),
ondelete='CASCADE'
),
sa.ForeignKeyConstraint(
['_source_deployment'], ['deployments._storage_id'],
name=op.f(
'deployment_labels_dependencies__source_deployment_fkey'
), ondelete='CASCADE'
),
sa.ForeignKeyConstraint(
['_target_deployment'], ['deployments._storage_id'],
name=op.f(
'deployment_labels_dependencies__target_deployment_fkey'
), ondelete='CASCADE'
),
sa.PrimaryKeyConstraint(
'_storage_id', name=op.f('deployment_labels_dependencies_pkey')
),
sa.UniqueConstraint(
'_source_deployment',
'_target_deployment',
name=op.f(
'deployment_labels_dependencies__source_deployment_key'
)
)
)
op.create_index(
op.f('deployment_labels_dependencies__creator_id_idx'),
'deployment_labels_dependencies', ['_creator_id'], unique=False
)
op.create_index(
op.f('deployment_labels_dependencies__tenant_id_idx'),
'deployment_labels_dependencies', ['_tenant_id'], unique=False
)
op.create_index(
op.f('deployment_labels_dependencies_created_at_idx'),
'deployment_labels_dependencies', ['created_at'], unique=False
)
op.create_index(
op.f('deployment_labels_dependencies_id_idx'),
'deployment_labels_dependencies', ['id'], unique=False
)
op.create_index(
op.f('deployment_labels_dependencies__source_deployment_idx'),
'deployment_labels_dependencies', ['_source_deployment'],
unique=False
)
op.create_index(
op.f('deployment_labels_dependencies__target_deployment_idx'),
'deployment_labels_dependencies', ['_target_deployment'],
unique=False
)
op.create_index(
op.f('deployment_labels_dependencies_visibility_idx'),
'deployment_labels_dependencies', ['visibility'], unique=False
)
def _add_deployment_statuses():
installation_status.create(op.get_bind())
deployment_status.create(op.get_bind())
op.add_column(
'deployments',
sa.Column(
'installation_status',
type_=installation_status,
nullable=True
)
)
op.add_column(
'deployments',
sa.Column(
'deployment_status',
type_=deployment_status,
nullable=True
)
)
def _add_specialized_execution_fk():
"""Add FKs that point to special executions:
- the upload_blueprint execution for a blueprint
- the create-dep-env execution for a deployment
"""
op.add_column(
'blueprints',
sa.Column('_upload_execution_fk', sa.Integer(), nullable=True)
)
op.create_index(
op.f('blueprints__upload_execution_fk_idx'),
'blueprints',
['_upload_execution_fk'],
unique=False
)
op.create_foreign_key(
op.f('blueprints__upload_execution_fk_fkey'),
'blueprints',
'executions',
['_upload_execution_fk'],
['_storage_id'],
ondelete='SET NULL',
deferrable=True,
initially='DEFERRED',
)
op.add_column(
'deployments',
sa.Column('_create_execution_fk', sa.Integer(), nullable=True)
)
op.create_index(
op.f('deployments__create_execution_fk_idx'),
'deployments',
['_create_execution_fk'],
unique=False
)
op.create_foreign_key(
op.f('deployments__create_execution_fk_fkey'),
'deployments',
'executions',
['_create_execution_fk'],
['_storage_id'],
ondelete='SET NULL',
deferrable=True,
initially='DEFERRED',
)
op.add_column(
'deployments',
sa.Column('_latest_execution_fk', sa.Integer(), nullable=True))
op.create_index(
op.f('deployments__latest_execution_fk_idx'),
'deployments',
['_latest_execution_fk'],
unique=True
)
op.create_foreign_key(
op.f('deployments__latest_execution_fk_fkey'),
'deployments',
'executions',
['_latest_execution_fk'],
['_storage_id'],
ondelete='SET NULL',
initially='DEFERRED',
deferrable=True,
use_alter=True
)
def _drop_specialized_execution_fk():
op.drop_constraint(
op.f('deployments__latest_execution_fk_fkey'),
'deployments',
type_='foreignkey'
)
op.drop_index(
op.f('deployments__latest_execution_fk_idx'),
table_name='deployments'
)
op.drop_column(
'deployments',
'_latest_execution_fk'
)
op.drop_constraint(
op.f('deployments__create_execution_fk_fkey'),
'deployments',
type_='foreignkey'
)
op.drop_index(
op.f('deployments__create_execution_fk_idx'),
table_name='deployments'
)
op.drop_column('deployments', '_create_execution_fk')
op.drop_constraint(
op.f('blueprints__upload_execution_fk_fkey'),
'blueprints',
type_='foreignkey'
)
op.drop_index(
op.f('blueprints__upload_execution_fk_idx'),
table_name='blueprints'
)
op.drop_column('blueprints', '_upload_execution_fk')
def _create_blueprints_labels_table():
op.create_table(
'blueprints_labels',
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('key', sa.Text(), nullable=False),
sa.Column('value', sa.Text(), nullable=False),
sa.Column('_labeled_model_fk', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
['users.id'],
name=op.f('blueprints_labels__creator_id_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['_labeled_model_fk'],
['blueprints._storage_id'],
name=op.f('blueprints_labels__labeled_model_fk_fkey'),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint(
'id',
name=op.f('blueprints_labels_pkey')),
sa.UniqueConstraint(
'key',
'value',
'_labeled_model_fk',
name=op.f('blueprints_labels_key_key'))
)
op.create_index(op.f('blueprints_labels__creator_id_idx'),
'blueprints_labels',
['_creator_id'],
unique=False)
op.create_index(op.f('blueprints_labels__labeled_model_fk_idx'),
'blueprints_labels',
['_labeled_model_fk'],
unique=False)
op.create_index(op.f('blueprints_labels_created_at_idx'),
'blueprints_labels',
['created_at'],
unique=False)
op.create_index(op.f('blueprints_labels_key_idx'),
'blueprints_labels',
['key'],
unique=False)
op.create_index(op.f('blueprints_labels_value_idx'),
'blueprints_labels',
['value'],
unique=False)
dl_table = sa.table(
'deployments_labels',
sa.Column('_labeled_model_fk'),
sa.Column('_deployment_fk')
)
def _modify_deployments_labels_table():
op.add_column('deployments_labels',
sa.Column('_labeled_model_fk', sa.Integer(), nullable=True))
op.execute(
dl_table
.update()
.where(dl_table.c._labeled_model_fk.is_(None))
.values(_labeled_model_fk=dl_table.c._deployment_fk)
)
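    # The backfill above is roughly the SQL:
    #   UPDATE deployments_labels
    #      SET _labeled_model_fk = _deployment_fk
    #    WHERE _labeled_model_fk IS NULL;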
op.alter_column(
'deployments_labels',
'_labeled_model_fk',
existing_type=sa.Integer(),
nullable=False
)
op.drop_index('deployments_labels__deployment_fk_idx',
table_name='deployments_labels')
op.drop_constraint('deployments_labels_key_key',
'deployments_labels',
type_='unique')
op.create_unique_constraint(op.f('deployments_labels_key_key'),
'deployments_labels',
['key', 'value', '_labeled_model_fk'])
op.create_index(op.f('deployments_labels__labeled_model_fk_idx'),
'deployments_labels',
['_labeled_model_fk'],
unique=False)
op.drop_constraint('deployments_labels__deployment_fk',
'deployments_labels',
type_='foreignkey')
op.create_foreign_key(op.f('deployments_labels__labeled_model_fk_fkey'),
'deployments_labels',
'deployments',
['_labeled_model_fk'],
['_storage_id'],
ondelete='CASCADE')
op.drop_column('deployments_labels', '_deployment_fk')
def _revert_changes_to_deployments_labels_table():
op.add_column('deployments_labels',
sa.Column('_deployment_fk',
sa.INTEGER(),
autoincrement=False))
op.execute(
dl_table
.update()
.values(_deployment_fk=dl_table.c._labeled_model_fk)
)
op.drop_constraint(op.f('deployments_labels__labeled_model_fk_fkey'),
'deployments_labels',
type_='foreignkey')
op.create_foreign_key('deployments_labels__deployment_fk',
'deployments_labels',
'deployments',
['_deployment_fk'],
['_storage_id'],
ondelete='CASCADE')
op.drop_index(op.f('deployments_labels__labeled_model_fk_idx'),
table_name='deployments_labels')
op.drop_constraint(op.f('deployments_labels_key_key'),
'deployments_labels',
type_='unique')
op.create_unique_constraint('deployments_labels_key_key',
'deployments_labels',
['key', 'value', '_deployment_fk'])
op.create_index('deployments_labels__deployment_fk_idx',
'deployments_labels',
['_deployment_fk'],
unique=False)
op.drop_column('deployments_labels', '_labeled_model_fk')
def _modify_execution_schedules_table():
op.create_index('execution_schedules_id__deployment_fk_idx',
'execution_schedules',
['id', '_deployment_fk', '_tenant_id'],
unique=True)
op.create_unique_constraint(op.f('execution_schedules_id_key'),
'execution_schedules',
['id', '_deployment_fk', '_tenant_id'])
def _revert_changes_to_execution_schedules_table():
op.drop_constraint(op.f('execution_schedules_id_key'),
'execution_schedules', type_='unique')
op.drop_index('execution_schedules_id__deployment_fk_idx',
table_name='execution_schedules')
def _drop_blueprints_labels_table():
op.drop_index(op.f('blueprints_labels_value_idx'),
table_name='blueprints_labels')
op.drop_index(op.f('blueprints_labels_key_idx'),
table_name='blueprints_labels')
op.drop_index(op.f('blueprints_labels_created_at_idx'),
table_name='blueprints_labels')
op.drop_index(op.f('blueprints_labels__labeled_model_fk_idx'),
table_name='blueprints_labels')
op.drop_index(op.f('blueprints_labels__creator_id_idx'),
table_name='blueprints_labels')
op.drop_table('blueprints_labels')
def _drop_deployment_statuses():
op.drop_column('deployments', 'installation_status')
op.drop_column('deployments', 'deployment_status')
def _create_filters_tables():
op.create_table(
'blueprints_filters',
sa.Column('_storage_id',
sa.Integer(),
autoincrement=True,
nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('visibility', VISIBILITY_ENUM, nullable=True),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('value', JSONString(), nullable=True),
sa.Column('updated_at', UTCDateTime(), nullable=True),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.Column('is_system_filter', sa.Boolean(), nullable=False,
server_default='f'),
sa.ForeignKeyConstraint(
['_creator_id'],
['users.id'],
name=op.f('blueprints_filters__creator_id_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['_tenant_id'],
['tenants.id'],
name=op.f('blueprints_filters__tenant_id_fkey'),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint(
'_storage_id',
name=op.f('blueprints_filters_pkey'))
)
op.create_index(op.f('blueprints_filters__creator_id_idx'),
'blueprints_filters',
['_creator_id'],
unique=False)
op.create_index(op.f('blueprints_filters__tenant_id_idx'),
'blueprints_filters',
['_tenant_id'],
unique=False)
op.create_index(op.f('blueprints_filters_created_at_idx'),
'blueprints_filters',
['created_at'],
unique=False)
op.create_index('blueprints_filters_id__tenant_id_idx',
'blueprints_filters',
['id', '_tenant_id'],
unique=True)
op.create_index(op.f('blueprints_filters_id_idx'),
'blueprints_filters',
['id'],
unique=False)
op.create_index(op.f('blueprints_filters_visibility_idx'),
'blueprints_filters',
['visibility'],
unique=False)
op.create_index(op.f('blueprints_filters_is_system_filter_idx'),
'blueprints_filters',
['is_system_filter'],
unique=False)
op.create_table(
'deployments_filters',
sa.Column('_storage_id',
sa.Integer(),
autoincrement=True,
nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('visibility', VISIBILITY_ENUM, nullable=True),
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('value', JSONString(), nullable=True),
sa.Column('updated_at', UTCDateTime(), nullable=True),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.Column('is_system_filter', sa.Boolean(), nullable=False,
server_default='f'),
sa.ForeignKeyConstraint(
['_creator_id'],
['users.id'],
name=op.f('deployments_filters__creator_id_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['_tenant_id'],
['tenants.id'],
name=op.f('deployments_filters__tenant_id_fkey'),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint(
'_storage_id',
name=op.f('deployments_filters_pkey'))
)
op.create_index(op.f('deployments_filters__creator_id_idx'),
'deployments_filters',
['_creator_id'],
unique=False)
op.create_index(op.f('deployments_filters__tenant_id_idx'),
'deployments_filters',
['_tenant_id'],
unique=False)
op.create_index(op.f('deployments_filters_created_at_idx'),
'deployments_filters',
['created_at'],
unique=False)
op.create_index('deployments_filters_id__tenant_id_idx',
'deployments_filters',
['id', '_tenant_id'],
unique=True)
op.create_index(op.f('deployments_filters_id_idx'),
'deployments_filters',
['id'],
unique=False)
op.create_index(op.f('deployments_filters_visibility_idx'),
'deployments_filters',
['visibility'],
unique=False)
op.create_index(op.f('deployments_filters_is_system_filter_idx'),
'deployments_filters',
['is_system_filter'],
unique=False)
op.drop_index('filters__creator_id_idx',
table_name='filters')
op.drop_index('filters__tenant_id_idx',
table_name='filters')
op.drop_index('filters_created_at_idx',
table_name='filters')
op.drop_index('filters_id__tenant_id_idx',
table_name='filters')
op.drop_index('filters_id_idx',
table_name='filters')
op.drop_index('filters_visibility_idx',
table_name='filters')
op.drop_table('filters')
def _revert_filters_modifications():
op.create_table(
'filters',
sa.Column('_storage_id',
sa.INTEGER(),
autoincrement=True,
nullable=False),
sa.Column('id', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('value', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('visibility',
VISIBILITY_ENUM,
autoincrement=False,
nullable=True),
sa.Column('created_at',
UTCDateTime,
autoincrement=False,
nullable=False),
sa.Column('updated_at',
UTCDateTime,
autoincrement=False,
nullable=True),
sa.Column('_tenant_id',
sa.INTEGER(),
autoincrement=False,
nullable=False),
sa.Column('_creator_id',
sa.INTEGER(),
autoincrement=False,
nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
['users.id'],
name='filters__creator_id_fkey',
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['_tenant_id'],
['tenants.id'],
name='filters__tenant_id_fkey',
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('_storage_id', name='filters_pkey')
)
op.create_index('filters_visibility_idx',
'filters',
['visibility'],
unique=False)
op.create_index('filters_id_idx',
'filters',
['id'],
unique=False)
op.create_index('filters_id__tenant_id_idx',
'filters',
['id', '_tenant_id'],
unique=True)
op.create_index('filters_created_at_idx',
'filters',
['created_at'],
unique=False)
op.create_index('filters__tenant_id_idx',
'filters',
['_tenant_id'],
unique=False)
op.create_index('filters__creator_id_idx',
'filters',
['_creator_id'],
unique=False)
op.drop_index(op.f('deployments_filters_visibility_idx'),
table_name='deployments_filters')
op.drop_index(op.f('deployments_filters_id_idx'),
table_name='deployments_filters')
op.drop_index('deployments_filters_id__tenant_id_idx',
table_name='deployments_filters')
op.drop_index(op.f('deployments_filters_created_at_idx'),
table_name='deployments_filters')
op.drop_index(op.f('deployments_filters__tenant_id_idx'),
table_name='deployments_filters')
op.drop_index(op.f('deployments_filters__creator_id_idx'),
table_name='deployments_filters')
op.drop_index(op.f('deployments_filters_is_system_filter_idx'),
table_name='deployments_filters')
op.drop_table('deployments_filters')
op.drop_index(op.f('blueprints_filters_visibility_idx'),
table_name='blueprints_filters')
op.drop_index(op.f('blueprints_filters_id_idx'),
table_name='blueprints_filters')
op.drop_index('blueprints_filters_id__tenant_id_idx',
table_name='blueprints_filters')
op.drop_index(op.f('blueprints_filters_created_at_idx'),
table_name='blueprints_filters')
op.drop_index(op.f('blueprints_filters__tenant_id_idx'),
table_name='blueprints_filters')
op.drop_index(op.f('blueprints_filters__creator_id_idx'),
table_name='blueprints_filters')
op.drop_index(op.f('blueprints_filters_is_system_filter_idx'),
table_name='blueprints_filters')
op.drop_table('blueprints_filters')
def _add_execgroups_concurrency():
op.add_column(
'execution_groups',
sa.Column(
'concurrency',
sa.Integer(),
server_default='5',
nullable=False
)
)
def _drop_execgroups_concurrency():
op.drop_column('execution_groups', 'concurrency')
def _add_executions_columns():
op.add_column(
'executions',
sa.Column('finished_operations', sa.Integer(), nullable=True)
)
op.add_column(
'executions',
sa.Column('total_operations', sa.Integer(), nullable=True)
)
op.add_column(
'executions',
sa.Column('resume', sa.Boolean(),
server_default='false', nullable=False)
)
def _drop_execution_columns():
op.drop_column('executions', 'total_operations')
op.drop_column('executions', 'finished_operations')
op.drop_column('executions', 'resume')
def _drop_deployment_labels_dependencies_table():
op.drop_index(
op.f('deployment_labels_dependencies_visibility_idx'),
table_name='deployment_labels_dependencies'
)
op.drop_index(
op.f('deployment_labels_dependencies__target_deployment_idx'),
table_name='deployment_labels_dependencies'
)
op.drop_index(
op.f('deployment_labels_dependencies__source_deployment_idx'),
table_name='deployment_labels_dependencies'
)
op.drop_index(
op.f('deployment_labels_dependencies_id_idx'),
table_name='deployment_labels_dependencies'
)
op.drop_index(
op.f('deployment_labels_dependencies_created_at_idx'),
table_name='deployment_labels_dependencies'
)
op.drop_index(
op.f('deployment_labels_dependencies__tenant_id_idx'),
table_name='deployment_labels_dependencies'
)
op.drop_index(
op.f('deployment_labels_dependencies__creator_id_idx'),
table_name='deployment_labels_dependencies'
)
op.drop_table('deployment_labels_dependencies')
def _drop_deployment_sub_statuses_and_counters():
op.drop_column('deployments', 'sub_services_status')
op.drop_column('deployments', 'sub_services_count')
op.drop_column('deployments', 'sub_environments_status')
op.drop_column('deployments', 'sub_environments_count')
def _drop_deployment_statuses_enum_types():
installation_status.drop(op.get_bind())
deployment_status.drop(op.get_bind())
def _create_depgroups_labels_table():
op.create_table(
'deployment_groups_labels',
sa.Column('created_at', UTCDateTime(), nullable=False),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('key', sa.Text(), nullable=False),
sa.Column('value', sa.Text(), nullable=False),
sa.Column('_labeled_model_fk', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'], ['users.id'],
name=op.f('deployment_groups_labels__creator_id_fkey'),
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['_labeled_model_fk'], ['deployment_groups._storage_id'],
name=op.f('deployment_groups_labels__labeled_model_fk_fkey'),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint(
'id', name=op.f('deployment_groups_labels_pkey')),
sa.UniqueConstraint(
'key', 'value', '_labeled_model_fk',
name=op.f('deployment_groups_labels_key_key'))
)
op.create_index(
op.f('deployment_groups_labels__creator_id_idx'),
'deployment_groups_labels', ['_creator_id'], unique=False)
op.create_index(
op.f('deployment_groups_labels__labeled_model_fk_idx'),
'deployment_groups_labels', ['_labeled_model_fk'], unique=False)
op.create_index(
op.f('deployment_groups_labels_created_at_idx'),
'deployment_groups_labels', ['created_at'], unique=False)
op.create_index(
op.f('deployment_groups_labels_key_idx'),
'deployment_groups_labels', ['key'], unique=False)
op.create_index(
op.f('deployment_groups_labels_value_idx'),
'deployment_groups_labels', ['value'], unique=False)
def _drop_depgroups_labels_table():
op.drop_index(
op.f('deployment_groups_labels_value_idx'),
table_name='deployment_groups_labels')
op.drop_index(
op.f('deployment_groups_labels_key_idx'),
table_name='deployment_groups_labels')
op.drop_index(
op.f('deployment_groups_labels_created_at_idx'),
table_name='deployment_groups_labels')
op.drop_index(
op.f('deployment_groups_labels__labeled_model_fk_idx'),
table_name='deployment_groups_labels')
op.drop_index(
op.f('deployment_groups_labels__creator_id_idx'),
table_name='deployment_groups_labels')
op.drop_table('deployment_groups_labels')
def _modify_users_table():
op.add_column(
'users',
sa.Column('show_getting_started',
sa.Boolean(),
nullable=False,
server_default='t')
)
op.add_column('users',
sa.Column('first_login_at', UTCDateTime(), nullable=True))
def _revert_changes_to_users_table():
op.drop_column('users', 'first_login_at')
op.drop_column('users', 'show_getting_started')
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/396303c07e35_5_2_to_5_3.py",
"copies": "1",
"size": "31218",
"license": "apache-2.0",
"hash": 6612465074529819000,
"line_mean": 34.037037037,
"line_max": 78,
"alpha_frac": 0.5583317317,
"autogenerated": false,
"ratio": 4.02968891183684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 891
} |
"""5_3 to 6_0
Revision ID: b92770a7b6ca
Revises: 396303c07e35
Create Date: 2021-04-12 09:33:44.399254
"""
from alembic import op
import sqlalchemy as sa
from manager_rest.storage import models
# revision identifiers, used by Alembic.
revision = 'b92770a7b6ca'
down_revision = '396303c07e35'
branch_labels = None
depends_on = None
def upgrade():
_add_execution_group_fk()
_add_new_execution_columns()
_drop_events_id()
_drop_logs_id()
_add_deployments_display_name_column()
_add_depgroups_creation_counter()
_add_execgroups_dep_fks()
_create_depgroup_dep_constraint()
def downgrade():
_drop_depgroup_dep_constraint()
_drop_execgroups_dep_fks()
_drop_depgroups_creation_counter()
_drop_deployments_display_name_column()
_create_logs_id()
_create_events_id()
_drop_execution_group_fk()
_drop_new_execution_columns()
def _drop_events_id():
op.drop_index('events_id_idx', table_name='events')
op.drop_column('events', 'id')
def _drop_logs_id():
op.drop_index('logs_id_idx', table_name='logs')
op.drop_column('logs', 'id')
def _create_logs_id():
op.add_column('logs', sa.Column('id', sa.Text(),
autoincrement=False, nullable=True))
op.create_index('logs_id_idx', 'logs', ['id'],
unique=False)
def _create_events_id():
op.add_column('events', sa.Column('id', sa.Text(),
autoincrement=False, nullable=True))
op.create_index('events_id_idx', 'events', ['id'],
unique=False)
def _add_new_execution_columns():
op.add_column(
'executions',
sa.Column('allow_custom_parameters', sa.Boolean(),
server_default='false', nullable=False)
)
def _drop_new_execution_columns():
op.drop_column('executions', 'allow_custom_parameters')
def _add_execution_group_fk():
op.add_column(
'events',
sa.Column('_execution_group_fk', sa.Integer(), nullable=True)
)
op.alter_column(
'events',
'_execution_fk',
existing_type=sa.Integer(),
nullable=True
)
op.create_index(
op.f('events__execution_group_fk_idx'),
'events',
['_execution_group_fk'],
unique=False
)
op.create_foreign_key(
op.f('events__execution_group_fk_fkey'),
'events',
'execution_groups',
['_execution_group_fk'],
['_storage_id'],
ondelete='CASCADE',
)
op.create_check_constraint(
'events__one_fk_not_null',
'events',
'(_execution_fk IS NOT NULL) != (_execution_group_fk IS NOT NULL)'
)
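    # The boolean != acts as XOR: exactly one of _execution_fk and
    # _execution_group_fk must be set on every row.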
op.add_column(
'logs',
sa.Column('_execution_group_fk', sa.Integer(), nullable=True)
)
op.alter_column(
'logs',
'_execution_fk',
existing_type=sa.Integer(),
nullable=True
)
op.create_index(
op.f('logs__execution_group_fk_idx'),
'logs',
['_execution_group_fk'],
unique=False
)
op.create_foreign_key(
op.f('logs__execution_group_fk_fkey'),
'logs',
'execution_groups',
['_execution_group_fk'],
['_storage_id'],
ondelete='CASCADE'
)
op.create_check_constraint(
'logs__one_fk_not_null',
'logs',
'(_execution_fk IS NOT NULL) != (_execution_group_fk IS NOT NULL)'
)
def _drop_execution_group_fk():
op.drop_constraint(
op.f('logs__one_fk_not_null'),
'logs',
type_='check',
)
op.drop_constraint(
op.f('logs__execution_group_fk_fkey'),
'logs',
type_='foreignkey'
)
op.drop_index(
op.f('logs__execution_group_fk_idx'),
table_name='logs'
)
op.execute(
models.Log.__table__
.delete()
.where(models.Log.__table__.c._execution_fk.is_(None))
)
op.alter_column(
'logs',
'_execution_fk',
existing_type=sa.Integer(),
nullable=False
)
op.drop_column(
'logs',
'_execution_group_fk'
)
op.drop_constraint(
op.f('events__one_fk_not_null'),
'events',
type_='check',
)
op.drop_constraint(
op.f('events__execution_group_fk_fkey'),
'events',
type_='foreignkey'
)
op.drop_index(
op.f('events__execution_group_fk_idx'),
table_name='events'
)
op.execute(
models.Event.__table__
.delete()
.where(models.Event.__table__.c._execution_fk.is_(None))
)
op.alter_column(
'events',
'_execution_fk',
existing_type=sa.Integer(),
nullable=False
)
op.drop_column(
'events',
'_execution_group_fk'
)
def _add_deployments_display_name_column():
op.add_column('deployments',
sa.Column('display_name', sa.Text(), nullable=True))
op.execute(models.Deployment.__table__.update().values(
display_name=models.Deployment.__table__.c.id))
op.alter_column(
'deployments',
'display_name',
existing_type=sa.Text(),
nullable=False
)
op.create_index(op.f('deployments_display_name_idx'),
'deployments', ['display_name'], unique=False)
def _drop_deployments_display_name_column():
op.drop_index(op.f('deployments_display_name_idx'),
table_name='deployments')
op.drop_column('deployments', 'display_name')
def _add_depgroups_creation_counter():
op.add_column(
'deployment_groups',
sa.Column('creation_counter', sa.Integer(), nullable=False,
server_default='0')
)
def _drop_depgroups_creation_counter():
op.drop_column('deployment_groups', 'creation_counter')
def _add_execgroups_dep_fks():
op.add_column(
'execution_groups',
sa.Column('_success_group_fk', sa.Integer(), nullable=True)
)
op.add_column(
'execution_groups',
sa.Column('_failed_group_fk', sa.Integer(), nullable=True)
)
op.create_index(
op.f('execution_groups__failed_group_fk_idx'),
'execution_groups',
['_failed_group_fk'],
unique=False
)
op.create_index(
op.f('execution_groups__success_group_fk_idx'),
'execution_groups',
['_success_group_fk'],
unique=False
)
op.create_foreign_key(
op.f('execution_groups__success_group_fk_fkey'),
'execution_groups',
'deployment_groups',
['_success_group_fk'],
['_storage_id'],
ondelete='SET NULL'
)
op.create_foreign_key(
op.f('execution_groups__failed_group_fk_fkey'),
'execution_groups',
'deployment_groups',
['_failed_group_fk'],
['_storage_id'],
ondelete='SET NULL'
)
def _drop_execgroups_dep_fks():
op.drop_constraint(
op.f('execution_groups__failed_group_fk_fkey'),
'execution_groups',
type_='foreignkey'
)
op.drop_constraint(
op.f('execution_groups__success_group_fk_fkey'),
'execution_groups',
type_='foreignkey'
)
op.drop_index(
op.f('execution_groups__success_group_fk_idx'),
table_name='execution_groups'
)
op.drop_index(
op.f('execution_groups__failed_group_fk_idx'),
table_name='execution_groups'
)
op.drop_column('execution_groups', '_failed_group_fk')
op.drop_column('execution_groups', '_success_group_fk')
def _create_depgroup_dep_constraint():
op.create_unique_constraint(
op.f('deployment_groups_deployments_deployment_group_id_key'),
'deployment_groups_deployments',
['deployment_group_id', 'deployment_id']
)
def _drop_depgroup_dep_constraint():
op.drop_constraint(
op.f('deployment_groups_deployments_deployment_group_id_key'),
'deployment_groups_deployments',
type_='unique'
)
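# Typical invocation (illustrative; assumes a configured Alembic environment):
#   alembic upgrade b92770a7b6ca    # apply this revision
#   alembic downgrade 396303c07e35  # revert to the previous revision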
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/b92770a7b6ca_5_3_to_6_0.py",
"copies": "1",
"size": "7968",
"license": "apache-2.0",
"hash": -5922347937264730000,
"line_mean": 24.6205787781,
"line_max": 74,
"alpha_frac": 0.5724146586,
"autogenerated": false,
"ratio": 3.4734088927637314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45458235513637313,
"avg_score": null,
"num_lines": null
} |
# 54
from collections import Counter
class Card(object):
def __init__(self, s):
self.value, self.color = Solve.orders[s[:-1]], s[-1:]
class Solve(object):
orders = {r:i for i, r in enumerate('23456789TJQKA', 1)}
def __init__(self):
self.nr_p1win = 0
with open('p054_poker.txt', 'r') as fd:
self.source = fd.read()
def royal_flush(self, cards):
vals = {x.value for x in cards}
colors = {x.color for x in cards}
return vals == {9, 10, 11, 12, 13} and len(colors) == 1
def straight_flush(self, cards):
colors = {x.color for x in cards}
if len(colors) == 1:
vals = [x.value for x in cards]
if {min(vals) + x for x in range(5)} == set(vals):
return min(vals)
return False
def four_of_a_kind(self, cards):
d = Counter([x.value for x in cards])
        l = [order for order, count in d.items() if count >= 4]
if l:
assert len(l) == 1
return l[0]
return False
    def full_house(self, cards):
        d = Counter([x.value for x in cards])
        if set(d.values()) == {2, 3}:
            # rank a full house by its three-of-a-kind value, not the top card
            return [v for v, c in d.items() if c == 3][0]
        return False
def flush(self, cards):
colors = [x.color for x in cards]
return len(set(colors)) == 1
def straight(self, cards):
vals = [x.value for x in cards]
if set(vals) == {min(vals) + x for x in range(5)}:
return min(vals)
return False
def three_of_a_kind(self, cards):
d = Counter([x.value for x in cards])
        l = [order for order, count in d.items() if count >= 3]
if l:
assert len(l) == 1
return l[0]
return False
def two_pairs(self, cards):
d = Counter([x.value for x in cards])
        l = [order for order, count in d.items() if count >= 2]
if len(l) >= 2:
return max(l)
return False
def one_pair(self, cards):
d = Counter([x.value for x in cards])
        l = [order for order, count in d.items() if count >= 2]
if l:
return max(l)
return False
def high_card_cmp(self, cards1, cards2):
return sorted([x.value for x in cards1], reverse=True) > sorted([x.value for x in cards2], reverse=True)
def solve(self):
def last_cmp(func):
return func == self.one_pair
def compare(evalf, c1, c2):
r1, r2 = evalf(c1), evalf(c2)
if r1 and r2 is False:
return 1
elif r2 and r1 is False:
return 2
elif r1 is False and r2 is False:
return 0
else:
if isinstance(r1, int):
assert isinstance(r2, int)
if r1 != r2:
return 1 if r1 > r2 else 2
else:
return 1 if self.high_card_cmp(c1, c2) else 2
for line in self.source.splitlines():
c1 = [Card(x) for x in line.split()[:5]]
c2 = [Card(x) for x in line.split()[5:]]
for evf in (self.royal_flush, self.straight_flush, self.four_of_a_kind,
self.full_house, self.flush, self.straight, self.three_of_a_kind,
self.two_pairs, self.one_pair):
result = compare(evf, c1, c2)
if result == 1:
self.nr_p1win += 1
break
elif result == 2:
break
else: # tie
if last_cmp(evf):
if self.high_card_cmp(c1, c2):
self.nr_p1win += 1
break
return self.nr_p1win
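# Requires p054_poker.txt (the Project Euler problem 54 input file) next to
# this script; the expected result for the official input is 376.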
s = Solve()
print(s.solve()) | {
"repo_name": "daicang/Euler",
"path": "p54.py",
"copies": "1",
"size": "3846",
"license": "mit",
"hash": -4629212245426725000,
"line_mean": 30.2764227642,
"line_max": 112,
"alpha_frac": 0.4880395216,
"autogenerated": false,
"ratio": 3.621468926553672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9594644186611181,
"avg_score": 0.0029728523084980607,
"num_lines": 123
} |
"""54. Spiral Matrix
https://leetcode.com/problems/spiral-matrix/
Given a matrix of m x n elements (m rows, n columns), return all elements of
the matrix in spiral order.
Example 1:
Input:
[
[ 1, 2, 3 ],
[ 4, 5, 6 ],
[ 7, 8, 9 ]
]
Output: [1,2,3,6,9,8,7,4,5]
Example 2:
Input:
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9,10,11,12]
]
Output: [1,2,3,4,8,12,11,10,9,5,6,7]
"""
from typing import List
class Solution:
def spiral_order(self, matrix: List[List[int]]) -> List[int]:
if not matrix:
return []
ans = []
row, col = len(matrix), len(matrix[0])
i, j, count, direction = 0, 0, 0, 0
while count < row * col:
ans.append(matrix[i][j])
matrix[i][j] = None
count += 1
if direction == 0 and (j < col - 1 and matrix[i][j + 1] is not None) \
or direction == 3 and (matrix[i - 1][j] is None):
# process right
direction = 0
j += 1
elif direction == 1 and (
i < row - 1 and matrix[i + 1][j] is not None) or \
direction == 0 and (
j == col - 1 or matrix[i][j + 1] is None):
# process down
direction = 1
i += 1
elif direction == 2 and (j > 0 and matrix[i][j - 1] is not None) or \
direction == 1 and (
j == col - 1 or matrix[i][j + 1] is None):
# process left
direction = 2
j -= 1
elif direction == 3 and (i > 0 and matrix[i - 1][j] is not None) or \
direction == 2 and (
j == col - 1 or matrix[i][j + 1] is None):
# process up
direction = 3
i -= 1
return ans
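# Illustrative usage (added; not part of the original solution):
if __name__ == '__main__':
    print(Solution().spiral_order([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
    # -> [1, 2, 3, 6, 9, 8, 7, 4, 5]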
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/spiral_matrix.py",
"copies": "1",
"size": "1856",
"license": "mit",
"hash": -9082359086984925000,
"line_mean": 27.1212121212,
"line_max": 82,
"alpha_frac": 0.4369612069,
"autogenerated": false,
"ratio": 3.437037037037037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9371237756310256,
"avg_score": 0.0005520975253564146,
"num_lines": 66
} |
# 5-5. Alien Colors #3: Turn your if-else chain from Exercise 5-4 into an if-elif-else chain.
# • If the alien is green, print a message that the player earned 5 points.
# • If the alien is yellow, print a message that the player earned 10 points.
# • If the alien is red, print a message that the player earned 15 points.
# • Write three versions of this program, making sure each message is printed for the appropriate color alien.
alien_color = 'green'
if alien_color == 'green':
print("You just earned 5 points")
elif alien_color == 'yellow':
print("You just earned 10 points")
else:
print("You just earned 15 points")
# ---------------------------------------------------------
alien_color = 'yellow'
if alien_color == 'green':
print("You just earned 5 points")
elif alien_color == 'yellow':
print("You just earned 10 points")
else:
print("You just earned 15 points")
# ---------------------------------------------------------
alien_color = 'red'
if alien_color == 'green':
print("You just earned 5 points")
elif alien_color == 'yellow':
print("You just earned 10 points")
else:
print("You just earned 15 points") | {
"repo_name": "AnhellO/DAS_Sistemas",
"path": "Ago-Dic-2019/DanielM/PracticaUno/5.5_AlienColors3.py",
"copies": "1",
"size": "1168",
"license": "mit",
"hash": -7141399838608891000,
"line_mean": 34.1818181818,
"line_max": 110,
"alpha_frac": 0.6232758621,
"autogenerated": false,
"ratio": 3.6024844720496896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47257603341496895,
"avg_score": null,
"num_lines": null
} |
"""55. Jump Game
https://leetcode.com/problems/jump-game/
Given an array of non-negative integers, you are initially positioned at the
first index of the array.
Each element in the array represents your maximum jump length at that position.
Determine if you are able to reach the last index.
Example 1:
Input: [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum jump
length is 0, which makes it impossible to reach the last index.
"""
from typing import List
class Solution:
def can_jump(self, nums: List[int]) -> bool:
def indexes_of(lst, item):
return [i for i, x in enumerate(lst) if x == item]
        if 0 not in nums:
            return True
        # the jump can only fail by getting stuck on a 0, so it suffices to
        # check that every 0 can be jumped over (or the end reached first).
length = len(nums)
zero_indexes = indexes_of(nums, 0)
for index in zero_indexes:
skip_zero = False
for i in range(index + 1):
if nums[i] <= index - i:
if nums[i] + i >= length - 1:
return True
                # nums[i] > index - i means position i can jump past this 0.
else:
skip_zero = True
break
if not skip_zero:
return False
return True
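# Illustrative usage (added):
if __name__ == '__main__':
    solution = Solution()
    print(solution.can_jump([2, 3, 1, 1, 4]))  # True
    print(solution.can_jump([3, 2, 1, 0, 4]))  # False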
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/jump_game.py",
"copies": "1",
"size": "1476",
"license": "mit",
"hash": -7310829640529877000,
"line_mean": 27.3846153846,
"line_max": 79,
"alpha_frac": 0.5846883469,
"autogenerated": false,
"ratio": 3.9151193633952253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49998077102952254,
"avg_score": null,
"num_lines": null
} |
"""560. Subarray Sum Equals K
Medium
Given an array of integers and an integer k, you need to find the total
number of continuous subarrays whose sum equals to k.
Example 1:
Input: nums = [1,1,1], k = 2
Output: 2
Note:
The length of the array is in range [1, 20,000].
The range of numbers in the array is [-1000, 1000] and the range of the
integer k is [-1e7, 1e7].
"""
class SolutionBruteForce(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
Time complexity: O(n^3).
Space complexity: O(n).
"""
result = 0
for i in range(len(nums)):
for j in range(i, len(nums)):
if sum(nums[i:(j + 1)]) == k:
result += 1
return result
class SolutionCusumCountDict(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
Time complexity: O(n).
Space complexity: O(n).
"""
from collections import defaultdict
# Create a dict: cusum->count, with init: 0->1.
cusum_count_d = defaultdict(int)
cusum_count_d[0] = 1
result = 0
cusum = 0
for num in nums:
cusum += num
# If cusum - k exists in cusum counts,
# subarray from (cusum - k) to "current" cusum equals k.
result += cusum_count_d[cusum - k]
# Increment cusum's count.
cusum_count_d[cusum] += 1
return result
def main():
import time
# Output: 2
nums = [1, 1, 1]
k = 2
start_time = time.time()
    print('Brute force: {}'.format(SolutionBruteForce().subarraySum(nums, k)))
    print('Time: {}'.format(time.time() - start_time))
    start_time = time.time()
    print('Cusum count dict: {}'.format(
        SolutionCusumCountDict().subarraySum(nums, k)))
    print('Time: {}'.format(time.time() - start_time))
    # Output: 3
    nums = [10, 2, -2, -20, 10]
    k = -10
    start_time = time.time()
    print('Brute force: {}'.format(SolutionBruteForce().subarraySum(nums, k)))
    print('Time: {}'.format(time.time() - start_time))
    start_time = time.time()
    print('Cusum count dict: {}'.format(
        SolutionCusumCountDict().subarraySum(nums, k)))
    print('Time: {}'.format(time.time() - start_time))
    # Output: 0
    nums = [1]
    k = 0
    start_time = time.time()
    print('Brute force: {}'.format(SolutionBruteForce().subarraySum(nums, k)))
    print('Time: {}'.format(time.time() - start_time))
    start_time = time.time()
    print('Cusum count dict: {}'.format(
        SolutionCusumCountDict().subarraySum(nums, k)))
    print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| {
"repo_name": "bowen0701/algorithms_data_structures",
"path": "lc0560_subarray_sum_equals_k.py",
"copies": "1",
"size": "2805",
"license": "bsd-2-clause",
"hash": -3399014722919224000,
"line_mean": 24.9722222222,
"line_max": 86,
"alpha_frac": 0.5629233512,
"autogenerated": false,
"ratio": 3.3234597156398102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43863830668398107,
"avg_score": null,
"num_lines": null
} |
"""56. Merge Intervals
https://leetcode.com/problems/merge-intervals/
Given a collection of intervals, merge all overlapping intervals.
Example 1:
Input: [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].
Example 2:
Input: [[1,4],[4,5]]
Output: [[1,5]]
Explanation: Intervals [1,4] and [4,5] are considered overlapping.
"""
from typing import List
class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
def sort_list(lst: List[List[int]]):
return sorted(lst, key=lambda x: x[0])
def is_overlap(l1: List[int], l2: List[int]):
if l1[1] < l2[0] or l1[0] > l2[1]:
return False
return True
def merge_list(l1: List[int], l2: List[int]):
left = min(l1[0], l1[1], l2[0], l2[1])
right = max(l1[0], l1[1], l2[0], l2[1])
l1[0] = left
l1[1] = right
if not intervals:
return intervals
intervals = sort_list(intervals)
ans = [intervals[0]]
for i in range(1, len(intervals)):
if is_overlap(ans[-1], intervals[i]):
merge_list(ans[-1], intervals[i])
else:
ans.append(intervals[i])
return ans
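# Illustrative usage (added):
if __name__ == '__main__':
    print(Solution().merge([[1, 3], [2, 6], [8, 10], [15, 18]]))
    # -> [[1, 6], [8, 10], [15, 18]]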
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/merge_intervals.py",
"copies": "1",
"size": "1336",
"license": "mit",
"hash": -5937573995945112000,
"line_mean": 26.2653061224,
"line_max": 77,
"alpha_frac": 0.5464071856,
"autogenerated": false,
"ratio": 3.1069767441860465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9153383929786046,
"avg_score": 0,
"num_lines": 49
} |
# 856. Score of Parentheses
# Difficulty: Medium
#
# Given a balanced parentheses string S, compute the score of the string
# based on the following rule:
#
# () has score 1
# AB has score A + B, where A and B are balanced parentheses strings.
# (A) has score 2 * A, where A is a balanced parentheses string.
#
#
# Example 1:
#
# Input: "()"
# Output: 1
# Example 2:
#
# Input: "(())"
# Output: 2
# Example 3:
#
# Input: "()()"
# Output: 2
# Example 4:
#
# Input: "(()(()))"
# Output: 6
#
#
# Note:
#
# S is a balanced parentheses string, containing only ( and ).
# 2 <= S.length <= 50
class Solution:
def scoreOfParentheses(self, S):
"""
:type S: str
:rtype: int
"""
stack = []
for c in S:
if c == '(':
stack.append(-1)
else:
cur = 0
while len(stack) > 0 and stack[-1] != -1:
cur += stack.pop()
if len(stack) > 0:
stack.pop()
stack.append(1 if cur == 0 else cur * 2)
return sum(stack)
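# Alternative sketch (added, not from the original file): O(1) space by
# counting each innermost "()" as 2 ** depth.
def score_of_parentheses_depth(s):
    ans = depth = 0
    for i, c in enumerate(s):
        if c == '(':
            depth += 1
        else:
            depth -= 1
            if s[i - 1] == '(':
                ans += 1 << depth
    return ans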
if __name__ == '__main__':
sol = Solution()
print(sol.scoreOfParentheses('(()(()))'))
| {
"repo_name": "kingdaa/LC-python",
"path": "lc/856_Score_of_Parentheses.py",
"copies": "1",
"size": "1188",
"license": "mit",
"hash": 2671439921528775000,
"line_mean": 19.1355932203,
"line_max": 72,
"alpha_frac": 0.4991582492,
"autogenerated": false,
"ratio": 3.3559322033898304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43550904525898304,
"avg_score": null,
"num_lines": null
} |
"""57. Insert Interval
https://leetcode.com/problems/insert-interval/
Given a set of non-overlapping intervals, insert a new interval into the
intervals (merge if necessary).
You may assume that the intervals were initially sorted according to their
start times.
Example 1:
Input: intervals = [[1,3],[6,9]], newInterval = [2,5]
Output: [[1,5],[6,9]]
Example 2:
Input: intervals = [[1,2],[3,5],[6,7],[8,10],[12,16]], newInterval = [4,8]
Output: [[1,2],[3,10],[12,16]]
Explanation: Because the new interval [4,8] overlaps with [3,5],[6,7],[8,10].
"""
from typing import List
class Solution(object):
    def insert_1(self, intervals, new_interval):
        """One-pass O(n) insertion (sketch completing the empty stub):
        copy intervals ending before the new one, merge every interval
        that overlaps it, then copy the rest.
        """
        ans = []
        i, n = 0, len(intervals)
        while i < n and intervals[i][1] < new_interval[0]:
            ans.append(intervals[i])
            i += 1
        merged = list(new_interval)
        while i < n and intervals[i][0] <= merged[1]:
            merged[0] = min(merged[0], intervals[i][0])
            merged[1] = max(merged[1], intervals[i][1])
            i += 1
        ans.append(merged)
        ans.extend(intervals[i:])
        return ans
def insert_2(self, intervals, new_interval):
"""
:type intervals: List[List[int]]
:type new_interval: List[int]
:rtype: List[List[int]]
"""
def sort_list(lst: List[List[int]]):
return sorted(lst, key=lambda x: x[0])
def is_overlap(l1: List[int], l2: List[int]):
if l1[1] < l2[0] or l1[0] > l2[1]:
return False
return True
def merge_list(l1: List[int], l2: List[int]):
left = min(l1[0], l1[1], l2[0], l2[1])
right = max(l1[0], l1[1], l2[0], l2[1])
l1[0] = left
l1[1] = right
if not new_interval:
return intervals
intervals.append(new_interval)
intervals = sort_list(intervals)
ans = [intervals[0]]
for i in range(1, len(intervals)):
if is_overlap(ans[-1], intervals[i]):
merge_list(ans[-1], intervals[i])
else:
ans.append(intervals[i])
return ans
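# Illustrative usage (added):
if __name__ == '__main__':
    sol = Solution()
    print(sol.insert_2([[1, 3], [6, 9]], [2, 5]))  # [[1, 5], [6, 9]]
    print(sol.insert_1([[1, 3], [6, 9]], [2, 5]))  # [[1, 5], [6, 9]]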
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/insert_interval.py",
"copies": "1",
"size": "1704",
"license": "mit",
"hash": -3360784160488429600,
"line_mean": 26.9344262295,
"line_max": 77,
"alpha_frac": 0.558685446,
"autogenerated": false,
"ratio": 3.2519083969465647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43105938429465646,
"avg_score": null,
"num_lines": null
} |
"""583. Delete Operation for Two Strings
Given two words word1 and word2, find the minimum number of steps required to
make word1 and word2 the same, where in each step you can delete one character
in either string.
Example 1:
Input: "sea", "eat"
Output: 2
Explanation: You need one step to make "sea" to "ea" and another step to make
"eat" to "ea".
Note:
The length of given words won't exceed 500.
Characters in given words can only be lower-case letters.
"""
class Solution:
def minDistance(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
"""
# if A == B return 0
# if A[i] != B[j]
# Need to compute the Longest common subsequence of A and B
# result = len(A) + len(B) - 2 * LCS(A, B)
# if there's no LCS then to get A == B need to del all of A and B
# otherwise if LCS exists need to remove LCS from len(A) + len(B)
# calculation.
def lcs(memo, a_idx, b_idx):
# reach base case for A or B
if a_idx == 0 or b_idx == 0:
return 0
if memo[a_idx - 1][b_idx - 1] == -1:
# continue adding to subsequence
if A[a_idx - 1] == B[b_idx - 1]:
memo[a_idx - 1][b_idx - 1] = 1 + lcs(memo, a_idx - 1, b_idx - 1)
else: # delete from A and B
memo[a_idx - 1][b_idx - 1] = max(lcs(memo, a_idx, b_idx - 1),\
lcs(memo, a_idx - 1, b_idx))
return memo[a_idx - 1][b_idx - 1]
memo = [[-1] * len(B) for _ in A]
return len(A) + len(B) - (2 * lcs(memo, len(A), len(B)))
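# Quick check (added): Solution().minDistance("sea", "eat") should return 2
# for either variant below.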
class Solution:
def minDistance(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
        O(n*m) time, O(m) space.
"""
if not A:
return len(B)
elif not B:
return len(A)
DP = [0] * (len(B) + 1)
for a_idx in range(len(A) + 1):
temp = [0] * (len(B) + 1)
for b_idx in range(len(B) + 1):
# if either is 0 then get the index of the other
# first row and column will be 1-len(A) or len(B)
if a_idx == 0 or b_idx == 0:
temp[b_idx] = a_idx + b_idx
# if equal char then carry previous char edit.
elif A[a_idx - 1] == B[b_idx - 1]:
temp[b_idx] = DP[b_idx - 1]
# get min of deleting from A or B
else:
temp[b_idx] = 1 + min(temp[b_idx - 1], DP[b_idx])
# use previous row for next row.
DP = temp
return DP[-1] | {
"repo_name": "carlb15/Python",
"path": "min_operations_for_2_strings.py",
"copies": "1",
"size": "2791",
"license": "mit",
"hash": 7423928261465065000,
"line_mean": 34.3417721519,
"line_max": 174,
"alpha_frac": 0.4632748119,
"autogenerated": false,
"ratio": 3.437192118226601,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4400466930126601,
"avg_score": null,
"num_lines": null
} |
""" 58 zhengzhou ershou house information """
from scrapy.spiders import Spider
from scrapy import Request
from five8.items import Five8Item
from five8.mydb import db_context
class ErshouHouseSpider(Spider):
name = '58ershou'
allowed_domains = ['58.com']
count = 0
item_count = 0
start_urls = \
['http://zz.58.com/ershoufang/0/?\
PGTID=0d30000c-0015-675a-6e79-8562022fa87f&ClickID=1']
custom_settings = {
'ITEM_PIPELINES':{
'five8.pipelines.Five8ToDbPipeline': 400
},
'DOWNLOADER_MIDDLEWARES': {
'five8.middlewares.Five8SpiderMiddleware': 543,
}
}
    # count of items that had already been crawled.
duplicated_count = 0
def parse(self, response):
""" parse url from summary page. """
info_urls = response.xpath('//h2[@class="title"]/a/@href').extract()
for info_url in info_urls:
if not db_context.exists_five8_houseurl(info_url):
print('start requesting next item. url:' + info_url)
yield Request(info_url, callback=self.parse_item)
else:
self.duplicated_count += 1
                print('this url has already been crawled. url:' + info_url)
for next_page in response.xpath('//div[@class="pager"]/a[@class="next"]/@href').extract():
if self.count < 100:
self.count += 1
print("start requesting next page:", self.count)
yield Request(next_page, callback=self.parse)
def parse_item(self, response):
""" parse house item from info page. """
self.item_count += 1
five8Item = Five8Item()
five8Item['phone'] = \
response.xpath('//p[@class="phone-num"]/text()').extract_first()
five8Item['phone_belong'] = \
response.xpath('//p[@class="phone-belong"]/span/text()').extract_first()
five8Item['expense'] = \
response.xpath('//div[@class="general-item general-expense"]').extract_first()
five8Item['situation'] = \
response.xpath('//div[@class="general-item general-situation"]').extract_first()
five8Item['description'] = \
response.xpath('//p[@class="pic-desc-word"]').extract_first()
five8Item['owner'] = \
response.xpath('//div[contains(@class, "agent-info")]\
/p[contains(@class, "agent-name")]/a/text()').extract_first()
five8Item['district'] = \
response.xpath('//div[contains(@class,"nav-top-bar")]/a[2]/text()').extract_first()
five8Item['sub_district'] = \
response.xpath('//div[contains(@class,"nav-top-bar")]/a[3]/text()').extract_first()
if five8Item['owner'] is None:
five8Item['owner'] = \
response.xpath('//div[contains(@class, "agent-info")]/div/a/text()').extract_first()
print('extract five8item:' + str(five8Item))
return five8Item
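# Run with (illustrative): scrapy crawl 58ershou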
| {
"repo_name": "rwecho/fflask_blog",
"path": "five8/five8/spiders/five8_spider.py",
"copies": "1",
"size": "3024",
"license": "apache-2.0",
"hash": -2646751737909206000,
"line_mean": 37.7692307692,
"line_max": 98,
"alpha_frac": 0.5826719577,
"autogenerated": false,
"ratio": 3.6129032258064515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46955751835064513,
"avg_score": null,
"num_lines": null
} |
# 594. Longest Harmonious Subsequence:
"""
We define a harmonious array is an array where the difference between
its maximum value and its minimum value is exactly 1.
Now, given an integer array, you need to find the length of its
longest harmonious subsequence among all its possible subsequences.
Input: [1,3,2,2,5,2,3,7]
Output: 5
Explanation: The longest harmonious subsequence is [3,2,2,2,3].
"""
def findLHS(s):
    if not s:
        return 0
    if len(s) == 1:
        # a single value cannot span a difference of exactly 1
        return 0
    count = {}
    # count the number of occurrences of each value
    for x in s:
        count[x] = count.get(x, 0) + 1
    result = 0
    for x in count:
        # a harmonious pair needs both x and x + 1 present
        if x + 1 in count:
            result = max(result, count[x] + count[x + 1])
    return result
# Testing
print(findLHS([1,3,2,2,5,2,3,7]))
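# Compact alternative (added sketch) using collections.Counter:
from collections import Counter
def find_lhs_counter(nums):
    cnt = Counter(nums)
    return max([cnt[x] + cnt[x + 1] for x in cnt if x + 1 in cnt], default=0)
print(find_lhs_counter([1, 3, 2, 2, 5, 2, 3, 7]))  # 5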
| {
"repo_name": "ledrui/programming-problems",
"path": "leetCode/weekly_contest/longest_Harmonious_Subsequence.py",
"copies": "1",
"size": "1027",
"license": "mit",
"hash": -2081233146562587100,
"line_mean": 24.675,
"line_max": 69,
"alpha_frac": 0.5628042843,
"autogenerated": false,
"ratio": 3.3782894736842106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.444109375798421,
"avg_score": null,
"num_lines": null
} |
"""59. Spiral Matrix II
https://leetcode.com/problems/spiral-matrix-ii/
Given a positive integer n, generate a square matrix filled with elements from
1 to n^2 in spiral order.
Example:
Input: 3
Output:
[
[ 1, 2, 3 ],
[ 8, 9, 4 ],
[ 7, 6, 5 ]
]
"""
from typing import List
class Solution:
def generate_matrix(self, n: int) -> List[List[int]]:
matrix = [[0] * n for _ in range(n)]
num = 1
i, j, direction = 0, 0, 0
while num <= n * n:
matrix[i][j] = num
num += 1
if direction == 0 and (j < n - 1 and matrix[i][j + 1] == 0) or \
direction == 3 and (matrix[i - 1][j] != 0):
# process right
direction = 0
j += 1
elif direction == 1 and (i < n - 1 and matrix[i + 1][j] == 0) or \
direction == 0 and (j == n - 1 or matrix[i][j + 1] != 0):
# process down
direction = 1
i += 1
elif direction == 2 and (j > 0 and matrix[i][j - 1] == 0) or \
direction == 1 and (j == n - 1 or matrix[i][j + 1] != 0):
# process left
direction = 2
j -= 1
elif direction == 3 and (i > 0 and matrix[i - 1][j] == 0) or \
direction == 2 and (j == n - 1 or matrix[i][j + 1] != 0):
# process up
direction = 3
i -= 1
return matrix
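# Illustrative usage (added):
if __name__ == '__main__':
    for row in Solution().generate_matrix(3):
        print(row)
    # [1, 2, 3] / [8, 9, 4] / [7, 6, 5]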
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/spiral_matrix_ii.py",
"copies": "1",
"size": "1492",
"license": "mit",
"hash": -4323743834863863000,
"line_mean": 29.4489795918,
"line_max": 78,
"alpha_frac": 0.4302949062,
"autogenerated": false,
"ratio": 3.577937649880096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45082325560800957,
"avg_score": null,
"num_lines": null
} |
# 5a
def rev(l):
return [x for x in l[::-1]]
assert(rev([1,2,3]) == [3,2,1])
# 5b
def rev(l):
return reduce(lambda a, n: [n] + a, l, [])
assert(rev([1,2,3]) == [3,2,1])
# 6
def print_some(l):
print_ret = -1 in l
def decorator(f):
def decorated(*args):
for x in l:
if x >= 0:
try:
print "Arg %d: %d" % (x, args[x])
except IndexError:
pass
result = f(*args)
if print_ret:
print "Return: %d" % result
return result
return decorated
return decorator
@print_some([-1,1,0])
def plus(x, y):
print "-- plus called --"
return x + y
print(plus(1, 2))
@print_some([-2, 100])
def plus(x, y):
print "-- plus called --"
return x + y
print(plus(1, 2))
@print_some([-1, 0])
def fac(n):
print "-- fac called --"
    if n == 0: return 1
else: return n * fac(n - 1)
print(fac(2))
# 7a
class Tree:
def __init__(self, name, children):
self.name = name
self.children = children
# Returns True if the Tree represents a prolog variable (e.g. X),
# and False otherwise
def is_var(self):
try:
int(self.name)
return False
except ValueError:
return self.children == []
# Returns the string representation of the Tree as a Prolog term.
def __repr__(self):
children = ', '.join(map(repr, self.children))
return '%s%s' % (self.name, '(%s)' % children if children else '')
# Constructs a Tree representing a Prolog variable with name n
def var(n):
return Tree(n, [])
# Constructs a Tree representing a non-variable term with name n
# and children c
def node(n, c):
return Tree(n, c)
def apply_to_tree(s, t):
if not t.is_var():
return node(t.name, [apply_to_tree(s, c) for c in t.children])
elif t.name in s:
return apply_to_tree(s, s[t.name])
else:
return t
s1 = {}
s1['X'] = node('foo', [node(5, [])])
s2 = s1.copy()
s2['Y'] = node('baz', [node(10, []), var('X')])
t1 = node('bat', [var('X')])
t2 = node('bat', [var('X'), var('Y')])
assert(repr(apply_to_tree(s1, t1)) == 'bat(foo(5))')
assert(repr(apply_to_tree(s2, t2)) == 'bat(foo(5), baz(10, foo(5)))')
assert(repr(apply_to_tree(s1, t2)) == 'bat(foo(5), Y)')
# 7b
def unify(a, b, s={}):
a = apply_to_tree(s, a)
b = apply_to_tree(s, b)
result = s.copy()
if a.is_var() and b.is_var():
result[a.name] = b
elif a.is_var() and not b.is_var():
if a.name in result:
unify(result[a.name], b, result)
else:
result[a.name] = b
elif not a.is_var() and b.is_var():
return unify(b, a, s)
elif not a.is_var() and not b.is_var():
if a.name != b.name:
return False
if len(a.children) != len(b.children):
return False
for ca, cb in (cs for cs in zip(a.children, b.children)
if result is not False):
result = unify(ca, cb, result)
return result
t1 = node("foo", [var("X"),node(5,[])])
t2 = node("foo", [node(10,[]),var("Y")])
t3 = node("foo", [node(10,[]),var("X")])
t4 = node("bar", [var("X"),var("Y"),var("X")])
t5 = node("bar", [node("foo", [var("Y")]),node("3",[]),node("foo", [node("3",[])])])
t6 = node("bar", [node("foo", [var("Y")]),node("3",[]),node("foo", [node("4",[])])])
assert(repr(unify(t1,t2)) == repr({'Y': 5, 'X': 10}))
assert(repr(unify(t1,t3)) == repr(False))
assert(repr(unify(t4,t5)) == repr({'Y': 3, 'X': node('foo', [var('Y')])}))
assert(repr(unify(t4,t6)) == repr(False))
| {
"repo_name": "metakirby5/cse130-exams",
"path": "finals/sp13.py",
"copies": "1",
"size": "3673",
"license": "mit",
"hash": -3892894689803983400,
"line_mean": 26.2074074074,
"line_max": 84,
"alpha_frac": 0.51184318,
"autogenerated": false,
"ratio": 2.8762725137039937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38881156937039935,
"avg_score": null,
"num_lines": null
} |
# -*- coding: cp1252 -*-
#Copyright © 2015 B3nac
import os
import sys
import random
import math
import pygame as pg
from gamedata import constants as a
from gameobjects import galaxywh
from gamedata import player
from gamedata import bullet
import SpaceRiot
class Control(object):
"""Class that manages primary conrol flow for the whole program."""
def __init__(self):
pg.mixer.music.play(10)
self.screen = pg.display.get_surface()
self.screen_rect = self.screen.get_rect()
self.clock = pg.time.Clock()
self.fps = 20.0
self.keys = pg.key.get_pressed()
self.done = False
self.font = pg.font.Font("resource/fonts/freesansbold.ttf", 24)
self._image_library = {}
self.setup_sprites()
self.player = player.Player(self.get_image('resource/images/sprite.png'), (390,400), self.player_sprite)
self.health = 10
self.lives = 4
self.level = 1
self.scene = 0
def get_image(self, _path):
image = self._image_library.get(_path)
        if image is None:
butt_path = _path.replace('/', os.sep).replace('\\', os.sep)
image = pg.image.load(butt_path).convert_alpha()
self._image_library[butt_path] = image
return image
#Not finished
def highScore(self):
""" Main program is here. """
try:
self.high_score_file = open("high_score.txt", "r")
self.player.high_score = int(self.high_score_file.read())
self.high_score_file.close()
print("The high score is", self.player.high_score)
except IOError:
print("There is no high score yet.")
except ValueError:
print("I'm confused. Starting with no high score.")
if self.player.score > self.player.high_score:
print("Woot! New high score!")
try:
#Write the file to disk
self.high_score_file = open("high_score.txt", "w")
self.high_score_file.write(str(self.player.score))
self.high_score_file.close()
except IOError:
#Hm, can't write it.
print("Too bad I couldn't save it.")
else:
print("Better luck next time.")
def setup_sprites(self):
"""Create all our sprite groups."""
self.player_sprite = pg.sprite.Group()
self.all_sprites = pg.sprite.Group()
self.bullets = pg.sprite.Group()
self.galaxywh = pg.sprite.Group()
self.make_galaxywh(1)
def make_galaxywh(self, amount):
for i in range(amount):
x = random.randrange(a.SCREEN_WIDTH-self.get_image('resource/images/galaxywh.png').get_width())
y = random.randrange(100)
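            # note: x and y are computed but unused; Galaxy spawns at (100, 100)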
galaxywh.Galaxy(self.get_image('resource/images/galaxywh.png'), (100,100), self.galaxywh)
def galaxy(self):
collide = pg.sprite.spritecollideany(self.player, self.galaxywh)
if collide:
collide.kill()
self.scene = 2
def event_loop(self):
"""Event loop for program; there can be only one."""
for event in pg.event.get():
if event.type == pg.QUIT or self.keys[pg.K_ESCAPE]:
self.done = True
if event.type == pg.KEYDOWN and event.key == pg.K_f:
pg.display.toggle_fullscreen()
elif event.type in (pg.KEYDOWN, pg.KEYUP):
self.keys = pg.key.get_pressed()
if self.keys[pg.K_SPACE]:
self.player.shoot(self.bullets, self.all_sprites)
LASER.play()
def update(self):
"""Update all sprites and check collisions."""
self.player_sprite.update(self.keys, self.screen_rect)
self.all_sprites.update(self.keys, self.screen_rect)
self.galaxy()
if self.scene == 2:
SpaceRiot.main()
if self.lives == 0:
main()
def draw(self):
"""Draw all items to the screen."""
self.screen.fill(a.WHITE)
self.screen.blit(self.get_image('resource/images/spacebg.png'), [0,0])
self.player_sprite.draw(self.screen)
self.all_sprites.draw(self.screen)
self.galaxywh.draw(self.screen)
#for self.explosion in self.explosions:
#self.explosion.display()
text = self.font.render("Score: {}".format(self.player.score), True, a.WHITE)
self.screen.blit(text, (10,5))
text = self.font.render("Health: {}".format(self.health), True, a.WHITE)
self.screen.blit(text, (10,35))
text = self.font.render("Lives: {}".format(self.lives), True, a.WHITE)
self.screen.blit(text, (10,65))
text = self.font.render("Highscore: {}".format(self.player.high_score), True, a.WHITE)
self.screen.blit(text, (550,5))
def main_loop(self):
while not self.done:
self.event_loop()
self.update()
self.draw()
pg.display.update()
self.clock.tick(self.fps)
def main():
global LASER, EXPLO
pg.init()
pg.mixer.pre_init(44100, -16, 2, 128)
pg.display.set_mode((a.SCREEN_WIDTH, a.SCREEN_HEIGHT))
pg.display.set_caption("SpaceRiot v0.0.5")
#Convert all image_loads to use cache function.
pg.mixer.music.load('resource/sounds/Its like Im walking on air.ogg')
LASER = pg.mixer.Sound('resource/sounds/laser.ogg')
EXPLO = pg.mixer.Sound('resource/sounds/explosion.ogg')
app = Control()
app.main_loop()
pg.quit()
sys.exit()
if __name__ == "__main__":
main()
| {
"repo_name": "B3nac/SpaceRiot0.0.5",
"path": "galaxyMap.py",
"copies": "1",
"size": "5714",
"license": "mit",
"hash": -411815346857102500,
"line_mean": 35.864516129,
"line_max": 112,
"alpha_frac": 0.580679034,
"autogenerated": false,
"ratio": 3.57125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9586247097587792,
"avg_score": 0.013136387282441531,
"num_lines": 155
} |
""" 5 Kyu - sum of pairs kata.
I was having difficulty getting this kata to run within the
12000ms time constraint.
Eventually, I collaborated with another programmer to
determine that xrange was necessary for "lazy" loop evaluation.
This would prevent the entire loop from being evaluated
prior to the function return."""
def sum_pairs(ints, s):
values_index = {} # Dictionary with the value as index, and key as the list location
for i in xrange(len(ints)): # Runs a loop for as many items in ints
if s - ints[i] in values_index.keys(): # If the value we are on is one of the keys in the dictionary
for num in values_index[s - ints[i]]: # for every value that works as second number in the dictionary
return [ints[num], ints[i]] # return the outer i as second num, inner 'num' as first num
else:
values_index[ints[i]] = [i] #Store the value in the dictionary with key as value and value as index
# Essentially, it's creating possible matches as indexes of each i value as it goes
# That i value will always be the first number,'num' since it was entered into
# the index first
return None
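# Example (added): sum_pairs([11, 3, 7, 5], 10) -> [3, 7]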
""" Solution that didn't work due to time"""
def sum_pairs(ints, s):
best_tuple = ()
finals = set()
for ind2, num2 in enumerate(tuple(ints)):
for ind1, num1 in enumerate(tuple(ints)):
if (num1 + num2) == s:
finals.add(((ind1, num1), (ind2, num2)))
if finals == set():
return None
else:
best_tuple = finals.pop()
for pair in finals:
if (pair[0] == pair[1]) or (pair[0][0] > pair[1][0]):
continue
elif pair[1][0] < best_tuple[1][0]:
best_tuple = tuple(pair)
elif (pair[1][0] == best_tuple[1][0]) and (pair[0][0] < best_tuple[0][0]):
best_tuple = tuple(pair)
else:
pass
return [best_tuple[0][1], best_tuple[1][1]]
"""SIMPLER SOLUTION, lost to time constraints"""
def sum_pairs(ints, s):
for ind2, num2 in enumerate(tuple(ints)):
for num1 in set(ints[:ind2]):
if num1 + num2 == s:
return [num1, num2]
return None
| {
"repo_name": "tylerc-atx/script_library",
"path": "solved_5kyu-sumofpairs.py",
"copies": "1",
"size": "2187",
"license": "mit",
"hash": 6166104548391995000,
"line_mean": 33.7142857143,
"line_max": 113,
"alpha_frac": 0.6154549611,
"autogenerated": false,
"ratio": 3.5970394736842106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.471249443478421,
"avg_score": null,
"num_lines": null
} |
# 5 Layer DNN for digits 0 to 4 of MNIST
# 100 neurons in each layer
# ADAM optimization and early stopping
# Import modules and data
import tensorflow as tf
import numpy as np
from tensorflow.contrib.layers import fully_connected, batch_norm, dropout
from tensorflow.examples.tutorials.mnist import input_data
# Set random seed
tf.set_random_seed(123)
np.random.seed(123)
n_inputs = 28 * 28
n_hidden_1 = 100
n_hidden_2 = 100
n_hidden_3 = 100
n_hidden_4 = 100
n_hidden_5 = 100
learning_rate = 0.005
# Use only digits 0 to 4
n_outputs = 5
# Get data and separate digits 0-4 out
mnist = input_data.read_data_sets("/tmp/data/")
X_images, y_images = mnist.train.images, mnist.train.labels
X_images_test, y_images_test = mnist.test.images, mnist.test.labels
# Create 'index' and subset of MNIST
indices = [idx for idx in range(len(y_images)) if y_images[idx] < 5]
X_masked_train = X_images[indices]
y_masked_train = y_images[indices]
# Do same for test set
indices_test = [idx for idx in range(len(y_images_test)) if y_images_test[idx] < 5]
X_test = X_images_test[indices_test]
y_test = y_images_test[indices_test]
validation_metrics = {
"accuracy":
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key=tf.contrib.learn.prediction_key.PredictionKey.CLASSES)}
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
x=X_test, y=y_test, early_stopping_rounds=50, metrics=validation_metrics)
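# NOTE: a ValidationMonitor only takes effect when passed to a
# tf.contrib.learn Estimator via fit(monitors=[...]); it is not wired into
# the raw-session training loop below.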
# Construct graph
# Use He initialization
he_init = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X')
y = tf.placeholder(tf.int64, shape=(None), name='y')
# Set up necessary variables for batch norm
is_training = tf.placeholder(tf.bool, shape=(), name='Is_Training')
bn_params = {'is_training': is_training, 'decay': 0.999, 'updates_collections': None}
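# updates_collections=None makes batch_norm update its moving averages in
# place during training, so no separate update op has to be run.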
# Set up drop out regularization
keep_prob = 0.5
with tf.contrib.framework.arg_scope([fully_connected],
normalizer_fn=batch_norm, normalizer_params=bn_params,
weights_initializer=he_init, scope='DNN'):
X_drop = dropout(X, keep_prob, is_training=is_training)
hidden_1 = fully_connected(X_drop, n_hidden_1,
activation_fn=tf.nn.elu, scope='Hidden_1')
hidden_1_drop = dropout(hidden_1, keep_prob, is_training=is_training)
hidden_2 = fully_connected(hidden_1_drop,
n_hidden_2, activation_fn=tf.nn.elu, scope='Hidden_2')
hidden_2_drop = dropout(hidden_2, keep_prob, is_training=is_training)
hidden_3 = fully_connected(hidden_2_drop,
n_hidden_3, activation_fn=tf.nn.elu, scope='Hidden_3')
hidden_3_drop = dropout(hidden_3, keep_prob, is_training=is_training)
hidden_4 = fully_connected(hidden_3_drop,
n_hidden_4, activation_fn=tf.nn.elu, scope='Hidden_4')
hidden_4_drop = dropout(hidden_4, keep_prob, is_training=is_training)
hidden_5 = fully_connected(hidden_4_drop,
n_hidden_5, activation_fn=tf.nn.elu, scope='Hidden_5')
hidden_5_drop = dropout(hidden_5, keep_prob, is_training=is_training)
logits = fully_connected(hidden_5_drop,
n_outputs, activation_fn=None, scope='Outputs')
with tf.name_scope('Loss'):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name='Loss')
with tf.name_scope('Train'):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
with tf.name_scope('Eval'):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
# Execution
n_epochs = 1000
batch_size = 200
batches = len(y_masked_train)//batch_size
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for k in range(batches):
X_batch = X_masked_train[k*batch_size:k*batch_size+batch_size]
y_batch = y_masked_train[k*batch_size:k*batch_size+batch_size]
sess.run(training_op, feed_dict={is_training: True, X: X_batch, y: y_batch})
# print('Max logits: ', max_logits.eval(feed_dict={X: X_test}))
# print('Max labels: ', max_labels.eval(feed_dict={y: y_test}))
acc_train = accuracy.eval(feed_dict={is_training: False, X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={is_training: False, X: X_test, y: y_test})
if epoch % 5 == 0:
print(epoch, "Train accuracy: ", acc_train, "Test accuracy: ", acc_test)
| {
"repo_name": "KT12/hands_on_machine_learning",
"path": "5_layer_dnn.py",
"copies": "1",
"size": "4533",
"license": "mit",
"hash": 5207969959014310000,
"line_mean": 37.4152542373,
"line_max": 89,
"alpha_frac": 0.6898301346,
"autogenerated": false,
"ratio": 3.0240160106737823,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9203119851503971,
"avg_score": 0.0021452587539623432,
"num_lines": 118
} |
# 5. Longest Palindromic Substring - LeetCode
# https://leetcode.com/problems/longest-palindromic-substring/description/
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
s = "#" + "#".join(s) + "#"
def check_palindrome(start,end):
if start < 0 or end >= len(s):
return False
while end > start:
if s[start] != s[end]:
return False
end -= 1
start += 1
return True
odd_max = 1
odd_start = 0
odd_end = 0
for i in xrange(len(s)-1):
if i - odd_max < 0 or i + odd_max > len(s):
continue
if check_palindrome(i-odd_max,i+odd_max): # odd
start = i - odd_max
end = i + odd_max
while start >= 0 and end < len(s):
if s[start] == s[end]:
odd_max += 1
odd_start = start
odd_end = end
else:
break
start -= 1
end += 1
return s[odd_start:odd_end+1].replace("#","")
ans = [
("",[""]),
("a",["a"]),
("ab",["a","b"]),
("babad",["bab","aba"]),
("cbbd",["bb"]),
("abcd",["a","b","c","d"]),
("abbbc",["bbb"]),
("abbbbc",["bbbb"]),
("abb",["bb"]),
]
s = Solution()
for i in ans:
    r = s.longestPalindrome(i[0])
print r, r in i[1] | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/005_longest-palindromic-substring.py",
"copies": "1",
"size": "1580",
"license": "mit",
"hash": -2259868846164747500,
"line_mean": 27.7454545455,
"line_max": 74,
"alpha_frac": 0.3949367089,
"autogenerated": false,
"ratio": 3.648960739030023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4543897447930023,
"avg_score": null,
"num_lines": null
} |
"""5. Longest Palindromic Substring
Given a string S, find the longest palindromic substring in S.
You may assume that the maximum length of S is 1000, and there exists one unique longest palindromic substring.
"""
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
workstr = '^' + '#'.join(s) + '$'
radiuses = [0] * len(workstr)
center = edge = 0
center_max = 1
for i in range(1, len(workstr) - 1):
if edge > i:
radiuses[i] = min(radiuses[center * 2 - i], edge - i)
else:
radiuses[i] = 0
while workstr[i + radiuses[i] + 1] == workstr[i - radiuses[i] - 1]:
radiuses[i] += 1
if workstr[i + radiuses[i]] == '#':
radiuses[i] -= 1
if i + radiuses[i] > edge:
edge = i + radiuses[i]
center = i
if radiuses[center] >= radiuses[center_max]:
center_max = center
return workstr[center_max - radiuses[center_max]: center_max + radiuses[center_max] + 1].replace('#', '')
| {
"repo_name": "nadesico19/nadepy",
"path": "leetcode/algo_5_longest_palindromic_substring.py",
"copies": "1",
"size": "1220",
"license": "mit",
"hash": 328861900666210100,
"line_mean": 30.1052631579,
"line_max": 113,
"alpha_frac": 0.4885245902,
"autogenerated": false,
"ratio": 3.685800604229607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9672016614206445,
"avg_score": 0.00046171604463255095,
"num_lines": 38
} |
''' 5-statistics-error.py
=========================
AIM: Perform basic statistics on the data and gets the maximal stray light flux for one orbit
INPUT: files: - <orbit_id>_misc/orbits.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_misc/ : one statistics file
in <orbit_id>_figures/ : error evolution, max. stray light evolution
CMD: python 5-statistics-error.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: <none>
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import os
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.figures as figures
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# Orbit id
orbit_id = 1001
# Error threshold
p = 0.1
# Flux limitation [ph/(px s)]
rqmt_flux = 1
# Stray light exclusion angle [deg], used only in the output file names
# (assumed value -- must match the angle used by the earlier pipeline steps)
sl_angle = 35
# File name for the output data file (same as in 2-statistics-step.py)
data_file = 'statistics-error.dat'
# Show plots and detailed analysis?
show = True
# Fancy plots ?
fancy = True
###########################################################################
### INITIALISATION
# File name for the computed orbit file
error_file = 'error_evolution.dat'
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
if fancy: figures.set_fancy()
if os.path.isfile(folder_misc+data_file):
os.remove(folder_misc+data_file)
f = open(folder_misc+data_file,'w')
###########################################################################
### Load which orbits were computed
data = np.loadtxt(folder_misc+error_file, delimiter=',')
# Data type:
# ref,val,step,error,max_sl,shift
### Error evolution
print >> f, '# ERRORS'
print >> f, '# ! All errors are normalised to 1'
print >> f, '# ! ie 1.0 = 100%'
print >> f, 'error_max:', np.amax(data[:,3])
print >> f, 'error_min:', np.amin(data[:,3])
print >> f, 'error_mean:', np.mean(data[:,3])
print >> f, 'error_std:', np.std(data[:,3])
fig=plt.figure()
ax=plt.subplot(111)
ax.yaxis.set_major_locator(MultipleLocator(5))
ax.yaxis.set_minor_locator(MultipleLocator(1))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
xx = data[:,1]/param.last_orbits[orbit_id]*365.
xx = figures.convert_date(xx)
plt.plot(xx, data[:,3]*100, linewidth=1.5)
plt.plot([xx[0],xx[-1]], [p*100., p*100.], color='r', lw=3)
fig.autofmt_xdate()
plt.ylim([0, 15])
plt.ylabel(r'$\mathrm{Error\ to\ previous\ step\ [\%]}$')
# Saves the figure
fname = '%serror_evolution_%d_%d' % (folder_figures,orbit_id,sl_angle)
figures.savefig(fname,fig,fancy)
############ STRAY LIGHT
print >> f, '# STRAY LIGHT'
# Get the direction of minimum stray light
id_min = find_nearest(data[:,4], np.amin(data[data[:,4]>0, 4]))
orbit_min = data[id_min, 1]
time_min, ra_min, dec_min, sl_min = find_direction_flux(orbit_min, orbit_id, find='min', folder=folder_flux)
print >> f, 'min:', sl_min
print >> f, 'minute_min:', time_min
print >> f, 'RA_min:', ra_min
print >> f, 'DEC_min:', dec_min
print >> f, 'mean:', np.mean(data[:,4])
print >> f, 'stddev:', np.std(data[:,4])
# Get the direction of maximum stray light
id_max = find_nearest(data[:,4],np.amax(data[:,4]))
orbit_max = data[id_max, 1]
time_max, ra_max, dec_max, sl_max = find_direction_flux(orbit_max, orbit_id, folder=folder_flux)
print >> f, 'max:', np.amax(sl_max)
print >> f, 'minute_max:', time_max
print >> f, 'RA_max:', ra_max
print >> f, 'DEC_max:', dec_max
print >> f, 'mean:', np.mean(data[:,4])
print >> f, 'stddev:', np.std(data[:,4])
print >> f, 'orbit_above_rqmt:', np.shape(data[np.where(data[:,4]>rqmt_flux)])[0]
print >> f, 'total_orbits:', np.shape(data)[0]
### Maximal sl
fig=plt.figure()
ax=plt.subplot(111)
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
plt.plot(xx, data[:,4], linewidth=3)
plt.plot([xx[0],xx[-1]], [rqmt_flux, rqmt_flux], color='r', lw=3)
fig.autofmt_xdate()
plt.ylabel(r'$\mathrm{Maximum\ stray\ light\ flux\ }\left[\frac{\mathrm{ph}}{\mathrm{px}\cdot\mathrm{s}}\right]$')
# Saves the figure
fname = '%sstray_light_flux_%d_%d' % (folder_figures,orbit_id,sl_angle)
figures.savefig(fname,fig,fancy)
####################################################################
fig=plt.figure()
ax=plt.subplot(111)
# zooms
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_minor_locator(MultipleLocator(0.02))
#ax.xaxis.set_major_locator(MultipleLocator(20.))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
plt.plot(xx, data[:,4], linewidth=3)
fig.autofmt_xdate()
plt.ylim([0, 0.2])
plt.ylabel(r'$\mathrm{Maximum\ stray\ light\ flux\ }\left[\frac{\mathrm{ph}}{\mathrm{px}\cdot\mathrm{s}}\right]$')
# Saves the figure
fname = '%sstray_light_flux_zoom_%d_%d' % (folder_figures,orbit_id,sl_angle)
figures.savefig(fname,fig,fancy)
if show: plt.show()
f.close()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "5_statistics_error.py",
"copies": "1",
"size": "5532",
"license": "bsd-3-clause",
"hash": -4815206374991323000,
"line_mean": 28.1157894737,
"line_max": 114,
"alpha_frac": 0.6352133044,
"autogenerated": false,
"ratio": 2.8544891640866874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8924813314257329,
"avg_score": 0.012977830845871537,
"num_lines": 190
} |
# 5. El Emperador is celebrating its anniversary and will offer its customers
# a series of deals meant to increase sales. Each deal is a percentage
# discount on the purchase total, depending on the amount spent:
#   - $500 or more: 30% discount
#   - under $500 but at least $200: 20% discount
#   - under $200 but at least $100: 10% discount
# Write this program for 5 users per run.
descuento1 = 0.30
descuento2 = 0.20
descuento3 = 0.10
clientes = 0
while clientes < 5:
    clientes += 1
    print("cliente numero {0}".format(clientes))
    compra = float(input("ingrese compra: "))
    if compra >= 500:
        descuento = compra * descuento1
    elif compra >= 200:
        descuento = compra * descuento2
    elif compra >= 100:
        descuento = compra * descuento3
    else:
        descuento = 0
        print("no existe descuento")
    total = compra - descuento
    print("total a pagar del cliente es {0}".format(total))
| {
"repo_name": "eliecer11/Uip-prog3",
"path": "laboratorios/quiz2/quiz-2.py",
"copies": "1",
"size": "1205",
"license": "mit",
"hash": 3394683489740133400,
"line_mean": 30.6842105263,
"line_max": 89,
"alpha_frac": 0.7259136213,
"autogenerated": false,
"ratio": 2.4824742268041238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3708387848104124,
"avg_score": null,
"num_lines": null
} |
""" 5th order Runge-Kutta integration. """
# Third-party
import numpy as np
# Project
from ..core import Integrator
from ..timespec import parse_time_specification
__all__ = ["RK5Integrator"]
# These are the Cash-Karp parameters (from Numerical Recipes) for embedded
# Runge-Kutta methods
A = np.array([0.0, 0.2, 0.3, 0.6, 1.0, 0.875])
B = np.array([[0.0, 0.0, 0.0, 0.0, 0.0],
[1./5., 0.0, 0.0, 0.0, 0.0],
[3./40., 9./40., 0.0, 0.0, 0.0],
[3./10., -9./10., 6./5., 0.0, 0.0],
[-11./54., 5./2., -70./27., 35./27., 0.0],
[1631./55296., 175./512., 575./13824., 44275./110592., 253./4096.]
])
C = np.array([37./378., 0., 250./621., 125./594., 0., 512./1771.])
D = np.array([2825./27648., 0., 18575./48384., 13525./55296.,
277./14336., 1./4.])
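# D holds the embedded 4th-order weights; they are unused below because the
# step size is fixed rather than adaptive.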
class RK5Integrator(Integrator):
r"""
Initialize a 5th order Runge-Kutta integrator given a function for
computing derivatives with respect to the independent variables. The
function should, at minimum, take the independent variable as the
first argument, and the coordinates as a single vector as the second
argument. For notation and variable names, we assume this independent
variable is time, t, and the coordinate vector is named x, though it
could contain a mixture of coordinates and momenta for solving
Hamilton's equations, for example.
.. seealso::
- http://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
Parameters
----------
func : func
A callable object that computes the phase-space coordinate
derivatives with respect to the independent variable at a point
in phase space.
func_args : tuple (optional)
Any extra arguments for the function.
func_units : `~gala.units.UnitSystem` (optional)
If using units, this is the unit system assumed by the
integrand function.
"""
def step(self, t, w, dt):
""" Step forward the vector w by the given timestep.
Parameters
----------
dt : numeric
The timestep to move forward.
"""
# Runge-Kutta Fehlberg formulas (see: Numerical Recipes)
F = lambda t, w: self.F(t, w, *self._func_args)
K = np.zeros((6,)+w.shape)
K[0] = dt * F(t, w)
K[1] = dt * F(t + A[1]*dt, w + B[1][0]*K[0])
K[2] = dt * F(t + A[2]*dt, w + B[2][0]*K[0] + B[2][1]*K[1])
K[3] = dt * F(t + A[3]*dt, w + B[3][0]*K[0] + B[3][1]*K[1] + B[3][2]*K[2])
K[4] = dt * F(t + A[4]*dt, w + B[4][0]*K[0] + B[4][1]*K[1] + B[4][2]*K[2] + B[4][3]*K[3])
K[5] = dt * F(t + A[5]*dt, w + B[5][0]*K[0] + B[5][1]*K[1] + B[5][2]*K[2] + B[5][3]*K[3] + B[5][4]*K[4])
# shift
dw = np.zeros_like(w)
for i in range(6):
dw = dw + C[i]*K[i]
return w + dw
def run(self, w0, mmap=None, **time_spec):
# generate the array of times
times = parse_time_specification(self._func_units, **time_spec)
n_steps = len(times)-1
dt = times[1]-times[0]
w0_obj, w0, ws = self._prepare_ws(w0, mmap, n_steps=n_steps)
# Set first step to the initial conditions
ws[:, 0] = w0
w = w0.copy()
range_ = self._get_range_func()
for ii in range_(1, n_steps+1):
w = self.step(times[ii], w, dt)
ws[:, ii] = w
return self._handle_output(w0_obj, times, ws)
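# Minimal usage sketch (kept as a comment; assumes run()'s keywords are
# forwarded to parse_time_specification): integrate a dimensionless 1D
# harmonic oscillator, w = (x, v), dw/dt = (v, -x).
#
# import numpy as np
# integrator = RK5Integrator(lambda t, w: np.array([w[1], -w[0]]))
# orbit = integrator.run(np.array([1., 0.]), dt=0.01, n_steps=1000)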
| {
"repo_name": "adrn/gala",
"path": "gala/integrate/pyintegrators/rk5.py",
"copies": "2",
"size": "3474",
"license": "mit",
"hash": -4285348254675915300,
"line_mean": 34.0909090909,
"line_max": 112,
"alpha_frac": 0.540875072,
"autogenerated": false,
"ratio": 2.951571792693288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.948935067005716,
"avg_score": 0.0006192389272257558,
"num_lines": 99
} |
"""5th update that creates Requests table and adds a backref to game and users
Revision ID: 2b7d1717f5cb
Revises: 32a8ac91d28d
Create Date: 2015-11-09 13:07:07.328317
"""
# revision identifiers, used by Alembic.
revision = '2b7d1717f5cb'
down_revision = '32a8ac91d28d'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('requests',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('time_requested', sa.DateTime(), nullable=False),
sa.Column('game_id', sa.Integer(), nullable=False),
sa.Column('http_verb', sa.String(), nullable=False),
sa.Column('uri', sa.String(), nullable=False),
sa.Column('status', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['game_id'], ['games.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column(u'users', sa.Column('game_id', sa.Integer(), nullable=False))
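    # NOTE: adding a non-nullable column without a server_default will fail on
    # a non-empty users table; backfill existing rows or supply a default first.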
op.create_foreign_key(None, 'users', 'games', ['game_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
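    # NOTE: autogenerate emitted None for the constraint name; replace it with
    # the real foreign-key name (or a naming convention) before running this
    # downgrade on most backends.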
op.drop_constraint(None, 'users', type_='foreignkey')
op.drop_column(u'users', 'game_id')
op.drop_table('requests')
### end Alembic commands ###
| {
"repo_name": "Rdbaker/Rank",
"path": "migrations/versions/2b7d1717f5cb_.py",
"copies": "2",
"size": "1352",
"license": "mit",
"hash": 3731670218726443000,
"line_mean": 32.8,
"line_max": 79,
"alpha_frac": 0.6664201183,
"autogenerated": false,
"ratio": 3.3137254901960786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9936517259538142,
"avg_score": 0.008725669791587154,
"num_lines": 40
} |
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
from datetime import date
from datetime import time
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from models import Session
from models import SessionForm
from models import SessionForms
from models import SessionType
from models import Speaker
from models import SpeakerForm
from models import SpeakerForms
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
MEMCACHE_FEATURED_SPEAKER = "FEATURED_SPEAKER"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
'are nearly sold out: %s')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
SESSION_DEFAULTS = {
"typeOfSession": "Other",
"highlights": "",
"duration": 1.0,
"startTime": "12:00"
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_TYPE_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
typeOfSession=messages.EnumField(SessionType, 2)
)
SPEAKER_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSpeakerKey=messages.StringField(1)
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
SESSION_GET_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeSessionKey=messages.StringField(1)
)
SESSION_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1),
websafeSpeakerKey=messages.StringField(2)
)
SESSION_DELETE_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSessionKey=messages.StringField(1)
)
SPEAKER_POST_REQUEST = endpoints.ResourceContainer(
SpeakerForm,
websafeSpeakerKey=messages.StringField(1)
)
WISHLIST_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSessionKey=messages.StringField(1)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1',
audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID,
ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object,
returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException(
"Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) \
for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(
data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(
data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
# generate Profile Key based on user ID and Conference
# ID based on Profile key get Conference key from ID
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) \
for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='PUT', name='updateConference')
def updateConference(self, request):
"""Update conference w/provided fields & return w/updated info."""
return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(
conf, getattr(prof, 'displayName')) for conf in confs]
)
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"],
filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) \
for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException(
"Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException(
"Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) \
for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(
conf, names[conf.organizerUserId]) for conf in conferences]
)
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name,
getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore,
creating new one if non-existent."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
return StringMessage(
data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) \
for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) \
for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(
conf, names[conf.organizerUserId]) for conf in conferences]
)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
"""Filter Playground"""
q = Conference.query()
# field = "city"
# operator = "="
# value = "London"
# f = ndb.query.FilterNode(field, operator, value)
# q = q.filter(f)
q = q.filter(Conference.city=="London")
q = q.filter(Conference.topics=="Medical Innovations")
q = q.filter(Conference.month==6)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
def _copySessionToForm(self, session):
"""Copy relevant fields from Session to SessionForm."""
# copy relevant fields from Session to SessionForm
form = SessionForm()
setattr(form, 'websafeKey', session.key.urlsafe())
for field in form.all_fields():
if hasattr(session, field.name):
# convert session type enum to string; just copy others
if field.name == 'typeOfSession':
if getattr(session, field.name) == '' or getattr(session, field.name) is None:
setattr(form, field.name, 'Other')
else:
setattr(form, field.name, getattr(SessionType,
getattr(session, field.name)))
elif field.name == 'conferenceKey' or field.name == 'speakerKey':
value = getattr(session, field.name)
if value is not None:
setattr(form, field.name,
getattr(session, field.name).urlsafe())
else:
setattr(form, field.name, '')
elif field.name == 'startTime' or field.name == 'date':
setattr(form, field.name,
str(getattr(session, field.name)))
else:
setattr(form, field.name,
getattr(session, field.name))
form.check_initialized()
return form
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='getConferenceSessions',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Get Conference Sessions"""
query = Session.query(ancestor=ndb.Key(
urlsafe=request.websafeConferenceKey))
return SessionForms(
items=[self._copySessionToForm(session) for session in query]
)
@endpoints.method(CONF_TYPE_GET_REQUEST, SessionForms,
path='getConferenceSessionsByType',
http_method='GET', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Get conference sessions filtered by type"""
conference_key = ndb.Key(urlsafe=request.websafeConferenceKey)
query = Session.query(ancestor=conference_key) \
.filter(Session.typeOfSession == str(request.typeOfSession))
return SessionForms(
items=[self._copySessionToForm(session) for session in query]
)
@endpoints.method(SPEAKER_REQUEST, SessionForms,
path='getSessionsBySpeaker',
http_method='GET', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Get all sessions for a speaker"""
speaker_key = ndb.Key(urlsafe=request.websafeSpeakerKey)
query = Session.query().filter(Session.speakerKey == speaker_key)
return SessionForms(
items=[self._copySessionToForm(session) for session in query]
)
def _copySpeakerToForm(self, speaker):
"""Copy relevant fields from Speaker to SpeakerForm."""
form = SpeakerForm()
if speaker is not None:
setattr(form, 'websafeKey', speaker.key.urlsafe())
for field in form.all_fields():
if hasattr(speaker, field.name):
setattr(form, field.name, getattr(speaker, field.name))
form.check_initialized()
return form
def _createSpeakerObject(self, request):
"""Create Speaker object,
returning SpeakerForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException(
"Speaker 'name' field required")
# copy SpeakerForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) \
for field in request.all_fields()}
del data['websafeKey']
speaker_id = Speaker.allocate_ids(size=1)[0]
speaker_key = ndb.Key(Speaker, speaker_id)
data['key'] = speaker_key
# create Speaker
speaker = Speaker(**data)
speaker.put()
# return the modified SpeakerForm
return self._copySpeakerToForm(speaker)
@endpoints.method(SpeakerForm, SpeakerForm,
path='createSpeaker',
http_method='POST', name='createSpeaker')
def createSpeaker(self, request):
"""Create a speaker"""
return self._createSpeakerObject(request)
@endpoints.method(StringMessage, SpeakerForms,
path='getSpeakersByName',
http_method='GET', name='getSpeakersByName')
def getSpeakersByName(self, request):
"""Get a list of speakers with the given name"""
query = Speaker.query().filter(Speaker.name == request.data)
return SpeakerForms(
items=[self._copySpeakerToForm(speaker) for speaker in query]
)
@endpoints.method(SESSION_GET_REQUEST, SpeakerForm,
path='getSpeakerForSession',
http_method='GET', name='getSpeakerForSession')
def getSpeakerForSession(self, request):
"""Get speaker for a session"""
        speaker = ndb.Key(urlsafe=request.websafeSessionKey) \
            .get().speakerKey.get()
        return self._copySpeakerToForm(speaker)
@staticmethod
def _featureSpeaker(urlsafeSpeakerKey, urlsafeConferenceKey):
"""Feature speaker with more than one session at conference"""
conference_key = ndb.Key(urlsafe=urlsafeConferenceKey)
conference_name = conference_key.get().name
speaker_key = ndb.Key(urlsafe=urlsafeSpeakerKey)
speaker_name = speaker_key.get().name
sessions = Session.query(ancestor=conference_key) \
.filter(Session.speakerKey == speaker_key)
if sessions.count() > 1:
# If there are multiple sessions for the speaker at the conference,
# add the featured speaker to the memcache
memcache.delete(MEMCACHE_FEATURED_SPEAKER)
announcement = 'Now at %s, attend these sessions from speaker %s: %s' \
% (conference_name, speaker_name,
', '.join(session.name for session in sessions))
memcache.set(MEMCACHE_FEATURED_SPEAKER, announcement)
else:
announcement = ''
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/featured_speaker/get',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Return Featured Speaker from memcache."""
return StringMessage(
data=memcache.get(MEMCACHE_FEATURED_SPEAKER) or "")
def _createSessionObject(self, request):
"""Create or update Session object,
returning SessionForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
if not request.name:
raise endpoints.BadRequestException(
"Session 'name' field required")
# Get Conference Key
conference_key = ndb.Key(urlsafe=request.websafeConferenceKey)
        # check that the conference exists (the Key object is always truthy,
        # so the entity itself must be fetched)
        if not conference_key.get():
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# Get Speaker Key
speaker_key = ndb.Key(urlsafe=request.websafeSpeakerKey)
        # check that the speaker exists (fetch the entity; the Key is always truthy)
        if not speaker_key.get():
raise endpoints.NotFoundException(
'No speaker found with key: %s' % request.websafeSpeakerKey)
userId = getUserId(user)
if userId != conference_key.get().organizerUserId:
raise ConflictException(
'Only the conference organizer can make sessions for the conference')
# copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) \
for field in request.all_fields()}
del data['websafeKey']
del data['websafeConferenceKey']
del data['websafeSpeakerKey']
# add default values for those missing (both data model & outbound Message)
for default in SESSION_DEFAULTS:
if data[default] in (None, []):
data[default] = SESSION_DEFAULTS[default]
setattr(request, default, SESSION_DEFAULTS[default])
# convert dates from strings to Date objects; set month based on start_date
if data['startTime']:
data['startTime'] = datetime.strptime(
data['startTime'], "%H:%M").time()
if data['date']:
data['date'] = datetime.strptime(
data['date'][:10], "%Y-%m-%d").date()
if data['typeOfSession']:
# Only take string form of type of session enum
data['typeOfSession'] = data['typeOfSession'].name
session_id = Session.allocate_ids(size=1, parent=conference_key)[0]
session_key = ndb.Key(Session, session_id, parent=conference_key)
data['key'] = session_key
data['conferenceKey'] = conference_key
data['speakerKey'] = speaker_key
# create Session, send email to organizer confirming
# creation of Session & return (modified) SessionForm
session = Session(**data)
session.put()
taskqueue.add(params={'urlsafeSpeakerKey': request.websafeSpeakerKey,
'urlsafeConferenceKey': request.websafeConferenceKey},
url='/tasks/feature_speaker'
)
return self._copySessionToForm(session)
@endpoints.method(SESSION_POST_REQUEST, SessionForm,
path='session',
http_method='POST', name='createSession')
def createSession(self, request):
"""Create a session"""
return self._createSessionObject(request)
@endpoints.method(SESSION_DELETE_REQUEST, StringMessage,
path='session',
http_method='DELETE', name='deleteSession')
def deleteSession(self, request):
"""Delete a session"""
# Get current user
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# Get Session Key
session_key = ndb.Key(urlsafe=request.websafeSessionKey)
        # check that the session exists (fetch the entity; the Key is always truthy)
        if not session_key.get():
raise endpoints.NotFoundException(
'No session found with key: %s' % request.websafeSessionKey)
# Check that user matches conference organizer
conference_key = session_key.get().conferenceKey
if user_id != conference_key.get().organizerUserId:
raise ConflictException(
'Only the conference organizer can delete sessions for the conference')
session_key.delete()
# Delete session_key from profile wishlists
profiles = Profile.query()
for profile in profiles:
if session_key in profile.sessionWishlist:
profile.sessionWishlist.remove(session_key)
profile.put()
return StringMessage(data='Session deleted')
@endpoints.method(WISHLIST_POST_REQUEST, StringMessage,
path='profile/wishlist',
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Add a session to the current user's wishlist"""
# Get current user
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
profile = ndb.Key(Profile, user_id).get()
session_key = ndb.Key(urlsafe=request.websafeSessionKey)
if session_key not in profile.sessionWishlist:
profile.sessionWishlist.append(session_key)
profile.put()
else:
raise endpoints.BadRequestException(
'Session to add already exists in the user\'s wishlist')
return StringMessage(data='Session added to wishlist')
@endpoints.method(message_types.VoidMessage, SessionForms,
path='profile/wishlist',
http_method='GET', name='getSessionsInWishlist')
def getSessionsInWishList(self, request):
"""Get all sessions in the user's wishlist"""
# Get current user
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
profile = ndb.Key(Profile, user_id).get()
session_keys = profile.sessionWishlist
return SessionForms(
items=[self._copySessionToForm(session_key.get()) \
for session_key in session_keys]
)
@endpoints.method(WISHLIST_POST_REQUEST, StringMessage,
path='profile/wishlist',
http_method='DELETE', name='deleteSessionInWishlist')
def deleteSessionInWishlist(self, request):
"""Delete a session in the user's wishlist"""
# Get current user
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
profile = ndb.Key(Profile, user_id).get()
session_key = ndb.Key(urlsafe=request.websafeSessionKey)
if session_key in profile.sessionWishlist:
profile.sessionWishlist.remove(session_key)
profile.put()
else:
raise endpoints.BadRequestException(
'Session to delete does not exist in the user\'s wishlist')
return StringMessage(data='Session deleted from wishlist')
@endpoints.method(message_types.VoidMessage, StringMessage,
path='deleteAllSessionsInWishlist',
http_method='DELETE', name='deleteAllSessionsInWishlist')
def deleteAllSessionsInWishlist(self, request):
"""Delete all sessions from the current user's wishlist"""
# Get current user
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
profile = ndb.Key(Profile, user_id).get()
profile.sessionWishlist = []
profile.put()
return StringMessage(data='All sessions deleted from wishlist')
@endpoints.method(SPEAKER_REQUEST, SessionForms,
path='upcomingSessionsForSpeaker',
http_method='GET', name='upcomingSessionsForSpeaker')
def upcomingSessionsForSpeaker(self, request):
"""Returns all sessions for a speaker
that taking place today or later"""
speaker_key = ndb.Key(urlsafe=request.websafeSpeakerKey)
query = Session.query().filter(Session.speakerKey == speaker_key) \
.filter(Session.date >= date.today())
return SessionForms(
items=[self._copySessionToForm(session) for session in query]
)
@endpoints.method(message_types.VoidMessage, SessionForms,
path='nonWorkshopSessionsBefore7',
http_method='GET', name='nonWorkshopSessionsBefore7')
def nonWorkshopSessionsBefore7(self, request):
"""Return all sessions that are not workshops
and start before 7pm (19:00)"""
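        # Datastore forbids inequality filters on two different properties in
        # one query, so run two keys-only queries and intersect the key sets.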
nonWorkshop = Session.query(Session.typeOfSession != 'Workshop') \
.fetch(keys_only=True)
before7 = Session.query(Session.startTime <= \
datetime.strptime('19:00', '%H:%M').time()).fetch(keys_only=True)
sessions = ndb.get_multi(set(nonWorkshop).intersection(before7))
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions]
)
api = endpoints.api_server([ConferenceApi]) # register API
| {
"repo_name": "cpwhidden/Conference-Central",
"path": "conference.py",
"copies": "1",
"size": "38673",
"license": "apache-2.0",
"hash": -4525158265753829400,
"line_mean": 36.8775710088,
"line_max": 98,
"alpha_frac": 0.6135029607,
"autogenerated": false,
"ratio": 4.229330708661418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004714913574579827,
"num_lines": 1021
} |
# 5x4 LED arrangments for digits
num1 = [[0, 1, 0, 0],
[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
[1, 1, 1, 0]]
num2 = [[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 1, 1, 1]]
num3 = [[1, 1, 1, 0],
[0, 0, 0, 1],
[0, 1, 1, 0],
[0, 0, 0, 1],
[1, 1, 1, 0]]
num4 = [[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 1, 1, 1],
[0, 0, 1, 0],
[0, 0, 1, 0]]
num5 = [[1, 1, 1, 1],
[1, 0, 0, 0],
[1, 1, 1, 0],
[0, 0, 0, 1],
[1, 1, 1, 0]]
num6 = [[0, 1, 1, 0],
[1, 0, 0, 0],
[1, 1, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 0]]
num7 = [[1, 1, 1, 1],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0]]
num8 = [[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 0]]
num9 = [[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 1],
[0, 0, 0, 1],
[1, 1, 1, 0]]
num0 = [[0, 1, 1, 0],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0]]
numbers = [num0, num1, num2, num3, num4, num5, num6, num7, num8, num9]
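# Quick console preview (added sketch, not part of the original file):
# '#' marks a lit LED and '.' a dark one for any digit matrix above.
if __name__ == '__main__':
    for row in numbers[3]:
        print(''.join('#' if led else '.' for led in row))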
| {
"repo_name": "kbsezginel/raspberry-pi",
"path": "scripts/rpi/8x8-led-lmatrix/numbers5x4.py",
"copies": "1",
"size": "1215",
"license": "bsd-3-clause",
"hash": -2691787429683438600,
"line_mean": 18.2857142857,
"line_max": 70,
"alpha_frac": 0.2576131687,
"autogenerated": false,
"ratio": 2.06984667802385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.782745984672385,
"avg_score": 0,
"num_lines": 63
} |
# 6.0001 Problem Set 3
#
# The 6.0001 Word Game
# Created by: Kevin Luu <luuk> and Jenna Wiens <jwiens>
#
# Name : <your name>
# Collaborators : <your collaborators>
# Time spent : <total time>
import math
import random
import string
import re
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10, '*': 0
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# wordlist: list of strings
wordlist = []
for line in inFile:
wordlist.append(line.strip().lower())
print(" ", len(wordlist), "words loaded.")
return wordlist
def get_frequency_dict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x, 0) + 1
return freq
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def get_word_score(word, n):
"""
Returns the score for a word. Assumes the word is a
valid word.
You may assume that the input word is always either a string of letters,
or the empty string "". You may not assume that the string will only contain
lowercase letters, so you will have to handle uppercase and mixed case strings
appropriately.
The score for a word is the product of two components:
The first component is the sum of the points for letters in the word.
The second component is the larger of:
1, or
7*wordlen - 3*(n-wordlen), where wordlen is the length of the word
and n is the hand length when the word was played
Letters are scored as in Scrabble; A is worth 1, B is
worth 3, C is worth 3, D is worth 2, E is worth 1, and so on.
word: string
n: int >= 0
returns: int >= 0
"""
total_score = 0
for l in word.lower():
total_score += SCRABBLE_LETTER_VALUES[l]
    total_score *= max(7 * len(word) - 3 * (n - len(word)), 1)
return total_score
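# Worked example: get_word_score('weed', 6)
#   letter sum = 4 + 1 + 1 + 2 = 8
#   second component = max(1, 7*4 - 3*(6-4)) = 22
#   score = 8 * 22 = 176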
#
# Make sure you understand how this function works and what it does!
#
def display_hand(hand):
"""
Displays the letters currently in the hand.
For example:
display_hand({'a':1, 'x':2, 'l':3, 'e':1})
Should print out something like:
a x x l l l e
The order of the letters is unimportant.
hand: dictionary (string -> int)
"""
for letter in hand.keys():
for j in range(hand[letter]):
print(letter, end=' ') # print all on the same line
print() # print an empty line
#
# Make sure you understand how this function works and what it does!
# You will need to modify this for Problem #4.
#
def deal_hand(n):
"""
    Returns a random hand of n symbols, one of which is the '*' wildcard
    (it takes the place of one vowel).
ceil(n/3) letters in the hand should be VOWELS (note,
ceil(n/3) means the smallest integer not less than n/3).
Hands are represented as dictionaries. The keys are
letters and the values are the number of times the
particular letter is repeated in that hand.
n: int >= 0
returns: dictionary (string -> int)
"""
hand = {'*': 1}
num_vowels = int(math.ceil(n / 3))
for i in range(num_vowels - 1):
x = random.choice(VOWELS)
hand[x] = hand.get(x, 0) + 1
for i in range(num_vowels, n):
x = random.choice(CONSONANTS)
hand[x] = hand.get(x, 0) + 1
return hand
#
# Problem #2: Update a hand by removing letters
#
def update_hand(hand, word):
"""
Does NOT assume that hand contains every letter in word at least as
many times as the letter appears in word. Letters in word that don't
appear in hand should be ignored. Letters that appear in word more times
than in hand should never result in a negative count; instead, set the
count in the returned hand to 0 (or remove the letter from the
dictionary, depending on how your code is structured).
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
new_hand = hand.copy()
for key in word.lower():
if key in new_hand:
new_hand[key] -= 1
if new_hand[key] < 1:
new_hand.pop(key)
return new_hand
#
# Problem #3: Test word validity
#
def is_valid_word(word, hand, word_list):
"""
Returns True if word is in the word_list and is entirely
composed of letters in the hand. Otherwise, returns False.
Does not mutate hand or word_list.
word: string
hand: dictionary (string -> int)
word_list: list of lowercase strings
returns: boolean
"""
is_valid = True
word = word.lower()
copy_hand = hand.copy()
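    # Phase 1 (wildcard check): '*' may stand for any vowel; the word is
    # playable only if at least one vowel substitution appears in word_list.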
for l in VOWELS.lower():
if word.replace('*', l) not in word_list:
is_valid = False
else:
is_valid = True
break
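    # Phase 2: every character (including the '*' wildcard) must be available
    # in the hand, in sufficient quantity.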
for l in word:
if l not in copy_hand.keys() or copy_hand[l] < 1:
is_valid = False
else:
copy_hand[l] -= 1
return is_valid
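# Example, assuming 'cat' appears in word_list:
#   is_valid_word('c*t', {'c': 1, '*': 1, 't': 1}, word_list) -> True
#       (the wildcard is tried as each vowel, and 'c*t' -> 'cat' is found)
#   is_valid_word('cat', {'c': 1, '*': 1, 't': 1}, word_list) -> False
#       (the word itself is valid, but there is no 'a' in the hand)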
#
# Problem #5: Playing a hand
#
def calculate_handlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
    # Sum the counts: a hand like {'a': 2, 'b': 1} holds 3 letters, not 2
    return sum(hand.values())
def play_hand(hand, word_list):
"""
Allows the user to play the given hand, as follows:
* The hand is displayed.
* The user may input a word.
* When any word is entered (valid or invalid), it uses up letters
from the hand.
* An invalid word is rejected, and a message is displayed asking
the user to choose another word.
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the user
is asked to input another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when there are no more unused letters.
The user can also finish playing the hand by inputing two
exclamation points (the string '!!') instead of a word.
hand: dictionary (string -> int)
word_list: list of lowercase strings
returns: the total score for the hand
"""
total_score = 0
copy_hand = hand.copy()
    while calculate_handlen(copy_hand) > 0:
print('Current Hand:', end=' ')
display_hand(copy_hand)
user_word = input(
'Enter word, or "!!" to indicate that you are finished: ')
if user_word == '!!':
            print('Total score:', total_score, 'points')
return total_score
elif is_valid_word(user_word, copy_hand, word_list):
            current_points = get_word_score(user_word, calculate_handlen(copy_hand))
total_score += current_points
print('"' + user_word + '"',
'earned', current_points,
'points. Total:', total_score, 'points')
else:
print('That is not a valid word. Please choose another word.')
        # update_hand lowercases the word, so uppercase input still uses up letters
        copy_hand = update_hand(copy_hand, user_word)
print()
print('Ran out of letters.')
print('Total score:', total_score, 'points')
print('----------')
return total_score
#
# Problem #6: Playing a game
#
#
# procedure you will use to substitute a letter in a hand
#
def substitute_hand(hand, letter):
"""
Allow the user to replace all copies of one letter in the hand (chosen by user)
with a new letter chosen from the VOWELS and CONSONANTS at random. The new letter
should be different from user's choice, and should not be any of the letters
already in the hand.
    If the user provides a letter not in the hand, the hand should remain the same.
Has no side effects: does not mutate hand.
For example:
substitute_hand({'h':1, 'e':1, 'l':2, 'o':1}, 'l')
might return:
{'h':1, 'e':1, 'o':1, 'x':2} -> if the new letter is 'x'
The new letter should not be 'h', 'e', 'l', or 'o' since those letters were
already in the hand.
hand: dictionary (string -> int)
letter: string
returns: dictionary (string -> int)
"""
    result = hand.copy()  # work on a copy so the original hand is not mutated
    if letter in result:
        # replace the old letter with a random letter not already in the hand
        new_char = random.choice(
            [c for c in string.ascii_lowercase if c not in result])
        result[new_char] = result.pop(letter)
    return result
def play_game(word_list):
"""
Allow the user to play a series of hands
* Asks the user to input a total number of hands
* Accumulates the score for each hand into a total score for the
entire series
* For each hand, before playing, ask the user if they want to substitute
one letter for another. If the user inputs 'yes', prompt them for their
desired letter. This can only be done once during the game. Once the
    substitute option is used, the user should not be asked if they want to
substitute letters in the future.
* For each hand, ask the user if they would like to replay the hand.
If the user inputs 'yes', they will replay the hand and keep
the better of the two scores for that hand. This can only be done once
during the game. Once the replay option is used, the user should not
be asked if they want to replay future hands. Replaying the hand does
not count as one of the total number of hands the user initially
wanted to play.
* Note: if you replay a hand, you do not get the option to substitute
a letter - you must play whatever hand you just had.
* Returns the total score for the series of hands
word_list: list of lowercase strings
"""
    total_score = 0
    number_of_hands = int(input('Enter total number of hands: '))
    substitution_used = False
    replay_used = False
    while number_of_hands > 0:
        current_hand = deal_hand(HAND_SIZE)
        print('Current hand:', end=' ')
        display_hand(current_hand)
        # The substitution is offered only once per game, and never on a replay
        if not substitution_used:
            if input('Would you like to substitute a letter? ') == 'yes':
                chosen_letter = input('Which letter would you like to replace: ')
                current_hand = substitute_hand(current_hand, chosen_letter)
                substitution_used = True
        hand_score = play_hand(current_hand, word_list)
        # The replay is also offered only once; keep the better of the two scores
        if not replay_used:
            if input('Would you like to replay the hand? ') == 'yes':
                replay_used = True
                hand_score = max(hand_score, play_hand(current_hand, word_list))
        total_score += hand_score
        number_of_hands -= 1
    print('Total score over all hands:', total_score)
#
# Build data structures used for entire session and play game
# Do not remove the "if __name__ == '__main__':" line - this code is executed
# when the program is run directly, instead of through an import statement
#
if __name__ == '__main__':
word_list = load_words()
play_game(word_list)
| {
"repo_name": "Dreemsuncho/Introduction-to-Computer-Science-and-Programming-using-python-MIT",
"path": "Class/ps3/ps3.py",
"copies": "1",
"size": "12040",
"license": "mit",
"hash": 3318135784448066600,
"line_mean": 27.1967213115,
"line_max": 220,
"alpha_frac": 0.6063122924,
"autogenerated": false,
"ratio": 3.7754782063342742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9868319815727953,
"avg_score": 0.0026941366012643133,
"num_lines": 427
} |
# 6.0001 Problem Set 3
#
# The 6.0001 Word Game
# Created by: Kevin Luu <luuk> and Jenna Wiens <jwiens>
#
# Name : <your name>
# Collaborators : <your collaborators>
# Time spent : <total time>
import math
import random
import string
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# wordlist: list of strings
wordlist = []
for line in inFile:
wordlist.append(line.strip().lower())
print(" ", len(wordlist), "words loaded.")
return wordlist
def get_frequency_dict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
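# Example:
#   get_frequency_dict('banana') -> {'b': 1, 'a': 3, 'n': 2}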
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def get_word_score(word, n):
"""
Returns the score for a word. Assumes the word is a
valid word.
You may assume that the input word is always either a string of letters,
or the empty string "". You may not assume that the string will only contain
lowercase letters, so you will have to handle uppercase and mixed case strings
appropriately.
The score for a word is the product of two components:
The first component is the sum of the points for letters in the word.
The second component is the larger of:
1, or
7*wordlen - 3*(n-wordlen), where wordlen is the length of the word
and n is the hand length when the word was played
Letters are scored as in Scrabble; A is worth 1, B is
worth 3, C is worth 3, D is worth 2, E is worth 1, and so on.
word: string
n: int >= 0
returns: int >= 0
"""
pass # TO DO... Remove this line when you implement this function
#
# Make sure you understand how this function works and what it does!
#
def display_hand(hand):
"""
Displays the letters currently in the hand.
For example:
display_hand({'a':1, 'x':2, 'l':3, 'e':1})
Should print out something like:
a x x l l l e
The order of the letters is unimportant.
hand: dictionary (string -> int)
"""
for letter in hand.keys():
for j in range(hand[letter]):
print(letter, end=' ') # print all on the same line
print() # print an empty line
#
# Make sure you understand how this function works and what it does!
# You will need to modify this for Problem #4.
#
def deal_hand(n):
"""
Returns a random hand containing n lowercase letters.
ceil(n/3) letters in the hand should be VOWELS (note,
ceil(n/3) means the smallest integer not less than n/3).
Hands are represented as dictionaries. The keys are
letters and the values are the number of times the
particular letter is repeated in that hand.
n: int >= 0
returns: dictionary (string -> int)
"""
    hand = {}
num_vowels = int(math.ceil(n / 3))
for i in range(num_vowels):
x = random.choice(VOWELS)
hand[x] = hand.get(x, 0) + 1
for i in range(num_vowels, n):
x = random.choice(CONSONANTS)
hand[x] = hand.get(x, 0) + 1
return hand
#
# Problem #2: Update a hand by removing letters
#
def update_hand(hand, word):
"""
Does NOT assume that hand contains every letter in word at least as
many times as the letter appears in word. Letters in word that don't
appear in hand should be ignored. Letters that appear in word more times
than in hand should never result in a negative count; instead, set the
count in the returned hand to 0 (or remove the letter from the
dictionary, depending on how your code is structured).
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
pass # TO DO... Remove this line when you implement this function
#
# Problem #3: Test word validity
#
def is_valid_word(word, hand, word_list):
"""
Returns True if word is in the word_list and is entirely
composed of letters in the hand. Otherwise, returns False.
Does not mutate hand or word_list.
word: string
hand: dictionary (string -> int)
word_list: list of lowercase strings
returns: boolean
"""
pass # TO DO... Remove this line when you implement this function
#
# Problem #5: Playing a hand
#
def calculate_handlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
pass # TO DO... Remove this line when you implement this function
def play_hand(hand, word_list):
"""
Allows the user to play the given hand, as follows:
* The hand is displayed.
* The user may input a word.
* When any word is entered (valid or invalid), it uses up letters
from the hand.
* An invalid word is rejected, and a message is displayed asking
the user to choose another word.
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the user
is asked to input another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when there are no more unused letters.
The user can also finish playing the hand by inputing two
exclamation points (the string '!!') instead of a word.
hand: dictionary (string -> int)
word_list: list of lowercase strings
returns: the total score for the hand
"""
# BEGIN PSEUDOCODE <-- Remove this comment when you implement this function
# Keep track of the total score
# As long as there are still letters left in the hand:
# Display the hand
# Ask user for input
# If the input is two exclamation points:
# End the game (break out of the loop)
# Otherwise (the input is not two exclamation points):
# If the word is valid:
# Tell the user how many points the word earned,
# and the updated total score
# Otherwise (the word is not valid):
# Reject invalid word (print a message)
# update the user's hand by removing the letters of their inputted word
# Game is over (user entered '!!' or ran out of letters),
# so tell user the total score
# Return the total score as result of function
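    # A minimal sketch of the loop described above (one possible shape for
    # the solution; it assumes the functions from Problems #1-#3 and
    # calculate_handlen are already implemented):
    #
    #     total_score = 0
    #     while calculate_handlen(hand) > 0:
    #         display_hand(hand)
    #         word = input('Enter word, or "!!" to indicate that you are finished: ')
    #         if word == '!!':
    #             break
    #         if is_valid_word(word, hand, word_list):
    #             total_score += get_word_score(word, calculate_handlen(hand))
    #             print('Total:', total_score, 'points')
    #         else:
    #             print('That is not a valid word. Please choose another word.')
    #         hand = update_hand(hand, word)
    #     print('Total score:', total_score, 'points')
    #     return total_score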
#
# Problem #6: Playing a game
#
#
# procedure you will use to substitute a letter in a hand
#
def substitute_hand(hand, letter):
"""
Allow the user to replace all copies of one letter in the hand (chosen by user)
with a new letter chosen from the VOWELS and CONSONANTS at random. The new letter
should be different from user's choice, and should not be any of the letters
already in the hand.
    If the user provides a letter not in the hand, the hand should remain the same.
Has no side effects: does not mutate hand.
For example:
substitute_hand({'h':1, 'e':1, 'l':2, 'o':1}, 'l')
might return:
{'h':1, 'e':1, 'o':1, 'x':2} -> if the new letter is 'x'
The new letter should not be 'h', 'e', 'l', or 'o' since those letters were
already in the hand.
hand: dictionary (string -> int)
letter: string
returns: dictionary (string -> int)
"""
pass # TO DO... Remove this line when you implement this function
def play_game(word_list):
"""
Allow the user to play a series of hands
* Asks the user to input a total number of hands
* Accumulates the score for each hand into a total score for the
entire series
* For each hand, before playing, ask the user if they want to substitute
one letter for another. If the user inputs 'yes', prompt them for their
desired letter. This can only be done once during the game. Once the
    substitute option is used, the user should not be asked if they want to
substitute letters in the future.
* For each hand, ask the user if they would like to replay the hand.
If the user inputs 'yes', they will replay the hand and keep
the better of the two scores for that hand. This can only be done once
during the game. Once the replay option is used, the user should not
be asked if they want to replay future hands. Replaying the hand does
not count as one of the total number of hands the user initially
wanted to play.
* Note: if you replay a hand, you do not get the option to substitute
a letter - you must play whatever hand you just had.
* Returns the total score for the series of hands
word_list: list of lowercase strings
"""
print("play_game not implemented.") # TO DO... Remove this line when you implement this function
#
# Build data structures used for entire session and play game
# Do not remove the "if __name__ == '__main__':" line - this code is executed
# when the program is run directly, instead of through an import statement
#
if __name__ == '__main__':
word_list = load_words()
play_game(word_list)
| {
"repo_name": "indefinitelee/Learning",
"path": "MIT_60001_Introduction_to_Computer_Science_and_Programming_in_Python/assignments/PS3/ps3.py",
"copies": "1",
"size": "10691",
"license": "mit",
"hash": 6662195679950383000,
"line_mean": 28.9884057971,
"line_max": 212,
"alpha_frac": 0.6089233935,
"autogenerated": false,
"ratio": 3.9655044510385755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5074427844538576,
"avg_score": null,
"num_lines": null
} |
# 6.00.1x
# Problem Set 7 Test Suite
import unittest
import sys
from ps7 import *
class ProblemSet7NewsStory(unittest.TestCase):
def setUp(self):
pass
def testNewsStoryConstructor(self):
story = NewsStory('', '', '', '', '')
def testNewsStoryGetGuid(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getGuid(), 'test guid')
def testNewsStoryGetTitle(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getTitle(), 'test title')
def testNewsStoryGetSubject(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getSubject(), 'test subject')
def testNewsStoryGetSummary(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getSummary(), 'test summary')
def testNewsStoryGetLink(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getLink(), 'test link')
class ProblemSet7(unittest.TestCase):
def setUp(self):
class TrueTrigger:
def evaluate(self, story): return True
class FalseTrigger:
def evaluate(self, story): return False
self.tt = TrueTrigger()
self.tt2 = TrueTrigger()
self.ft = FalseTrigger()
self.ft2 = FalseTrigger()
def test1TitleTrigger(self):
koala = NewsStory('', 'Koala bears are soft and cuddly', '', '', '')
pillow = NewsStory('', 'I prefer pillows that are soft.', '', '', '')
soda = NewsStory('', 'Soft drinks are great', '', '', '')
pink = NewsStory('', "Soft's the new pink!", '', '', '')
football = NewsStory('', '"Soft!" he exclaimed as he threw the football', '', '', '')
microsoft = NewsStory('', 'Microsoft announced today that pillows are bad', '', '', '')
nothing = NewsStory('', 'Reuters reports something really boring', '', '' ,'')
caps = NewsStory('', 'soft things are soft', '', '', '')
s1 = TitleTrigger('SOFT')
s2 = TitleTrigger('soft')
for trig in [s1, s2]:
self.assertTrue(trig.evaluate(koala), "TitleTrigger failed to fire when the word appeared in the title")
self.assertTrue(trig.evaluate(pillow), "TitleTrigger failed to fire when the word had punctuation on it")
self.assertTrue(trig.evaluate(soda), "TitleTrigger failed to fire when the case was different")
self.assertTrue(trig.evaluate(pink), "TitleTrigger failed to fire when the word had an apostrophe on it")
self.assertTrue(trig.evaluate(football), "TitleTrigger failed to fire in the presence of lots of punctuation")
self.assertTrue(trig.evaluate(caps), "TitleTrigger is case-sensitive and shouldn't be")
self.assertFalse(trig.evaluate(microsoft), "TitleTrigger fired when the word was present, but not as its own word (e.g. 'soft' and 'Microsoft)'")
self.assertFalse(trig.evaluate(nothing), "TitleTrigger fired when the word wasn't really present in the title")
def test2SubjectTrigger(self):
koala = NewsStory('', '', 'Koala bears are soft and cuddly', '', '')
pillow = NewsStory('', '', 'I prefer pillows that are soft.', '', '')
soda = NewsStory('', '', 'Soft drinks are great', '', '')
pink = NewsStory('', '', "Soft's the new pink!", '', '')
football = NewsStory('', '', '"Soft!" he exclaimed as he threw the football', '', '')
microsoft = NewsStory('', '', 'Microsoft announced today that pillows are bad', '', '')
nothing = NewsStory('', '', 'Reuters reports something really boring', '', '')
caps = NewsStory('', '', 'soft things are soft', '', '')
s1 = SubjectTrigger('SOFT')
s2 = SubjectTrigger('soft')
for trig in [s1, s2]:
self.assertTrue(trig.evaluate(koala), "SubjectTrigger failed to fire when the word appeared in the subject")
self.assertTrue(trig.evaluate(pillow), "SubjectTrigger failed to fire when the word had punctuation on it")
self.assertTrue(trig.evaluate(soda), "SubjectTrigger failed to fire when the case was different")
self.assertTrue(trig.evaluate(pink), "SubjectTrigger failed to fire when the word had an apostrophe on it")
self.assertTrue(trig.evaluate(football), "SubjectTrigger failed to fire in the presence of lots of punctuation")
self.assertTrue(trig.evaluate(caps), "SubjectTrigger is case-sensitive and shouldn't be")
self.assertFalse(trig.evaluate(microsoft), "SubjectTrigger fired when the word was present, but not as its own word (e.g. 'soft' and 'Microsoft)'")
self.assertFalse(trig.evaluate(nothing), "SubjectTrigger fired when the word wasn't really present in the subject")
def test3SummaryTrigger(self):
koala = NewsStory('', '', '', 'Koala bears are soft and cuddly', '')
pillow = NewsStory('', '', '', 'I prefer pillows that are soft.', '')
soda = NewsStory('', '', '', 'Soft drinks are great', '')
pink = NewsStory('', '', '', "Soft's the new pink!", '')
football = NewsStory('', '', '', '"Soft!" he exclaimed as he threw the football', '')
microsoft = NewsStory('', '', '', 'Microsoft announced today that pillows are bad', '')
nothing = NewsStory('', '', '', 'Reuters reports something really boring', '')
caps = NewsStory('', '', '', 'soft things are soft', '')
s1 = SummaryTrigger('SOFT')
s2 = SummaryTrigger('soft')
for trig in [s1, s2]:
self.assertTrue(trig.evaluate(koala), "SummaryTrigger failed to fire when the word appeared in the summary.")
self.assertTrue(trig.evaluate(pillow), "SummaryTrigger failed to fire when the word had punctuation on it")
self.assertTrue(trig.evaluate(soda), "SummaryTrigger failed to fire when the case was different")
self.assertTrue(trig.evaluate(pink), "SummaryTrigger failed to fire when the word had an apostrophe on it")
self.assertTrue(trig.evaluate(football), "SummaryTrigger failed to fire in the presence of lots of punctuation")
self.assertTrue(trig.evaluate(caps), "SummaryTrigger is case-sensitive and shouldn't be")
self.assertFalse(trig.evaluate(microsoft), "SummaryTrigger fired when the word was present, but not as its own word (e.g. 'soft' and 'Microsoft)'")
self.assertFalse(trig.evaluate(nothing), "SummaryTrigger fired when the word wasn't really present in the summary")
def test4NotTrigger(self):
n = NotTrigger(self.tt)
b = NewsStory("guid", "title", "subj", "summary", "link")
self.assertFalse(n.evaluate(b), "A NOT trigger applied to 'always true' DID NOT return false")
y = NotTrigger(self.ft)
self.assertTrue(y.evaluate(b), "A NOT trigger applied to 'always false' DID NOT return true")
def test5AndTrigger(self):
yy = AndTrigger(self.tt, self.tt2)
yn = AndTrigger(self.tt, self.ft)
ny = AndTrigger(self.ft, self.tt)
nn = AndTrigger(self.ft, self.ft2)
b = NewsStory("guid", "title", "subj", "summary", "link")
self.assertTrue(yy.evaluate(b), "AND of 'always true' and 'always true' should be true")
self.assertFalse(yn.evaluate(b), "AND of 'always true' and 'always false' should be false")
self.assertFalse(ny.evaluate(b), "AND of 'always false' and 'always true' should be false")
self.assertFalse(nn.evaluate(b), "AND of 'always false' and 'always false' should be false")
def test6OrTrigger(self):
yy = OrTrigger(self.tt, self.tt2)
yn = OrTrigger(self.tt, self.ft)
ny = OrTrigger(self.ft, self.tt)
nn = OrTrigger(self.ft, self.ft2)
b = NewsStory("guid", "title", "subj", "summary", "link")
self.assertTrue(yy.evaluate(b), "OR of 'always true' and 'always true' should be true")
self.assertTrue(yn.evaluate(b), "OR of 'always true' and 'always false' should be true")
self.assertTrue(ny.evaluate(b), "OR of 'always false' and 'always true' should be true")
self.assertFalse(nn.evaluate(b), "OR of 'always false' and 'always false' should be false")
def test7PhraseTrigger(self):
pt = PhraseTrigger("New York City")
a = NewsStory('', "asfdNew York Cityasfdasdfasdf", '', '', '')
b = NewsStory('', '', "asdfasfdNew York Cityasfdasdfasdf", '', '')
c = NewsStory('', '', '', "asdfasfdNew York Cityasfdasdfasdf", '')
noa = NewsStory('', "something something new york city", '', '', '')
nob = NewsStory('', '', "something something new york city", '', '')
noc = NewsStory('', '', '', "something something new york city", '')
self.assertTrue(pt.evaluate(a), "PhraseTrigger doesn't find phrase in title")
self.assertTrue(pt.evaluate(b), "PhraseTrigger doesn't find phrase in subject")
self.assertTrue(pt.evaluate(c), "PhraseTrigger doesn't find phrase in summary")
for s in [noa, nob, noc]:
self.assertFalse(pt.evaluate(s), "PhraseTrigger is case-insensitive, and shouldn't be")
def test8FilterStories(self):
pt = PhraseTrigger("New York City")
a = NewsStory('', "asfdNew York Cityasfdasdfasdf", '', '', '')
b = NewsStory('', '', "asdfasfdNew York Cityasfdasdfasdf", '', '')
c = NewsStory('', '', '', "asdfasfdNew York Cityasfdasdfasdf", '')
noa = NewsStory('', "something something new york city", '', '', '')
nob = NewsStory('', '', "something something new york city", '', '')
noc = NewsStory('', '', '', "something something new york city", '')
triggers = [pt, self.tt, self.ft]
stories = [a, b, c, noa, nob, noc]
filteredStories = filterStories(stories, triggers)
for story in stories:
self.assertTrue(story in filteredStories)
filteredStories = filterStories(stories, [self.ft])
self.assertEquals(len(filteredStories), 0)
def test8FilterStories2(self):
pt = PhraseTrigger("New York City")
a = NewsStory('', "asfdNew York Cityasfdasdfasdf", '', '', '')
b = NewsStory('', '', "asdfasfdNew York Cityasfdasdfasdf", '', '')
c = NewsStory('', '', '', "asdfasfdNew York Cityasfdasdfasdf", '')
noa = NewsStory('', "something something new york city", '', '', '')
nob = NewsStory('', '', "something something new york city", '', '')
noc = NewsStory('', '', '', "something something new york city", '')
class MatchTrigger(Trigger):
def __init__(self, story):
self.story = story
def evaluate(self, story):
return story == self.story
triggers = [MatchTrigger(a), MatchTrigger(nob)]
stories = [a, b, c, noa, nob, noc]
filteredStories = filterStories(stories, triggers)
self.assertTrue(a in filteredStories)
self.assertTrue(nob in filteredStories)
self.assertEquals(2, len(filteredStories))
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ProblemSet7NewsStory))
suite.addTest(unittest.makeSuite(ProblemSet7))
# unittest.TextTestRunner(verbosity=2).run(suite)
unittest.TextTestRunner(verbosity=2, stream=sys.stdout).run(suite)
| {
"repo_name": "spradeepv/dive-into-python",
"path": "edx/problem_set_7/ps7_test.py",
"copies": "1",
"size": "11910",
"license": "mit",
"hash": 338355974331948700,
"line_mean": 55.7142857143,
"line_max": 159,
"alpha_frac": 0.6130982368,
"autogenerated": false,
"ratio": 3.901080904028824,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.995087304514017,
"avg_score": 0.01266121913773076,
"num_lines": 210
} |
# 6.00.1x Problem Set 7
# RSS Feed Filter
import feedparser
import string
import time
from project_util import translate_html
from Tkinter import *
#-----------------------------------------------------------------------
#
# Problem Set 7
#======================
# Code for retrieving and parsing RSS feeds
# Do not change this code
#======================
def process(url):
"""
Fetches news items from the rss url and parses them.
Returns a list of NewsStory-s.
"""
feed = feedparser.parse(url)
entries = feed.entries
ret = []
for entry in entries:
guid = entry.guid
title = translate_html(entry.title)
link = entry.link
summary = translate_html(entry.summary)
try:
subject = translate_html(entry.tags[0]['term'])
except AttributeError:
subject = ""
newsStory = NewsStory(guid, title, subject, summary, link)
ret.append(newsStory)
return ret
#======================
#======================
# Part 1
# Data structure design
#======================
# Problem 1
class NewsStory(object):
def __init__(self, news_guid, news_title, news_subject, news_summary, news_link):
self.news_guid = news_guid
self.news_title = news_title
self.news_subject = news_subject
self.news_summary = news_summary
self.news_link = news_link
def getGuid(self):
return self.news_guid
def getTitle(self):
return self.news_title
def getSubject(self):
return self.news_subject
def getSummary(self):
return self.news_summary
def getLink(self):
return self.news_link
#======================
# Part 2
# Triggers
#======================
class Trigger(object):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
raise NotImplementedError
# Whole Word Triggers
# Problems 2-5
class WordTrigger(Trigger):
def __init__(self, word):
self.word = string.lower(word)
def isWordIn(self, text):
text = string.lower(text)
for c in string.punctuation:
text = string.replace(text, c, ' ')
words = string.split(text, ' ')
return self.word in words
class TitleTrigger(WordTrigger):
def evaluate(self, story):
return self.isWordIn(story.getTitle())
class SubjectTrigger(WordTrigger):
def evaluate(self, story):
return self.isWordIn(story.getSubject())
class SummaryTrigger(WordTrigger):
def evaluate(self, story):
return self.isWordIn(story.getSummary())
# Composite Triggers
# Problems 6-8
class NotTrigger(Trigger):
def __init__(self, trigger):
self.trigger = trigger
def evaluate(self, story):
return not self.trigger.evaluate(story)
class AndTrigger(Trigger):
def __init__(self, trigger1, trigger2):
self.trigger1 = trigger1
self.trigger2 = trigger2
def evaluate(self, story):
return self.trigger1.evaluate(story) and self.trigger2.evaluate(story)
class OrTrigger(Trigger):
def __init__(self, trigger1, trigger2):
self.trigger1 = trigger1
self.trigger2 = trigger2
def evaluate(self, story):
return self.trigger1.evaluate(story) or self.trigger2.evaluate(story)
# Phrase Trigger
# Question 9
class PhraseTrigger(Trigger):
def __init__(self, phrase):
self.phrase = phrase
def evaluate(self, story):
return self.phrase in story.getTitle() or self.phrase in story.getSummary() or self.phrase in story.getSubject()
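# Unlike the WordTrigger subclasses above, PhraseTrigger is a plain
# (case-sensitive) substring match. For example:
#   PhraseTrigger('New York City').evaluate(
#       NewsStory('', 'asfdNew York Cityasfd', '', '', ''))   # -> True
# but a title containing only 'new york city' (lowercase) does not fire.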
#======================
# Part 3
# Filtering
#======================
def filterStories(stories, triggerlist):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerlist fires.
"""
# TODO: Problem 10
filteredStories = []
for story in stories:
for trigger in triggerlist:
if trigger.evaluate(story):
filteredStories.append(story)
break
return filteredStories
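# Example: with t1 = TitleTrigger('Obama') and t2 = SubjectTrigger('Romney'),
# filterStories(stories, [t1, t2]) keeps every story that fires *either*
# trigger, and the break ensures each story is appended at most once.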
#======================
# Part 4
# User-Specified Triggers
#======================
def makeTrigger(triggerMap, triggerType, params, name):
"""
Takes in a map of names to trigger instance, the type of trigger to make,
and the list of parameters to the constructor, and adds a new trigger
to the trigger map dictionary.
triggerMap: dictionary with names as keys (strings) and triggers as values
triggerType: string indicating the type of trigger to make (ex: "TITLE")
params: list of strings with the inputs to the trigger constructor (ex: ["world"])
name: a string representing the name of the new trigger (ex: "t1")
Modifies triggerMap, adding a new key-value pair for this trigger.
Returns a new instance of a trigger (ex: TitleTrigger, AndTrigger).
"""
if triggerType == 'TITLE':
triggerMap[name] = TitleTrigger(params[0])
if triggerType == 'SUBJECT':
triggerMap[name] = SubjectTrigger(params[0])
if triggerType == 'SUMMARY':
triggerMap[name] = SummaryTrigger(params[0])
if triggerType == 'NOT':
triggerMap[name] = NotTrigger(triggerMap[params[0]])
if triggerType == 'AND':
triggerMap[name] = AndTrigger(triggerMap[params[0]], triggerMap[params[1]])
if triggerType == 'OR':
triggerMap[name] = OrTrigger(triggerMap[params[0]], triggerMap[params[1]])
if triggerType == 'PHRASE':
triggerMap[name] = PhraseTrigger(string.join(params))
return triggerMap[name]
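# Example of the configuration lines this function is driven by (a
# hypothetical triggers.txt; the name comes first, then the trigger type,
# then its parameters - see readTriggerConfig below):
#   t1 TITLE Obama
#   t2 SUBJECT Romney
#   t3 PHRASE Supreme Court
#   t4 OR t2 t3
#   ADD t1 t4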
def readTriggerConfig(filename):
"""
Returns a list of trigger objects
that correspond to the rules set
in the file filename
"""
# Here's some code that we give you
# to read in the file and eliminate
# blank lines and comments
triggerfile = open(filename, "r")
all = [line.rstrip() for line in triggerfile.readlines()]
lines = []
for line in all:
if len(line) == 0 or line[0] == '#':
continue
lines.append(line)
triggers = []
triggerMap = {}
# Be sure you understand this code - we've written it for you,
# but it's code you should be able to write yourself
for line in lines:
linesplit = line.split(" ")
# Making a new trigger
if linesplit[0] != "ADD":
trigger = makeTrigger(triggerMap, linesplit[1],
linesplit[2:], linesplit[0])
# Add the triggers to the list
else:
for name in linesplit[1:]:
triggers.append(triggerMap[name])
return triggers
import thread
SLEEPTIME = 60 #seconds -- how often we poll
def main_thread(master):
# A sample trigger list - you'll replace
# this with something more configurable in Problem 11
try:
# These will probably generate a few hits...
t1 = TitleTrigger("Obama")
t2 = SubjectTrigger("Romney")
t3 = PhraseTrigger("Election")
t4 = OrTrigger(t2, t3)
triggerlist = [t1, t4]
# TODO: Problem 11
# After implementing makeTrigger, uncomment the line below:
triggerlist = readTriggerConfig("triggers.txt")
# **** from here down is about drawing ****
frame = Frame(master)
frame.pack(side=BOTTOM)
scrollbar = Scrollbar(master)
scrollbar.pack(side=RIGHT, fill=Y)
t = "Google & Yahoo Top News"
title = StringVar()
title.set(t)
ttl = Label(master, textvariable=title, font=("Helvetica", 18))
ttl.pack(side=TOP)
cont = Text(master, font=("Helvetica", 14), yscrollcommand=scrollbar.set)
cont.pack(side=BOTTOM)
cont.tag_config("title", justify='center')
button = Button(frame, text="Exit", command=root.destroy)
button.pack(side=BOTTOM)
# Gather stories
guidShown = []
def get_cont(newstory):
if newstory.getGuid() not in guidShown:
cont.insert(END, newstory.getTitle() + "\n", "title")
cont.insert(END, "\n---------------------------------------------------------------\n", "title")
cont.insert(END, newstory.getSummary())
cont.insert(END, "\n*********************************************************************\n", "title")
guidShown.append(newstory.getGuid())
while True:
print "Polling . . .",
# Get stories from Google's Top Stories RSS news feed
stories = process("http://news.google.com/?output=rss")
# Get stories from Yahoo's Top Stories RSS news feed
stories.extend(process("http://rss.news.yahoo.com/rss/topstories"))
# Process the stories
stories = filterStories(stories, triggerlist)
map(get_cont, stories)
scrollbar.config(command=cont.yview)
print "Sleeping..."
time.sleep(SLEEPTIME)
except Exception as e:
print e
if __name__ == '__main__':
root = Tk()
root.title("Some RSS parser")
thread.start_new_thread(main_thread, (root,))
root.mainloop()
| {
"repo_name": "FylmTM/edX-code",
"path": "MITx_6.00.1x/problem_set_7/ps7.py",
"copies": "1",
"size": "9271",
"license": "mit",
"hash": 736300026658741800,
"line_mean": 27.0939393939,
"line_max": 120,
"alpha_frac": 0.5917376766,
"autogenerated": false,
"ratio": 4.041412380122058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5133150056722058,
"avg_score": null,
"num_lines": null
} |
# 6.00.1x Problem Set 7
# RSS Feed Filter
import string
import time
from Tkinter import *
import feedparser
from project_util import translate_html
# Problem Set 7
#======================
# Code for retrieving and parsing RSS feeds
# Do not change this code
#======================
def process(url):
"""
Fetches news items from the rss url and parses them.
Returns a list of NewsStory-s.
"""
feed = feedparser.parse(url)
entries = feed.entries
ret = []
for entry in entries:
guid = entry.guid
title = translate_html(entry.title)
link = entry.link
summary = translate_html(entry.summary)
try:
subject = translate_html(entry.tags[0]['term'])
except AttributeError:
subject = ""
newsStory = NewsStory(guid, title, subject, summary, link)
ret.append(newsStory)
return ret
#======================
#======================
# Part 1
# Data structure design
#======================
# Problem 1
class NewsStory:
def __init__(self, guid, title, subject, summary, link):
self.guid = guid
self.title = title
self.subject = subject
self.summary = summary
self.link = link
def getGuid(self):
return self.guid
def getTitle(self):
return self.title
def getSubject(self):
return self.subject
def getSummary(self):
return self.summary
def getLink(self):
return self.link
#======================
# Part 2
# Triggers
#======================
class Trigger(object):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
raise NotImplementedError
# Whole Word Triggers
# Problems 2-5
class WordTrigger(Trigger):
def __init__(self, word):
self.word = string.lower(word)
def isWordIn(self, text):
# Convert all letters to lowercase
cleanedText = string.lower(text)
# Replace punctuation characters with spaces
for char in string.punctuation:
cleanedText = string.replace(cleanedText, char, " ")
# Build a list of all the words in the text
wordList = string.split(cleanedText)
return self.word in wordList
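# Example: with self.word == 'soft', the text 'I prefer pillows that are soft.'
# is lowercased, its punctuation becomes spaces, and it is split into words,
# so the trailing period does not block the match (-> True); a text containing
# 'soft' only inside another word, such as 'Microsoft', does not match (-> False).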
class TitleTrigger(WordTrigger):
def __init__(self, word):
WordTrigger.__init__(self, word)
def evaluate(self, story):
'''
:param story: NewsStory
:return: if the title of the story contains the given word
'''
return self.isWordIn(story.getTitle())
class SubjectTrigger(WordTrigger):
def __init__(self, word):
WordTrigger.__init__(self, word)
def evaluate(self, story):
'''
:param story: NewsStory
:return: if the subject of the story contains the given word
'''
return self.isWordIn(story.getSubject())
class SummaryTrigger(WordTrigger):
def __init__(self, word):
WordTrigger.__init__(self, word)
def evaluate(self, story):
'''
:param story: NewsStory
:return: if the summary of the story contains the given word
'''
return self.isWordIn(story.getSummary())
# Composite Triggers
# Problems 6-8
class NotTrigger(Trigger):
def __init__(self, trigger):
self.trigger = trigger
def evaluate(self, story):
return not self.trigger.evaluate(story)
class AndTrigger(Trigger):
def __init__(self, firstTrigger, secondTrigger):
self.firstTrigger = firstTrigger
self.secondTrigger = secondTrigger
def evaluate(self, story):
return self.firstTrigger.evaluate(story) and self.secondTrigger.evaluate(story)
class OrTrigger(Trigger):
def __init__(self, firstTrigger, secondTrigger):
self.firstTrigger = firstTrigger
self.secondTrigger = secondTrigger
def evaluate(self, story):
return self.firstTrigger.evaluate(story) or self.secondTrigger.evaluate(story)
# Phrase Trigger
# Question 9
class PhraseTrigger(Trigger):
def __init__(self, phrase):
self.phrase = phrase
def evaluate(self, story):
return self.phrase in story.getTitle() or self.phrase in story.getSubject() or self.phrase in story.getSummary()
#======================
# Part 3
# Filtering
#======================
def filterStories(stories, triggerList):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerList fires.
"""
filteredStories = []
for story in stories:
filtered = False
for trigger in triggerList:
if trigger.evaluate(story):
filtered = True
break
if filtered:
filteredStories.append(story)
return filteredStories
#======================
# Part 4
# User-Specified Triggers
#======================
def makeTrigger(triggerMap, triggerType, params, name):
"""
Takes in a map of names to trigger instance, the type of trigger to make,
and the list of parameters to the constructor, and adds a new trigger
to the trigger map dictionary.
triggerMap: dictionary with names as keys (strings) and triggers as values
triggerType: string indicating the type of trigger to make (ex: "TITLE")
params: list of strings with the inputs to the trigger constructor (ex: ["world"])
name: a string representing the name of the new trigger (ex: "t1")
Modifies triggerMap, adding a new key-value pair for this trigger.
Returns a new instance of a trigger (ex: TitleTrigger, AndTrigger).
"""
trigger = None
if triggerType == 'TITLE':
trigger = TitleTrigger(params[0])
elif triggerType == 'SUBJECT':
trigger = SubjectTrigger(params[0])
elif triggerType == 'SUMMARY':
trigger = SummaryTrigger(params[0])
elif triggerType == 'NOT':
t1 = triggerMap.get(params[0])
if t1 is not None:
trigger = NotTrigger(t1)
elif triggerType == 'AND':
t1 = triggerMap.get(params[0])
t2 = triggerMap.get(params[1])
if t1 is not None and t2 is not None:
trigger = AndTrigger(t1, t2)
elif triggerType == 'OR':
t1 = triggerMap.get(params[0])
t2 = triggerMap.get(params[1])
if t1 is not None and t2 is not None:
trigger = OrTrigger(t1, t2)
elif triggerType == 'PHRASE':
trigger = PhraseTrigger(string.join(params, " "))
if trigger is not None and name not in triggerMap.keys():
triggerMap[name] = trigger
return trigger
def readTriggerConfig(filename):
"""
Returns a list of trigger objects
that correspond to the rules set
in the file filename
"""
# Here's some code that we give you
# to read in the file and eliminate
# blank lines and comments
triggerfile = open(filename, "r")
all = [ line.rstrip() for line in triggerfile.readlines() ]
lines = []
for line in all:
if len(line) == 0 or line[0] == '#':
continue
lines.append(line)
triggers = []
triggerMap = {}
# Be sure you understand this code - we've written it for you,
# but it's code you should be able to write yourself
for line in lines:
linesplit = line.split(" ")
# Making a new trigger
if linesplit[0] != "ADD":
trigger = makeTrigger(triggerMap, linesplit[1],
linesplit[2:], linesplit[0])
# Add the triggers to the list
else:
for name in linesplit[1:]:
triggers.append(triggerMap[name])
return triggers
import thread
SLEEPTIME = 60 #seconds -- how often we poll
def main_thread(master):
# A sample trigger list - you'll replace
# this with something more configurable in Problem 11
try:
# These will probably generate a few hits...
t1 = TitleTrigger("Obama")
t2 = SubjectTrigger("Romney")
t3 = PhraseTrigger("Election")
t4 = OrTrigger(t2, t3)
triggerlist = [t1, t4]
# TODO: Problem 11
# After implementing makeTrigger, uncomment the line below:
# triggerlist = readTriggerConfig("triggers.txt")
# **** from here down is about drawing ****
frame = Frame(master)
frame.pack(side=BOTTOM)
scrollbar = Scrollbar(master)
scrollbar.pack(side=RIGHT,fill=Y)
t = "Google & Yahoo Top News"
title = StringVar()
title.set(t)
ttl = Label(master, textvariable=title, font=("Helvetica", 18))
ttl.pack(side=TOP)
cont = Text(master, font=("Helvetica",14), yscrollcommand=scrollbar.set)
cont.pack(side=BOTTOM)
cont.tag_config("title", justify='center')
button = Button(frame, text="Exit", command=root.destroy)
button.pack(side=BOTTOM)
# Gather stories
guidShown = []
def get_cont(newstory):
if newstory.getGuid() not in guidShown:
cont.insert(END, newstory.getTitle()+"\n", "title")
cont.insert(END, "\n---------------------------------------------------------------\n", "title")
cont.insert(END, newstory.getSummary())
cont.insert(END, "\n*********************************************************************\n", "title")
guidShown.append(newstory.getGuid())
while True:
print "Polling . . .",
# Get stories from Google's Top Stories RSS news feed
stories = process("http://news.google.com/?output=rss")
# Get stories from Yahoo's Top Stories RSS news feed
stories.extend(process("http://rss.news.yahoo.com/rss/topstories"))
# Process the stories
stories = filterStories(stories, triggerlist)
map(get_cont, stories)
scrollbar.config(command=cont.yview)
print "Sleeping..."
time.sleep(SLEEPTIME)
except Exception as e:
print e
if __name__ == '__main__':
root = Tk()
root.title("Some RSS parser")
thread.start_new_thread(main_thread, (root,))
root.mainloop() | {
"repo_name": "nicola88/edx",
"path": "MITx/6.00.1x/Week-7/Problem-Set-7/ps7.py",
"copies": "1",
"size": "10277",
"license": "mit",
"hash": 7510099204902020000,
"line_mean": 28.1161473088,
"line_max": 120,
"alpha_frac": 0.5915150336,
"autogenerated": false,
"ratio": 4.122342559165664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5213857592765664,
"avg_score": null,
"num_lines": null
} |
# 6.00.2x Problem Set 2: Simulating robots
import math
import random
import ps2_visualize
import pylab
# For Python 2.7:
from ps2_verify_movement27 import testRobotMovement
# If you get a "Bad magic number" ImportError, you are not using
# Python 2.7 and using most likely Python 2.6:
# === Provided class Position
class Position(object):
"""
A Position represents a location in a two-dimensional room.
"""
def __init__(self, x, y):
"""
Initializes a position with coordinates (x, y).
"""
self.x = x
self.y = y
def getX(self):
return self.x
def getY(self):
return self.y
def getNewPosition(self, angle, speed):
"""
Computes and returns the new Position after a single clock-tick has
passed, with this object as the current position, and with the
specified angle and speed.
Does NOT test whether the returned position fits inside the room.
angle: number representing angle in degrees, 0 <= angle < 360
speed: positive float representing speed
Returns: a Position object representing the new position.
"""
old_x, old_y = self.getX(), self.getY()
angle = float(angle)
# Compute the change in position
delta_y = speed * math.cos(math.radians(angle))
delta_x = speed * math.sin(math.radians(angle))
# Add that to the existing position
new_x = old_x + delta_x
new_y = old_y + delta_y
return Position(new_x, new_y)
def __str__(self):
return "(%0.2f, %0.2f)" % (self.x, self.y)
# === Problem 1
class RectangularRoom(object):
"""
A RectangularRoom represents a rectangular region containing clean or dirty
tiles.
A room has a width and a height and contains (width * height) tiles. At any
particular time, each of these tiles is either clean or dirty.
"""
def __init__(self, width, height):
"""
Initializes a rectangular room with the specified width and height.
Initially, no tiles in the room have been cleaned.
width: an integer > 0
height: an integer > 0
"""
self.width = width
self.height = height
self.tiles = {}
for idx in range(self.width):
for idy in range(self.height):
self.tiles[(idx, idy)] = 0
def cleanTileAtPosition(self, pos):
"""
Mark the tile under the position POS as cleaned.
Assumes that POS represents a valid position inside this room.
pos: a Position
"""
self.tiles[(int(pos.getX()), int(pos.getY()))] = 1
def isTileCleaned(self, m, n):
"""
Return True if the tile (m, n) has been cleaned.
Assumes that (m, n) represents a valid tile inside the room.
m: an integer
n: an integer
returns: True if (m, n) is cleaned, False otherwise
"""
return self.tiles[(m, n)] == 1
def getNumTiles(self):
"""
Return the total number of tiles in the room.
returns: an integer
"""
return self.width * self.height
def getNumCleanedTiles(self):
"""
Return the total number of clean tiles in the room.
returns: an integer
"""
return sum(self.tiles.values())
def getRandomPosition(self):
"""
Return a random position inside the room.
returns: a Position object.
"""
rdx = self.width * random.random()
rdy = self.height * random.random()
return Position(rdx, rdy)
def isPositionInRoom(self, pos):
"""
Return True if pos is inside the room.
pos: a Position object.
returns: True if pos is in the room, False otherwise.
"""
return 0 <= pos.getX() < (self.width) and 0 <= pos.getY() < (self.height)
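# Example: in a 5 x 5 room, Position(4.99, 0.0) is inside, but
# Position(5.0, 2.0) is not (the upper bounds are exclusive).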
class Robot(object):
"""
Represents a robot cleaning a particular room.
At all times the robot has a particular position and direction in the room.
The robot also has a fixed speed.
Subclasses of Robot should provide movement strategies by implementing
updatePositionAndClean(), which simulates a single time-step.
"""
def __init__(self, room, speed):
"""
Initializes a Robot with the given speed in the specified room. The
robot initially has a random direction and a random position in the
room. The robot cleans the tile it is on.
room: a RectangularRoom object.
speed: a float (speed > 0)
"""
self.room = room
self.speed = speed
self.setRobotPosition(room.getRandomPosition())
self.room.cleanTileAtPosition(self.getRobotPosition())
        self.setRobotDirection(random.randrange(360))  # 0 <= direction < 360
def getRobotPosition(self):
"""
Return the position of the robot.
returns: a Position object giving the robot's position.
"""
return self.position
def getRobotDirection(self):
"""
Return the direction of the robot.
returns: an integer d giving the direction of the robot as an angle in
degrees, 0 <= d < 360.
"""
return self.direction
def setRobotPosition(self, position):
"""
Set the position of the robot to POSITION.
position: a Position object.
"""
self.position = Position(position.getX(), position.getY())
def setRobotDirection(self, direction):
"""
Set the direction of the robot to DIRECTION.
direction: integer representing an angle in degrees
"""
self.direction = direction
def updatePositionAndClean(self):
"""
        Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
raise NotImplementedError # don't change this!
# === Problem 2
class StandardRobot(Robot):
"""
A StandardRobot is a Robot with the standard movement strategy.
At each time-step, a StandardRobot attempts to move in its current
direction; when it would hit a wall, it *instead* chooses a new direction
randomly.
"""
def updatePositionAndClean(self):
"""
        Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
position = self.getRobotPosition()
direction = self.getRobotDirection()
newPosition = position.getNewPosition(direction, self.speed)
while (not self.room.isPositionInRoom(newPosition)):
            direction = random.randrange(360)
newPosition = position.getNewPosition(direction, self.speed)
self.setRobotPosition(newPosition)
self.room.cleanTileAtPosition(self.position)
if (direction != self.getRobotDirection()):
self.setRobotDirection(direction)
# Uncomment this line to see your implementation of StandardRobot in action!
# testRobotMovement(StandardRobot, RectangularRoom)
# === Problem 3
def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,
robot_type):
"""
Runs NUM_TRIALS trials of the simulation and returns the mean number of
time-steps needed to clean the fraction MIN_COVERAGE of the room.
The simulation is run with NUM_ROBOTS robots of type ROBOT_TYPE, each with
speed SPEED, in a room of dimensions WIDTH x HEIGHT.
num_robots: an int (num_robots > 0)
speed: a float (speed > 0)
width: an int (width > 0)
height: an int (height > 0)
min_coverage: a float (0 <= min_coverage <= 1.0)
num_trials: an int (num_trials > 0)
robot_type: class of robot to be instantiated (e.g. StandardRobot or
RandomWalkRobot)
"""
total = 0
for t in range(num_trials):
robots = []
room = RectangularRoom(width, height)
for n in range(num_robots):
robots.append(robot_type(room, speed))
coverage = float(room.getNumCleanedTiles()) / float(room.getNumTiles())
count = 0
while (coverage < min_coverage):
count += 1
for robot in robots:
robot.updatePositionAndClean()
coverage = float(room.getNumCleanedTiles()) / float(room.getNumTiles())
total += count
return float(total) / float(num_trials)
# raise NotImplementedError
# Uncomment this line to see how much your simulation takes on average
# print runSimulation(1, 1.0, 10, 10, 0.75, 30, RandomWalkRobot)
# === Problem 4
class RandomWalkRobot(Robot):
"""
A RandomWalkRobot is a robot with the "random walk" movement strategy: it
chooses a new direction at random at the end of each time-step.
"""
def updatePositionAndClean(self):
"""
Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
direction = self.getRobotDirection()
position = self.getRobotPosition()
newPosition = position.getNewPosition(direction, self.speed)
        direction = random.randrange(360)  # pick the direction for the next time-step
        while (not self.room.isPositionInRoom(newPosition)):
            direction = random.randrange(360)
newPosition = position.getNewPosition(direction, self.speed)
self.setRobotPosition(newPosition)
self.room.cleanTileAtPosition(self.position)
self.setRobotDirection(direction)
print runSimulation(1, 1.0, 10, 10, 0.75, 30, RandomWalkRobot)
def showPlot1(title, x_label, y_label):
"""
What information does the plot produced by this function tell you?
"""
num_robot_range = range(1, 11)
times1 = []
times2 = []
for num_robots in num_robot_range:
print "Plotting", num_robots, "robots..."
times1.append(runSimulation(num_robots, 1.0, 20, 20, 0.8, 20, StandardRobot))
times2.append(runSimulation(num_robots, 1.0, 20, 20, 0.8, 20, RandomWalkRobot))
pylab.plot(num_robot_range, times1)
pylab.plot(num_robot_range, times2)
pylab.title(title)
pylab.legend(('StandardRobot', 'RandomWalkRobot'))
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.show()
def showPlot2(title, x_label, y_label):
"""
What information does the plot produced by this function tell you?
"""
aspect_ratios = []
times1 = []
times2 = []
for width in [10, 20, 25, 50]:
height = 300/width
print "Plotting cleaning time for a room of width:", width, "by height:", height
aspect_ratios.append(float(width) / height)
times1.append(runSimulation(2, 1.0, width, height, 0.8, 200, StandardRobot))
times2.append(runSimulation(2, 1.0, width, height, 0.8, 200, RandomWalkRobot))
pylab.plot(aspect_ratios, times1)
pylab.plot(aspect_ratios, times2)
pylab.title(title)
pylab.legend(('StandardRobot', 'RandomWalkRobot'))
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.show()
# === Problem 5
#
# 1) Write a function call to showPlot1 that generates an appropriately-labeled
# plot.
#
# (... your call here ...)
#
#
# 2) Write a function call to showPlot2 that generates an appropriately-labeled
# plot.
#
# (... your call here ...)
#
| {
"repo_name": "xqliu/coursera",
"path": "6.00.2x_IntroductionToComputationalThinkingAndDataScience/ProblemSet2/ps2.py",
"copies": "1",
"size": "11499",
"license": "mit",
"hash": 3226943671527708700,
"line_mean": 30.3324250681,
"line_max": 88,
"alpha_frac": 0.6227498043,
"autogenerated": false,
"ratio": 4.0094142259414225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005051600659699071,
"num_lines": 367
} |
# 6.00.2x Problem Set 2: Simulating robots
import math
import random
import ps2_visualize
import pylab
# For Python 3.5:
from ps2_verify_movement35 import testRobotMovement
# If you get a "Bad magic number" ImportError, you are not using Python 3.5
# === Provided class Position
class Position(object):
"""
A Position represents a location in a two-dimensional room.
"""
def __init__(self, x, y):
"""
Initializes a position with coordinates (x, y).
"""
self.x = x
self.y = y
def getX(self):
return self.x
def getY(self):
return self.y
def getNewPosition(self, angle, speed):
"""
Computes and returns the new Position after a single clock-tick has
passed, with this object as the current position, and with the
specified angle and speed.
Does NOT test whether the returned position fits inside the room.
angle: number representing angle in degrees, 0 <= angle < 360
speed: positive float representing speed
Returns: a Position object representing the new position.
"""
old_x, old_y = self.getX(), self.getY()
angle = float(angle)
# Compute the change in position
delta_y = speed * math.cos(math.radians(angle))
delta_x = speed * math.sin(math.radians(angle))
# Add that to the existing position
new_x = old_x + delta_x
new_y = old_y + delta_y
return Position(new_x, new_y)
def __str__(self):
return "(%0.2f, %0.2f)" % (self.x, self.y)
# === Problem 1
class RectangularRoom(object):
"""
A RectangularRoom represents a rectangular region containing clean or dirty
tiles.
A room has a width and a height and contains (width * height) tiles. At any
particular time, each of these tiles is either clean or dirty.
"""
def __init__(self, width, height):
"""
Initializes a rectangular room with the specified width and height.
Initially, no tiles in the room have been cleaned.
width: an integer > 0
height: an integer > 0
"""
self.width = width
self.height = height
self.clean = []
# raise NotImplementedError
def cleanTileAtPosition(self, pos):
"""
Mark the tile under the position POS as cleaned.
Assumes that POS represents a valid position inside this room.
pos: a Position
"""
point = (int(pos.getX()), int(pos.getY()))
if point not in self.clean:
self.clean.append(point)
# raise NotImplementedError
def isTileCleaned(self, m, n):
"""
Return True if the tile (m, n) has been cleaned.
Assumes that (m, n) represents a valid tile inside the room.
m: an integer
n: an integer
returns: True if (m, n) is cleaned, False otherwise
"""
point = (m, n)
return point in self.clean
# raise NotImplementedError
def getNumTiles(self):
"""
Return the total number of tiles in the room.
returns: an integer
"""
return self.width * self.height
# raise NotImplementedError
def getNumCleanedTiles(self):
"""
Return the total number of clean tiles in the room.
returns: an integer
"""
return len(self.clean)
# raise NotImplementedError
def getRandomPosition(self):
"""
Return a random position inside the room.
returns: a Position object.
"""
x = random.uniform(0, self.width)
y = random.uniform(0, self.height)
return Position(x, y)
# raise NotImplementedError
def isPositionInRoom(self, pos):
"""
Return True if pos is inside the room.
pos: a Position object.
returns: True if pos is in the room, False otherwise.
"""
        return (0 <= pos.getX() < self.width) and (0 <= pos.getY() < self.height)
# raise NotImplementedError
# === Problem 2
class Robot(object):
"""
Represents a robot cleaning a particular room.
At all times the robot has a particular position and direction in the room.
The robot also has a fixed speed.
Subclasses of Robot should provide movement strategies by implementing
updatePositionAndClean(), which simulates a single time-step.
"""
def __init__(self, room, speed):
"""
Initializes a Robot with the given speed in the specified room. The
robot initially has a random direction and a random position in the
room. The robot cleans the tile it is on.
room: a RectangularRoom object.
speed: a float (speed > 0)
"""
self.room = room
self.speed = speed
        self.direction = random.randrange(360)  # keep 0 <= d < 360, per getRobotDirection
initPos = room.getRandomPosition()
self.position = initPos
room.cleanTileAtPosition(initPos)
def getRobotPosition(self):
"""
Return the position of the robot.
returns: a Position object giving the robot's position.
"""
return self.position
def getRobotDirection(self):
"""
Return the direction of the robot.
returns: an integer d giving the direction of the robot as an angle in
degrees, 0 <= d < 360.
"""
return self.direction
def setRobotPosition(self, position):
"""
Set the position of the robot to POSITION.
position: a Position object.
"""
        self.position = position
def setRobotDirection(self, direction):
"""
Set the direction of the robot to DIRECTION.
direction: integer representing an angle in degrees
"""
        self.direction = direction
def updatePositionAndClean(self):
"""
        Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
raise NotImplementedError # don't change this!
# === Problem 3
class StandardRobot(Robot):
"""
A StandardRobot is a Robot with the standard movement strategy.
At each time-step, a StandardRobot attempts to move in its current
direction; when it would hit a wall, it *instead* chooses a new direction
randomly.
"""
def updatePositionAndClean(self):
"""
        Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
        # Note: this implementation retries until the robot actually moves, so a
        # blocked robot may pick several new directions within one time-step.
        notAtWall = True
        while notAtWall:
currentPosition = self.getRobotPosition()
nextPosition = currentPosition.getNewPosition(self.getRobotDirection(), self.speed)
if self.room.isPositionInRoom(nextPosition):
self.setRobotPosition(nextPosition)
self.room.cleanTileAtPosition(nextPosition)
notAtWall = False
else:
                newDirection = random.randrange(360)
                self.setRobotDirection(newDirection)
# Uncomment this line to see your implementation of StandardRobot in action!
#testRobotMovement(StandardRobot, RectangularRoom)
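# Minimal exercise of StandardRobot (illustrative; the room size and the step
# count are arbitrary assumptions, not graded values):
_sr_room = RectangularRoom(3, 3)
_sr_robot = StandardRobot(_sr_room, 1.0)
for _ in range(5):
    _sr_robot.updatePositionAndClean()
assert 1 <= _sr_room.getNumCleanedTiles() <= _sr_room.getNumTiles()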
# === Problem 4
def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,
robot_type):
"""
Runs NUM_TRIALS trials of the simulation and returns the mean number of
time-steps needed to clean the fraction MIN_COVERAGE of the room.
The simulation is run with NUM_ROBOTS robots of type ROBOT_TYPE, each with
speed SPEED, in a room of dimensions WIDTH x HEIGHT.
num_robots: an int (num_robots > 0)
speed: a float (speed > 0)
width: an int (width > 0)
height: an int (height > 0)
min_coverage: a float (0 <= min_coverage <= 1.0)
num_trials: an int (num_trials > 0)
robot_type: class of robot to be instantiated (e.g. StandardRobot or
RandomWalkRobot)
"""
    totaltime = 0
    for _ in range(num_trials):
        # anim = ps2_visualize.RobotVisualization(num_robots, width, height)
        room = RectangularRoom(width, height)
        robots = []
        for _ in range(num_robots):
            robots.append(robot_type(room, speed))
        while min_coverage * room.getNumTiles() > room.getNumCleanedTiles():
            for robot in robots:
                robot.updatePositionAndClean()
            totaltime += 1
            # anim.update(room, robots)
    # anim.done()
    return totaltime / num_trials
# Uncomment this line to see how much your simulation takes on average
##print(runSimulation(1, 1.0, 10, 10, 0.75, 30, StandardRobot))
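# Cheap smoke test with deliberately tiny assumed parameters (the commented
# call above is the assignment's reference usage):
assert runSimulation(1, 1.0, 3, 3, 0.5, 2, StandardRobot) > 0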
# === Problem 5
class RandomWalkRobot(Robot):
"""
A RandomWalkRobot is a robot with the "random walk" movement strategy: it
chooses a new direction at random at the end of each time-step.
"""
def updatePositionAndClean(self):
"""
Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
        # Retries until a move succeeds; a fresh heading is drawn on every attempt.
        notAtWall = True
        while notAtWall:
currentPosition = self.getRobotPosition()
nextPosition = currentPosition.getNewPosition(self.getRobotDirection(), self.speed)
if self.room.isPositionInRoom(nextPosition):
self.setRobotPosition(nextPosition)
self.room.cleanTileAtPosition(nextPosition)
                newDirection = random.randrange(360)
self.setRobotDirection(newDirection)
notAtWall = False
else:
                newDirection = random.randrange(360)
                self.setRobotDirection(newDirection)
def showPlot1(title, x_label, y_label):
"""
What information does the plot produced by this function tell you?
"""
num_robot_range = range(1, 11)
times1 = []
times2 = []
for num_robots in num_robot_range:
print("Plotting", num_robots, "robots...")
times1.append(runSimulation(num_robots, 1.0, 20, 20, 0.8, 20, StandardRobot))
times2.append(runSimulation(num_robots, 1.0, 20, 20, 0.8, 20, RandomWalkRobot))
pylab.plot(num_robot_range, times1)
pylab.plot(num_robot_range, times2)
pylab.title(title)
pylab.legend(('StandardRobot', 'RandomWalkRobot'))
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.show()
def showPlot2(title, x_label, y_label):
"""
What information does the plot produced by this function tell you?
"""
aspect_ratios = []
times1 = []
times2 = []
for width in [10, 20, 25, 50]:
height = 300//width
print("Plotting cleaning time for a room of width:", width, "by height:", height)
aspect_ratios.append(float(width) / height)
times1.append(runSimulation(2, 1.0, width, height, 0.8, 200, StandardRobot))
times2.append(runSimulation(2, 1.0, width, height, 0.8, 200, RandomWalkRobot))
pylab.plot(aspect_ratios, times1)
pylab.plot(aspect_ratios, times2)
pylab.title(title)
pylab.legend(('StandardRobot', 'RandomWalkRobot'))
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.show()
showPlot2('Time for Two Robots to Clean 80% of Variously Shaped Rooms', 'Aspect Ratio', 'Time-Steps')
# === Problem 6
# NOTE: If you are running the simulation, you will have to close it
# before the plot will show up.
#
# 1) Write a function call to showPlot1 that generates an appropriately-labeled
# plot.
#
# (... your call here ...)
#
#
# 2) Write a function call to showPlot2 that generates an appropriately-labeled
# plot.
#
# (... your call here ...)
#
| {
"repo_name": "approximata/edx_mit_6.00.2x",
"path": "pset2/ps2.py",
"copies": "1",
"size": "12418",
"license": "mit",
"hash": -5271029481961377000,
"line_mean": 29.140776699,
"line_max": 95,
"alpha_frac": 0.6182960219,
"autogenerated": false,
"ratio": 4.07816091954023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5196456941440231,
"avg_score": null,
"num_lines": null
} |
# 6.00.2x Problem Set 2: Simulating robots
import math
import random
import ps2_visualize
import pylab
import numpy as np
##################
# Comment/uncomment the relevant lines, depending on which version of Python you have
##################
# For Python 3.5:
from ps2_verify_movement35 import testRobotMovement
# If you get a "Bad magic number" ImportError, you are not using Python 3.5
# For Python 3.6:
# from ps2_verify_movement36 import testRobotMovement
# If you get a "Bad magic number" ImportError, you are not using Python 3.6
# === Provided class Position
class Position(object):
"""
A Position represents a location in a two-dimensional room.
"""
def __init__(self, x, y):
"""
Initializes a position with coordinates (x, y).
"""
self.x = x
self.y = y
def getX(self):
return self.x
def getY(self):
return self.y
def getNewPosition(self, angle, speed):
"""
Computes and returns the new Position after a single clock-tick has
passed, with this object as the current position, and with the
specified angle and speed.
Does NOT test whether the returned position fits inside the room.
angle: number representing angle in degrees, 0 <= angle < 360
speed: positive float representing speed
Returns: a Position object representing the new position.
"""
old_x, old_y = self.getX(), self.getY()
angle = float(angle)
# Compute the change in position
delta_y = speed * math.cos(math.radians(angle))
delta_x = speed * math.sin(math.radians(angle))
# Add that to the existing position
new_x = old_x + delta_x
new_y = old_y + delta_y
return Position(new_x, new_y)
def __str__(self):
return "(%0.2f, %0.2f)" % (self.x, self.y)
# === Problem 1
class RectangularRoom(object):
"""
A RectangularRoom represents a rectangular region containing clean or dirty
tiles.
A room has a width and a height and contains (width * height) tiles. At any
particular time, each of these tiles is either clean or dirty.
"""
def __init__(self, width, height):
"""
Initializes a rectangular room with the specified width and height.
Initially, no tiles in the room have been cleaned.
width: an integer > 0
height: an integer > 0
"""
self.width = width
self.height = height
        self.matrix = np.zeros(shape=(width, height), dtype=int)  # np.int was removed from modern NumPy
def cleanTileAtPosition(self, pos):
"""
Mark the tile under the position POS as cleaned.
Assumes that POS represents a valid position inside this room.
pos: a Position
"""
x = math.floor(pos.getX())
y = math.floor(pos.getY())
self.matrix[x][y] = 1
def isTileCleaned(self, m, n):
"""
Return True if the tile (m, n) has been cleaned.
Assumes that (m, n) represents a valid tile inside the room.
m: an integer
n: an integer
returns: True if (m, n) is cleaned, False otherwise
"""
        return bool(self.matrix[m][n])  # return a real boolean, per the docstring
def getNumTiles(self):
"""
Return the total number of tiles in the room.
returns: an integer
"""
return self.width * self.height
def getNumCleanedTiles(self):
"""
Return the total number of clean tiles in the room.
returns: an integer
"""
        return int(np.sum(self.matrix))  # plain int, per the docstring
def getRandomPosition(self):
"""
Return a random position inside the room.
returns: a Position object.
"""
        x = random.randrange(self.width)
        y = random.randrange(self.height)
return Position(x, y)
def isPositionInRoom(self, pos):
"""
Return True if pos is inside the room.
pos: a Position object.
returns: True if pos is in the room, False otherwise.
"""
x = pos.getX()
y = pos.getY()
return (0 <= x < self.width) and (0 <= y < self.height)
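# Illustrative check of the matrix bookkeeping (coordinates assumed): cleaning
# a fractional position marks exactly one tile, at the floored coordinates.
_m = RectangularRoom(4, 4)
_m.cleanTileAtPosition(Position(2.7, 3.2))
assert _m.isTileCleaned(2, 3) and _m.getNumCleanedTiles() == 1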
# === Problem 2
class Robot(object):
"""
Represents a robot cleaning a particular room.
At all times the robot has a particular position and direction in the room.
The robot also has a fixed speed.
Subclasses of Robot should provide movement strategies by implementing
updatePositionAndClean(), which simulates a single time-step.
"""
def __init__(self, room, speed):
"""
Initializes a Robot with the given speed in the specified room. The
robot initially has a random direction and a random position in the
room. The robot cleans the tile it is on.
room: a RectangularRoom object.
speed: a float (speed > 0)
"""
self.speed = speed
        self.direction = random.randrange(360)  # keep 0 <= d < 360, per getRobotDirection
self.position = room.getRandomPosition()
self.room = room
self.room.cleanTileAtPosition(self.position)
def getRobotPosition(self):
"""
Return the position of the robot.
returns: a Position object giving the robot's position.
"""
return self.position
def getRobotDirection(self):
"""
Return the direction of the robot.
returns: an integer d giving the direction of the robot as an angle in
degrees, 0 <= d < 360.
"""
return self.direction
def setRobotPosition(self, position):
"""
Set the position of the robot to POSITION.
position: a Position object.
"""
self.position = position
def setRobotDirection(self, direction):
"""
Set the direction of the robot to DIRECTION.
direction: integer representing an angle in degrees
"""
self.direction = direction
def updatePositionAndClean(self):
"""
        Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
raise NotImplementedError # don't change this!
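# Direction-invariant sketch (illustrative): Robot is abstract only in
# updatePositionAndClean, so constructing one here is safe, and a fresh
# robot's heading must satisfy the documented 0 <= d < 360.
_rb = Robot(RectangularRoom(2, 2), 1.0)
assert 0 <= _rb.getRobotDirection() < 360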
# === Problem 3
class StandardRobot(Robot):
"""
A StandardRobot is a Robot with the standard movement strategy.
At each time-step, a StandardRobot attempts to move in its current
direction; when it would hit a wall, it *instead* chooses a new direction
randomly.
"""
def updatePositionAndClean(self):
"""
        Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
        newPosition = self.position.getNewPosition(self.direction, self.speed)
        if self.room.isPositionInRoom(newPosition):
            self.position = newPosition
self.room.cleanTileAtPosition(self.position)
else:
            self.setRobotDirection(random.randrange(360))
# Uncomment this line to see your implementation of StandardRobot in action!
# testRobotMovement(StandardRobot, RectangularRoom)
# === Problem 4
def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,
robot_type):
"""
Runs NUM_TRIALS trials of the simulation and returns the mean number of
time-steps needed to clean the fraction MIN_COVERAGE of the room.
The simulation is run with NUM_ROBOTS robots of type ROBOT_TYPE, each with
speed SPEED, in a room of dimensions WIDTH x HEIGHT.
num_robots: an int (num_robots > 0)
speed: a float (speed > 0)
width: an int (width > 0)
height: an int (height > 0)
min_coverage: a float (0 <= min_coverage <= 1.0)
num_trials: an int (num_trials > 0)
robot_type: class of robot to be instantiated (e.g. StandardRobot or
RandomWalkRobot)
"""
mean = []
for trial in range(num_trials):
room = RectangularRoom(width, height)
        robots = [robot_type(room, speed) for _ in range(num_robots)]
tics = 0
while (room.getNumCleanedTiles() / room.getNumTiles()) < min_coverage:
for robot in robots:
robot.updatePositionAndClean()
tics += 1
mean.append(tics)
return sum(mean) / num_trials
# Uncomment this line to see how much your simulation takes on average
# print(runSimulation(1, 1.0, 10, 10, 0.75, 30, StandardRobot))
# === Problem 5
class RandomWalkRobot(Robot):
"""
A RandomWalkRobot is a robot with the "random walk" movement strategy: it
chooses a new direction at random at the end of each time-step.
"""
def updatePositionAndClean(self):
"""
Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
        newPosition = self.position.getNewPosition(self.direction, self.speed)
        if self.room.isPositionInRoom(newPosition):
            self.position = newPosition
            self.room.cleanTileAtPosition(self.position)
        self.setRobotDirection(random.randrange(360))  # new random heading every time-step
def showPlot1(title, x_label, y_label):
"""
What information does the plot produced by this function tell you?
"""
num_robot_range = range(1, 11)
times1 = []
times2 = []
for num_robots in num_robot_range:
print("Plotting", num_robots, "robots...")
times1.append(runSimulation(num_robots, 1.0, 20, 20, 0.8, 20, StandardRobot))
times2.append(runSimulation(num_robots, 1.0, 20, 20, 0.8, 20, RandomWalkRobot))
pylab.plot(num_robot_range, times1)
pylab.plot(num_robot_range, times2)
pylab.title(title)
pylab.legend(('StandardRobot', 'RandomWalkRobot'))
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.show()
def showPlot2(title, x_label, y_label):
"""
What information does the plot produced by this function tell you?
"""
aspect_ratios = []
times1 = []
times2 = []
for width in [10, 20, 25, 50]:
height = 300 // width
print("Plotting cleaning time for a room of width:", width, "by height:", height)
aspect_ratios.append(float(width) / height)
times1.append(runSimulation(2, 1.0, width, height, 0.8, 200, StandardRobot))
times2.append(runSimulation(2, 1.0, width, height, 0.8, 200, RandomWalkRobot))
pylab.plot(aspect_ratios, times1)
pylab.plot(aspect_ratios, times2)
pylab.title(title)
pylab.legend(('StandardRobot', 'RandomWalkRobot'))
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.show()
# === Problem 6
# NOTE: If you are running the simulation, you will have to close it
# before the plot will show up.
#
# 1) Write a function call to showPlot1 that generates an appropriately-labeled
# plot.
# showPlot1(title="Time it Takes 1 - 10 Robots To Clean 80% Of A Room", x_label="Number of Robots", y_label="Time-steps")
#
# 2) Write a function call to showPlot2 that generates an appropriately-labeled
# plot.
#
showPlot2(title="Time It Takes Two Robots To Clean 80% Of Variously Shaped Rooms",
x_label="Room Aspect Ratio", y_label="Time-steps")
| {
"repo_name": "Mdlkxzmcp/various_python",
"path": "MITx/6002x/pset2/ps2.py",
"copies": "1",
"size": "11266",
"license": "mit",
"hash": -7466551772722618000,
"line_mean": 30.2077562327,
"line_max": 121,
"alpha_frac": 0.6294159418,
"autogenerated": false,
"ratio": 3.9295430763864667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5058959018186466,
"avg_score": null,
"num_lines": null
} |
# 6.00.2x Problem Set 2: Simulating robots
import math
import random
import ps2_visualize
import pylab
##################
## Comment/uncomment the relevant lines, depending on which version of Python you have
##################
# For Python 3.5:
#from ps2_verify_movement35 import testRobotMovement
# If you get a "Bad magic number" ImportError, you are not using Python 3.5
# For Python 3.6:
from ps2_verify_movement36 import testRobotMovement
# If you get a "Bad magic number" ImportError, you are not using Python 3.6
# === Provided class Position
class Position(object):
"""
A Position represents a location in a two-dimensional room.
"""
def __init__(self, x, y):
"""
Initializes a position with coordinates (x, y).
"""
self.x = x
self.y = y
def getX(self):
return self.x
def getY(self):
return self.y
def getNewPosition(self, angle, speed):
"""
Computes and returns the new Position after a single clock-tick has
passed, with this object as the current position, and with the
specified angle and speed.
Does NOT test whether the returned position fits inside the room.
angle: number representing angle in degrees, 0 <= angle < 360
speed: positive float representing speed
Returns: a Position object representing the new position.
"""
old_x, old_y = self.getX(), self.getY()
angle = float(angle)
# Compute the change in position
delta_y = speed * math.cos(math.radians(angle))
delta_x = speed * math.sin(math.radians(angle))
# Add that to the existing position
new_x = old_x + delta_x
new_y = old_y + delta_y
return Position(new_x, new_y)
def __str__(self):
return "(%0.2f, %0.2f)" % (self.x, self.y)
# === Problem 1
class RectangularRoom(object):
"""
A RectangularRoom represents a rectangular region containing clean or dirty
tiles.
A room has a width and a height and contains (width * height) tiles. At any
particular time, each of these tiles is either clean or dirty.
"""
def __init__(self, width, height):
"""
Initializes a rectangular room with the specified width and height.
Initially, no tiles in the room have been cleaned.
width: an integer > 0
height: an integer > 0
"""
self.width = width
self.height = height
        self.cleaned = set()  # set of (x, y) tile coordinates; O(1) membership
def cleanTileAtPosition(self, pos):
"""
Mark the tile under the position POS as cleaned.
Assumes that POS represents a valid position inside this room.
pos: a Position
"""
        x = math.floor(pos.getX())
        y = math.floor(pos.getY())
        self.cleaned.add((x, y))
def isTileCleaned(self, m, n):
"""
Return True if the tile (m, n) has been cleaned.
Assumes that (m, n) represents a valid tile inside the room.
m: an integer
n: an integer
returns: True if (m, n) is cleaned, False otherwise
"""
return (m, n) in self.cleaned
def getNumTiles(self):
"""
Return the total number of tiles in the room.
returns: an integer
"""
return self.width * self.height
def getNumCleanedTiles(self):
"""
Return the total number of clean tiles in the room.
returns: an integer
"""
return len(self.cleaned)
def getRandomPosition(self):
"""
Return a random position inside the room.
returns: a Position object.
"""
        x = random.randrange(self.width)
        y = random.randrange(self.height)
        return Position(x, y)
def isPositionInRoom(self, pos):
"""
Return True if pos is inside the room.
pos: a Position object.
returns: True if pos is in the room, False otherwise.
"""
return (0 <= pos.getX() < self.width and 0 <= pos.getY() < self.height)
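# Illustrative dedup check (coordinates assumed): two positions that floor to
# the same tile clean it only once, since self.cleaned stores tile coordinates.
_rm = RectangularRoom(3, 3)
_rm.cleanTileAtPosition(Position(1.2, 1.8))
_rm.cleanTileAtPosition(Position(1.9, 1.1))  # floors to the same tile (1, 1)
assert _rm.getNumCleanedTiles() == 1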
# === Problem 2
class Robot(object):
"""
Represents a robot cleaning a particular room.
At all times the robot has a particular position and direction in the room.
The robot also has a fixed speed.
Subclasses of Robot should provide movement strategies by implementing
updatePositionAndClean(), which simulates a single time-step.
"""
def __init__(self, room, speed):
"""
Initializes a Robot with the given speed in the specified room. The
robot initially has a random direction and a random position in the
room. The robot cleans the tile it is on.
room: a RectangularRoom object.
speed: a float (speed > 0)
"""
self.room = room
if speed > 0:
self.speed = speed
else:
raise ValueError("Speed must be a positive number")
self.pos = self.room.getRandomPosition()
self.direction = int(360 * random.random())
self.room.cleanTileAtPosition(self.pos)
def getRobotPosition(self):
"""
Return the position of the robot.
returns: a Position object giving the robot's position.
"""
return self.pos
def getRobotDirection(self):
"""
Return the direction of the robot.
returns: an integer d giving the direction of the robot as an angle in
degrees, 0 <= d < 360.
"""
return self.direction
def setRobotPosition(self, position):
"""
Set the position of the robot to POSITION.
position: a Position object.
"""
self.pos = position
def setRobotDirection(self, direction):
"""
Set the direction of the robot to DIRECTION.
direction: integer representing an angle in degrees
"""
self.direction = direction
def updatePositionAndClean(self):
"""
        Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
raise NotImplementedError # don't change this!
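# Illustrative check of the speed guard (values assumed): non-positive speeds
# are rejected at construction time.
try:
    Robot(RectangularRoom(2, 2), -1.0)
except ValueError:
    pass
else:
    raise AssertionError("expected ValueError for a non-positive speed")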
# === Problem 3
class StandardRobot(Robot):
"""
A StandardRobot is a Robot with the standard movement strategy.
At each time-step, a StandardRobot attempts to move in its current
direction; when it would hit a wall, it *instead* chooses a new direction
randomly.
"""
def updatePositionAndClean(self):
"""
        Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
new_position = self.pos.getNewPosition(self.direction, self.speed)
if self.room.isPositionInRoom(new_position):
self.setRobotPosition(new_position)
self.room.cleanTileAtPosition(self.pos)
else:
self.setRobotDirection(int(360 * random.random()))
# Uncomment this line to see your implementation of StandardRobot in action!
#testRobotMovement(StandardRobot, RectangularRoom)
# === Problem 4
def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,
robot_type):
"""
Runs NUM_TRIALS trials of the simulation and returns the mean number of
time-steps needed to clean the fraction MIN_COVERAGE of the room.
The simulation is run with NUM_ROBOTS robots of type ROBOT_TYPE, each with
speed SPEED, in a room of dimensions WIDTH x HEIGHT.
num_robots: an int (num_robots > 0)
speed: a float (speed > 0)
width: an int (width > 0)
height: an int (height > 0)
min_coverage: a float (0 <= min_coverage <= 1.0)
num_trials: an int (num_trials > 0)
robot_type: class of robot to be instantiated (e.g. StandardRobot or
RandomWalkRobot)
"""
total_time_steps = 0
for n in range(num_trials):
# anim = ps2_visualize.RobotVisualization(num_robots, width, height)
room = RectangularRoom(width, height)
robots = []
for i in range(num_robots):
robots.append(robot_type(room, speed))
while min_coverage * room.getNumTiles() > room.getNumCleanedTiles():
for robot in robots:
robot.updatePositionAndClean()
total_time_steps += 1
# anim.update(room, robots)
# anim.done()
    return total_time_steps / num_trials
# Uncomment this line to see how much your simulation takes on average
#print(runSimulation(1, 1.0, 10, 10, 0.75, 30, StandardRobot))
# === Problem 5
class RandomWalkRobot(Robot):
"""
A RandomWalkRobot is a robot with the "random walk" movement strategy: it
chooses a new direction at random at the end of each time-step.
"""
def updatePositionAndClean(self):
"""
Simulate the passage of a single time-step.
Move the robot to a new position and mark the tile it is on as having
been cleaned.
"""
new_position = self.pos.getNewPosition(self.direction, self.speed)
if self.room.isPositionInRoom(new_position):
self.setRobotPosition(new_position)
self.setRobotDirection(int(360 * random.random()))
self.room.cleanTileAtPosition(self.pos)
else:
self.setRobotDirection(int(360 * random.random()))
def showPlot1(title, x_label, y_label):
"""
What information does the plot produced by this function tell you?
"""
num_robot_range = range(1, 11)
times1 = []
times2 = []
for num_robots in num_robot_range:
print("Plotting", num_robots, "robots...")
times1.append(runSimulation(num_robots, 1.0, 20, 20, 0.8, 20, StandardRobot))
times2.append(runSimulation(num_robots, 1.0, 20, 20, 0.8, 20, RandomWalkRobot))
pylab.plot(num_robot_range, times1)
pylab.plot(num_robot_range, times2)
pylab.title(title)
pylab.legend(('StandardRobot', 'RandomWalkRobot'))
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.show()
def showPlot2(title, x_label, y_label):
"""
What information does the plot produced by this function tell you?
"""
aspect_ratios = []
times1 = []
times2 = []
for width in [10, 20, 25, 50]:
height = 300//width
print("Plotting cleaning time for a room of width:", width, "by height:", height)
aspect_ratios.append(float(width) / height)
times1.append(runSimulation(2, 1.0, width, height, 0.8, 200, StandardRobot))
times2.append(runSimulation(2, 1.0, width, height, 0.8, 200, RandomWalkRobot))
pylab.plot(aspect_ratios, times1)
pylab.plot(aspect_ratios, times2)
pylab.title(title)
pylab.legend(('StandardRobot', 'RandomWalkRobot'))
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.show()
# === Problem 6
# NOTE: If you are running the simulation, you will have to close it
# before the plot will show up.
#
# 1) Write a function call to showPlot1 that generates an appropriately-labeled
# plot.
#
showPlot1('Time for 1-10 Robots to Clean 80%', 'Number of Robots', 'Time-Steps')
#
#
# 2) Write a function call to showPlot2 that generates an appropriately-labeled
# plot.
#
showPlot2('Time for Two Robots to Clean 80% of Various Room Shapes', 'Aspect Ratio', 'Time-Steps')
#
| {
"repo_name": "johntauber/MITx6.00.2x",
"path": "Unit2/pset2/ps2.py",
"copies": "1",
"size": "11553",
"license": "mit",
"hash": -5000792438813792000,
"line_mean": 29.7260638298,
"line_max": 98,
"alpha_frac": 0.6188868692,
"autogenerated": false,
"ratio": 3.9578622816032887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5076749150803289,
"avg_score": null,
"num_lines": null
} |
# 6.00.2x Problem Set 4
import numpy
import random
import pylab
from ps3b import *
def simulationWithDrug(numViruses, maxPop, maxBirthProb, clearProb, resistances,
mutProb, drugTimeStep, timeStepsAfterDrug):
time_steps = drugTimeStep + timeStepsAfterDrug
viruses = []
for i in range(numViruses):
viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))
patient = TreatedPatient(viruses, maxPop)
for time_step in range(time_steps):
if time_step == drugTimeStep:
patient.addPrescription('guttagonol')
patient.update()
return patient.getTotalPop()
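# Illustrative single run with deliberately small assumed parameters (the
# graded experiments below use larger populations and trial counts; this
# relies on ps3b's ResistantVirus/TreatedPatient behaving as in pset 3):
_pop = simulationWithDrug(10, 100, 0.1, 0.05, {'guttagonol': False},
                          0.005, 20, 20)
assert _pop >= 0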
#
# PROBLEM 1
#
def simulationDelayedTreatment(numTrials):
"""
Runs simulations and make histograms for problem 1.
Runs numTrials simulations to show the relationship between delayed
treatment and patient outcome using a histogram.
Histograms of final total virus populations are displayed for delays of 300,
150, 75, 0 timesteps (followed by an additional 150 timesteps of
simulation).
numTrials: number of simulation runs to execute (an integer)
"""
    drugTimeSteps = [300, 150, 75, 0]  # the delays named in the docstring
for drugTimeStep in drugTimeSteps:
        print(drugTimeStep)
populations = []
for i in range(numTrials):
            if i % 100 == 0: print(i)
populations.append(simulationWithDrug(200, 1000, 0.1, 0.05, {'guttagonol': False}, 0.005, drugTimeStep, 150))
pylab.figure(drugTimeStep)
pylab.title('Drugs - ' + str(drugTimeStep))
pylab.hist(populations)
pylab.show()
simulationDelayedTreatment(50)
#
# PROBLEM 2
#
def simulationTwoDrugsDelayedTreatment(numTrials):
"""
Runs simulations and make histograms for problem 2.
Runs numTrials simulations to show the relationship between administration
of multiple drugs and patient outcome.
Histograms of final total virus populations are displayed for lag times of
300, 150, 75, 0 timesteps between adding drugs (followed by an additional
150 timesteps of simulation).
numTrials: number of simulation runs to execute (an integer)
"""
# TODO
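# A minimal sketch of the two-drug flow (an assumption patterned on the
# single-drug simulation above; 'grimpex' is the second drug named in the
# pset, and every numeric value here is illustrative, not graded):
def _twoDrugTrial(lag):
    viruses = [ResistantVirus(0.1, 0.05,
                              {'guttagonol': False, 'grimpex': False}, 0.005)
               for _ in range(100)]
    patient = TreatedPatient(viruses, 1000)
    for _ in range(150):                # let the infection establish itself
        patient.update()
    patient.addPrescription('guttagonol')
    for _ in range(lag):                # lag before the second drug
        patient.update()
    patient.addPrescription('grimpex')
    for _ in range(150):                # observation window after both drugs
        patient.update()
    return patient.getTotalPop()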
| {
"repo_name": "FylmTM/edX-code",
"path": "MITx_6.00.2x/problem_set_4/ps4.py",
"copies": "1",
"size": "2221",
"license": "mit",
"hash": 763235085029246000,
"line_mean": 29.0135135135,
"line_max": 121,
"alpha_frac": 0.6920306168,
"autogenerated": false,
"ratio": 3.6529605263157894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9793706655686116,
"avg_score": 0.01025689748593454,
"num_lines": 74
} |
# 6.00.2x Problem Set 4
import numpy
import random
import pylab
from ps3b import *
#
# PROBLEM 1
#
def simulationDelayedTreatment(numTrials):
"""
Runs simulations and make histograms for problem 1.
Runs numTrials simulations to show the relationship between delayed
treatment and patient outcome using a histogram.
Histograms of final total virus populations are displayed for delays of 300,
150, 75, 0 timesteps (followed by an additional 150 timesteps of
simulation).
numTrials: number of simulation runs to execute (an integer)
"""
final = [0 for trial in range(numTrials)]
numViruses = 100
maxPop = 1000
maxBirthProb = 0.1
clearProb = 0.05
resistances = {'guttagonol': False}
mutProb = 0.005
for trial in range(numTrials):
viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb)
for i in range(numViruses)]
p = TreatedPatient(viruses, maxPop)
        for timeStep in range(delay):  # `delay` is the module-level global set below
final[trial] = p.update()
p.addPrescription('guttagonol')
for timeStep in range(150):
final[trial] = p.update()
cured = 0
for trial in final:
if trial <= 50:
cured += 1
pylab.figure()
pylab.hist(final)
pylab.title("ResistantVirus simulation")
pylab.show()
return float(cured) / numTrials
#
# PROBLEM 2
#
def simulationTwoDrugsDelayedTreatment(numTrials):
"""
Runs simulations and make histograms for problem 2.
Runs numTrials simulations to show the relationship between administration
of multiple drugs and patient outcome.
Histograms of final total virus populations are displayed for lag times of
300, 150, 75, 0 timesteps between adding drugs (followed by an additional
150 timesteps of simulation).
numTrials: number of simulation runs to execute (an integer)
"""
final = [0 for trial in range(numTrials)]
numViruses = 100
maxPop = 1000
maxBirthProb = 0.1
clearProb = 0.05
resistances = {'guttagonol': False, 'grimpex': False}
mutProb = 0.005
for trial in range(numTrials):
viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb)
for i in range(numViruses)]
p = TreatedPatient(viruses, maxPop)
for timeStep in range(150):
final[trial] = p.update()
p.addPrescription('guttagonol')
        for timeStep in range(delay):  # `delay` is the module-level global set below
final[trial] = p.update()
p.addPrescription('grimpex')
for timeStep in range(150):
final[trial] = p.update()
cured = 0
for trial in final:
if trial <= 50:
cured += 1
pylab.figure()
pylab.hist(final)
pylab.title("ResistantVirus simulation")
pylab.show()
return float(cured) / numTrials
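# Hedged refactor sketch (not graded): the pset fixes the one-argument
# signatures, so the driver below routes the lag through the module-level
# `delay` global. A tiny helper makes that dependency explicit:
def _runWithDelay(simulation, numTrials, lag):
    global delay
    delay = lag  # the simulations above read this module-level global
    return simulation(numTrials)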
delays = [300, 150, 75, 0]
for delay in delays:
    print(simulationDelayedTreatment(100))
    print(simulationTwoDrugsDelayedTreatment(100))
| {
"repo_name": "zhouyulian17/Course",
"path": "Python/6.00.2x Introduction to Computational Thinking and Data Science/ProblemSet4/ps4.py",
"copies": "1",
"size": "3069",
"license": "mit",
"hash": 1665301354569771300,
"line_mean": 27.1651376147,
"line_max": 81,
"alpha_frac": 0.6386445096,
"autogenerated": false,
"ratio": 3.6535714285714285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9708651111521929,
"avg_score": 0.016712965329899958,
"num_lines": 109
} |