text stringlengths 38 1.54M |
|---|
# Vincent Wong
# A01051004
import random
class Player:
    """A text-adventure player with hit points, an inventory and a grid position.

    North/south move along rows (north decreases the row index) and east/west
    along columns (east increases the column index). Hit points are capped at 10.
    """

    def __init__(self, name: str, hp: int = 10, inventory: list = (), column_position: int = 0, row_position: int = 0):
        """Initialise the player.

        :param name: the player's name (required)
        :param hp: starting hit points, 10 by default
        :param inventory: initial items; copied into a new list so the caller's
            sequence (and the immutable default tuple) is never mutated
        :param column_position: starting x / column coordinate, 0 by default
        :param row_position: starting y / row coordinate, 0 by default
        """
        self.name = name
        self.hp = hp
        self.inventory = list(inventory)
        self.column_position = column_position
        self.row_position = row_position
        self.move_phrases = ["You travelled ", "Heading ", "Onwards! to the ",
                             "We must not lose time, we head "]
        # Kept for backward compatibility with any code reading this attribute.
        self.number_of_phrases = len(self.move_phrases)

    def add_inventory(self, item: str) -> None:
        """Append *item* to the player's inventory.

        >>> player = Player('name')
        >>> player.add_inventory('Potion')
        >>> print(player.inventory)
        ['Potion']
        >>> player.add_inventory('Sword')
        >>> print(player.inventory)
        ['Potion', 'Sword']
        """
        self.inventory.append(item)

    # movement functions
    def _announce_move(self, direction: str) -> None:
        """Print a randomly chosen travel phrase followed by *direction*.

        Bug fix: the original drew indices with randint(1, n - 1), so the
        first phrase ("You travelled ") could never be selected.
        """
        print(random.choice(self.move_phrases) + direction)

    def move_north(self) -> None:
        """Move one space north (row position decreases by 1) and announce it."""
        self.row_position -= 1
        self._announce_move("North")

    def move_east(self) -> None:
        """Move one space east (column position increases by 1) and announce it."""
        self.column_position += 1
        self._announce_move("East")

    def move_south(self) -> None:
        """Move one space south (row position increases by 1) and announce it."""
        self.row_position += 1
        self._announce_move("South")

    def move_west(self) -> None:
        """Move one space west (column position decreases by 1) and announce it."""
        self.column_position -= 1
        self._announce_move("West")

    # combat functions
    def roll_attack(self) -> int:
        """Roll a d6 attack and modify it by the best weapon carried.

        A Rubber Duck of Justice doubles the roll; otherwise a Sword adds 2
        and, failing that, a Mace adds 1 (only the best modifier applies).

        :return: the (possibly modified) attack value
        """
        attack = random.randint(1, 6)
        if "Rubber Duck of Justice" in self.inventory:
            attack *= 2
        elif "Sword" in self.inventory:
            attack += 2
        elif "Mace" in self.inventory:
            attack += 1
        return attack

    def modify_health(self, number: int) -> None:
        """Add *number* to hp (pass a negative value for damage), capped at 10.

        >>> player = Player('name')
        >>> player.modify_health(-4)
        >>> print(player.hp)
        6
        >>> player.modify_health(2)
        >>> print(player.hp)
        8
        >>> player.modify_health(200)  # max hp is capped at 10
        >>> print(player.hp)
        10
        """
        self.hp = min(self.hp + number, 10)

    def use_potion(self) -> None:
        """Consume one Potion from the inventory, if carried, restoring 3 hp.

        >>> player = Player('name')
        >>> player.modify_health(-5)
        >>> player.use_potion()
        You do not have a potion in your inventory
        >>> player.add_inventory('Potion')
        >>> player.use_potion()
        You used a potion! Your health is now at 8
        >>> player.add_inventory('Potion')
        >>> player.use_potion()
        You used a potion! Your health is now at 10
        """
        if "Potion" in self.inventory:
            self.inventory.remove("Potion")
            self.modify_health(3)
            print("You used a potion! Your health is now at %d" % self.hp)
        else:
            print("You do not have a potion in your inventory")
|
# coding:utf-8
from flask import Blueprint,render_template, request
import requests
import os,sys
# Make the project root importable so `service.wy` resolves regardless of
# where this module is launched from (two levels up from this file).
dirPath = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir,os.pardir))
sys.path.append(dirPath)
from service.wy import wy
from flask_cors import CORS
# Blueprint hosting the demo routes below; CORS is enabled for every path
# on this blueprint.
test = Blueprint('test',__name__)
CORS(test, resources=r'/*')
@test.route('/htm')
def htm():
    """Serve a minimal, hard-coded HTML page greeting a fixed demo user."""
    # NOTE(review): nickname is hard-coded — presumably a placeholder/demo
    # endpoint; confirm before shipping.
    user = { 'nickname': 'Miguel' }
    return '''
<html>
<head>
<title>Home Page</title>
</head>
<body>
<h1>Hello, ''' + user['nickname'] + '''</h1>
</body>
</html>
'''
@test.route('/cs')
def cs():
    """Render index.html with the request's 'user' value and two canned posts."""
    # Raises (HTTP 400 via flask) if the request carries no 'user' value.
    user = request.values['user']
    # Static demo data passed straight to the template.
    posts = [
        {
            'author': {'username': 'John'},
            'body': 'Beautiful day in Portland!'
        },
        {
            'author': {'username': 'Susan'},
            'body': 'The Avengers movie was so cool!'
        }
    ]
    return render_template('index.html', title='Home', user=user, posts=posts)
if __name__ == "__main__":
    # BUG FIX(review): the original called dy(), which is defined nowhere in
    # this module and raised NameError on direct execution. A blueprint module
    # has no standalone entry point, so direct execution is now a no-op.
    pass
|
import re
import urllib
def removeHTMLTags():
    """Return a compiled regex matching (for removal) any HTML tag.

    Non-greedy, so adjacent tags are matched individually.
    """
    tag_pattern = r'<.*?>'
    return re.compile(tag_pattern)
def removeNPSBs():
    """
    Returns a regular expression for removing non-breaking spaces.
    """
    # NOTE(review): the function name suggests non-breaking spaces, but the
    # pattern literal below may have been mangled in transit (originally
    # perhaps '&nbsp;' or u'\xa0') — confirm against the original source.
    return re.compile(r' ')
def removeReferences():
    """
    Returns a regular expression for removing references: [integer]

    Bug fix: the original pattern r'[\[0-9\]]' was a character class that
    matched any single '[', ']' or digit, stripping every digit from the
    text instead of only bracketed reference markers like '[12]'.
    """
    return re.compile(r'\[[0-9]+\]')
def removeSlashes():
    """Return a compiled regex matching (for removal) the '/' character."""
    slash = r'/'
    return re.compile(slash)
def removeCommas():
    """Return a compiled regex matching (for removal) the ',' character."""
    comma = r','
    return re.compile(comma)
def removeWhitespaces():
    """Return a compiled regex matching the URL-encoded space sequence '%20'."""
    encoded_space = r'%20'
    return re.compile(encoded_space)
def unquoteString(string):
    """
    Used for replacing '%xx' and '+' from search terms, removes URL
    encoding of string. E.g. %2F is replaced with '/' and '+' with ' '.

    Bug fix: urllib.unquote_plus exists only on Python 2; on Python 3 the
    function lives in urllib.parse. Resolve whichever is available so the
    module works under both interpreters.
    """
    try:
        unquote = urllib.unquote_plus  # Python 2
    except AttributeError:
        from urllib.parse import unquote_plus as unquote  # Python 3
    return unquote(string)
def decodeURLcharacters(string):
    """
    Decode numeric HTML entities '&#NN;' in *string*, replacing each with
    the unicode character for code point NN, and return the result.

    Bug fix: unichr exists only on Python 2; fall back to chr (which is
    equivalent) on Python 3 so the module works under both interpreters.
    """
    try:
        to_char = unichr  # Python 2
    except NameError:
        to_char = chr  # Python 3
    return re.sub(r'&#(\d+);', lambda m: to_char(int(m.group(1))), string)
def sanitizeString():
    """
    Returns a compiled pattern that matches any non-alphanumeric,
    non-underscore character. Used for sanitizing strings, e.g.
    'The dog is not red, but has a large tail' ->
    'The dog is not red but has a large tail' (when substituting commas away).

    Fix: use a raw string for the pattern — '[\\W]' as a plain literal
    triggers an invalid-escape-sequence warning on modern Python.
    """
    return re.compile(r'[\W]')
|
"""
(C) Copyright 2018-2023 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
from mdtest_test_base import MdtestBase
# pylint: disable=too-few-public-methods
class RbldWidelyStriped(MdtestBase):
    """Rebuild test cases featuring mdtest.

    This class contains tests for pool rebuild that feature I/O going on
    during the rebuild using IOR.

    :avocado: recursive
    """

    def test_rebuild_widely_striped(self):
        """Jira ID: DAOS-3795/DAOS-3796.

        Test Description: Verify rebuild for widely striped object using
        mdtest.

        Use Cases:
          Create pool and container.
          Use mdtest to create 120K files of size 32K with 3-way replication.
          Stop one server, let rebuild start and complete.
          Destroy container and create a new one.
          Use mdtest to create 120K files of size 32K with 3-way replication.
          Stop one more server in the middle of mdtest. Let rebuild to complete.
          Allow mdtest to complete.
          Destroy container and create a new one.
          Use mdtest to create 120K files of size 32K with 3-way replication.
          Stop 2 servers in the middle of mdtest. Let rebuild to complete.
          Allow mdtest to complete.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large
        :avocado: tags=rebuild
        :avocado: tags=RbldWidelyStriped,test_rebuild_widely_striped
        """
        # set params
        targets = self.server_managers[0].get_config_value("targets")
        ranks_to_kill = self.params.get("ranks_to_kill", "/run/testparams/*")
        # create pool
        self.log.info(">> Creating a pool")
        self.add_pool(connect=False)
        # make sure pool looks good before we start: every server node
        # contributes `targets` targets and none are disabled yet
        checks = {
            "pi_nnodes": len(self.hostlist_servers),
            "pi_ntargets": len(self.hostlist_servers) * targets,
            "pi_ndisabled": 0,
        }
        self.assertTrue(
            self.pool.check_pool_info(**checks),
            "Invalid pool information detected before rebuild")
        self.assertTrue(
            self.pool.check_rebuild_status(rs_errno=0, rs_state=1, rs_obj_nr=0, rs_rec_nr=0),
            "Invalid pool rebuild info detected before rebuild")
        # create 1st container
        self.log.info(">> Creating the first container")
        self.add_container(self.pool)
        # start 1st mdtest run and let it complete (runs in the foreground)
        self.log.info(">> Running mdtest to completion")
        self.execute_mdtest()
        # Kill the first configured rank and wait for rebuild to complete
        self.log.info(">> Killing rank %s", ranks_to_kill[0])
        self.server_managers[0].stop_ranks(ranks_to_kill[0], self.d_log, force=True)
        self.log.info(">> Waiting for rebuild to complete after killing rank %s", ranks_to_kill[0])
        self.pool.wait_for_rebuild_to_start()
        self.pool.wait_for_rebuild_to_end(interval=1)
        # create 2nd container
        self.log.info(">> Creating the second container")
        self.add_container(self.pool)
        # start 2nd mdtest job in the background
        self.log.info(">> Running the first mdtest job in the background")
        self.subprocess = True
        self.execute_mdtest()
        # Kill the next rank in the middle of mdtest and wait for rebuild
        # to complete; the sleep gives mdtest time to get going first
        time.sleep(3)
        self.log.info(">> Killing rank %s", ranks_to_kill[1])
        self.server_managers[0].stop_ranks(ranks_to_kill[1], self.d_log, force=True)
        self.log.info(">> Waiting for rebuild to complete after killing rank %s", ranks_to_kill[1])
        self.pool.wait_for_rebuild_to_start()
        self.pool.wait_for_rebuild_to_end(interval=1)
        # wait for mdtest to complete successfully
        self.log.info(">> Waiting for the first background mdtest job to complete")
        mdtest_returncode = self.job_manager.process.wait()
        if mdtest_returncode != 0:
            self.fail("mdtest failed")
        # create 3rd container
        self.log.info(">> Creating the third container")
        self.add_container(self.pool)
        # start 3rd mdtest job in the background
        # NOTE(review): self.subprocess is still True from the second run, so
        # this job also runs in the background — presumably intended, since the
        # code below waits on job_manager.process again; confirm.
        self.log.info(">> Running a second mdtest job in the background")
        self.execute_mdtest()
        # Kill the third configured entry during mdtest and wait for rebuild
        # NOTE(review): the use case says "Stop 2 servers"; this passes only
        # ranks_to_kill[2], which may itself hold two ranks — confirm against
        # the test yaml.
        time.sleep(3)
        self.log.info(">> Killing rank %s", ranks_to_kill[2])
        self.server_managers[0].stop_ranks(ranks_to_kill[2], self.d_log, force=True)
        self.log.info(">> Waiting for rebuild to complete after killing rank %s", ranks_to_kill[2])
        self.pool.wait_for_rebuild_to_start()
        self.pool.wait_for_rebuild_to_end(interval=1)
        # wait for mdtest to complete successfully
        self.log.info(">> Waiting for the second background mdtest job to complete")
        mdtest_returncode = self.job_manager.process.wait()
        if mdtest_returncode != 0:
            self.fail("mdtest failed")
        self.log.info("Test passed!")
|
# importing required modules
from keras.applications import InceptionV3
from keras.models import Model
from keras.models import Sequential
from keras.layers import Activation, Dense
import keras

# creating bottleneck model: reuse InceptionV3 up to its penultimate layer
# (index -2, i.e. dropping the classification head) as a feature extractor
original_model = InceptionV3()
bottleneck_input = original_model.get_layer(index=0).input
bottleneck_output = original_model.get_layer(index=-2).output
bottleneck_model = Model(inputs=bottleneck_input, outputs=bottleneck_output)

# setting the weights of bottleneck model to be constant (frozen), so only
# the new classification head added below gets trained
for layer in bottleneck_model.layers:
    layer.trainable = False

# Creating a sequential model that uses pretrained model (bottleneck_model)
new_model = Sequential()
new_model.add(bottleneck_model)
# NOTE(review): Dense(4, softmax) implies 4 classes, but the labels below
# are one-hot encoded with num_classes=2 and the loss is binary — these are
# inconsistent; confirm the intended number of classes.
new_model.add(Dense(4, activation='softmax', input_dim=2048))

# TODO: need to make it work for multi-class problem
# For a binary classification problem
new_model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

# TODO: labels and input needed
# TODO: Datapreprocessing needed here
# NOTE(review): `labels` and `processed_imgs_array` are not defined anywhere
# in this script — as written the lines below raise NameError; they must be
# produced by the preprocessing step the TODOs refer to.
one_hot_labels = keras.utils.to_categorical(labels, num_classes=2)
new_model.fit(processed_imgs_array,
              one_hot_labels,
              epochs=2,
              batch_size=32)
# Sum a small list while printing the running total, then report whether
# the final total is within the expected bound (15).
x = [1, 2, 3, 4, 5]
# Renamed from `sum` — the original shadowed the builtin sum().
total = 0
for each in x:
    total += each
    print(total)
if total <= 15:
    print("The answer is correct")
else:
    print("Incorrect")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 19:56:58 2018
@author: Dillon
"""
import numpy as np
import pandas as pd
import re
# Maps each year 2007-2016 to a fixed date shared by the two years of its
# two-year election cycle.
primary_dict = {
    year: pd.to_datetime(date_str)
    for date_str, cycle_years in (
        ('8/28/08', (2007, 2008)),
        ('9/02/10', (2009, 2010)),
        ('8/30/12', (2011, 2012)),
        ('8/28/14', (2013, 2014)),
        ('9/01/16', (2015, 2016)),
    )
    for year in cycle_years
}
# Canonical expenditure-purpose categories used by the purpose mappings
# below ('smol' is the bucket for sub-$100 expenditure totals).
# NOTE(review): despite the `_dict` suffix this is a list, not a dict.
mod_purp_dict = [
    'Candidate Expenses',
    'Contributions',
    'Field Expenses',
    'Fund Raiser',
    'Mail',
    'Media',
    'Printing and Campaign Materials',
    'Rent and Other Office expenses',
    'Salaries and Other compensation',
    'smol']
# Normalizes raw contributor-type labels to a small set of categories:
# most organizational donors collapse to 'PAC', totals of small
# contributions become 'sub_100', and already-clean column names
# ('Ind_DE', 'CF_ID', 'Contrib_Total') map to themselves so they pass
# through unchanged.
contrib_dict = {
    'Self (Candidate)': 'Self',
    'Business/Group/Organization': 'PAC',
    'Political Committee': 'PAC',
    'Total of Contributions not exceeding $100': 'sub_100',
    'Candidate Committee':'Candidate_Committee',
    'Individual':'Individual',
    'PAC Committee':'PAC',
    'Out-of-State or Federal Committee':'PAC',
    'Labor Union':'PAC',
    'Political Action Committee':'PAC',
    'Dem or Rep National Sub-Committees':'National_Sub_Committees',
    'Non-Profit Organization':'PAC' ,
    'Ind_DE':'Ind_DE',
    'CF_ID':'CF_ID',
    'Contrib_Total':'Contrib_Total',
    'Total of Expenditures not exceeding $100':'sub_100',
    '3rd Party Advertiser' : 'PAC'
    }
# Normalizes raw expenditure-type labels to short, column-friendly names.
# NOTE(review): keys 'Field Expenses ' and 'Printing and Campaign Materials '
# carry trailing spaces — presumably matching the raw source data exactly;
# confirm before trimming.
# NOTE(review): 'Cand_Load' looks like a typo for 'Cand_Loan', but the value
# is kept as-is since downstream code may already reference it.
expend_dict = {
    'Contributions':'Contributions',
    'Non-Candidate Loan Payment': 'Non_Cand_Loan',
    'Other Expenses': 'Other',
    'Fund Raiser':'Fund_Raiser',
    'Field Expenses ': 'Field_Expenses',
    'Media':'Media',
    'Postage':'Postage',
    'Salaries and Other compensation':'Staff',
    'Rent and Other Office expenses': 'Rent_Office_Expense',
    'Reimburse':'Reimburse',
    'Printing and Campaign Materials ': 'Printing',
    'Total of Expenditures not exceeding $100': 'Small_Expend',
    'Candidate Loan Payment':'Cand_Load',
    'Direct Mailing by Mail House (R)': 'Mail_House',
    'Debts Incurred Paid':'Debts',
    'In-Kind':'In-Kind',
    'Transfer': 'Transfer',
    'Data Conversion':'Conversion',
    'Return Contributions':'Return',
    'Purchase of Equipment':'Equipment',
    'Independent Expenditures':'Independent_Expenditures',
    'Interest':'Interest'
    }
# Maps detailed expenditure-purpose labels to the canonical categories
# listed in `mod_purp_dict` ('X' marks purposes excluded from the
# categorization, 'smol' the sub-$100 totals bucket).
# NOTE(review): several keys deliberately(?) keep trailing spaces
# ('Postage ', 'Staff - Gas ') and both hyphen and en-dash variants
# ('Media - Videos' / 'Media – Videos') — presumably to match raw source
# strings exactly; confirm before normalizing.
purp_dict= {
    'Data Conversion': 'X',
    'Field Expenses ':'Field Expenses',
    'Rent and Other Office expenses':'Rent and Other Office expenses',
    'Fund Raiser': 'Fund Raiser',
    'Purchase of Equipment':'Rent and Other Office expenses',
    'In-Kind':'Contributions',
    'Media':'Media',
    'Postage':'Mail',
    'Printing and Campaign Materials ':'Printing and Campaign Materials',
    'Salaries and Other compensation': 'Salaries and Other compensation',
    'Expense Reimbursement':'Candidate Expenses',
    'Contribution to Committee':'Contributions',
    'Fundraiser -General Expenses': 'Fund Raiser',
    'Fundraiser - Entertainment': 'Fund Raiser',
    'Phone Bank':'Field Expenses',
    'Consulting Fees - Media':'Media',
    'Postage ':'Mail',
    'Fundraiser - Food & Beverage': 'Fund Raiser',
    'Meeting Expenses ':'Rent and Other Office expenses',
    'Bank Charges':'Fund Raiser',
    'Billboards / Outdoor Advertising':'Media',
    'Media - Newspaper':'Media',
    'Book/Brochure Advertising':'Media',
    'Candidate Expense-Ballot Fee':'Candidate Expenses',
    'Wages - Campaign Staff': 'Salaries and Other compensation',
    'Office Supplies':'Rent and Other Office expenses',
    'Media - Phones / Robo calls':'Media',
    'Contribution to federal committee':'Contributions',
    'Printing - Brochures':'Printing and Campaign Materials',
    'Volunteer Meals':'Field Expenses',
    'Media - Online Advertising':'Media',
    'Total of Expenditures not exceeding $100':'smol',
    'Printing Give away items (buttons bumper stickers t-shirts)':'Printing and Campaign Materials',
    'Event or Fair Booth Expenses':'Field Expenses',
    'Media - Billboards / Outdoor Advertising':'Media',
    'Utilities - Phone / Cell Phone ':'Rent and Other Office expenses',
    'Credit Card Service Processing Charges':'Fund Raiser',
    'Wages - Campaign Manager': 'Salaries and Other compensation',
    'Media - TV':'Media',
    'Office Rent':'Rent and Other Office expenses',
    'Utilities - Electrical ':'Rent and Other Office expenses',
    'Mailing Service':'Mail',
    'Printing - Copies':'Printing and Campaign Materials',
    'Consulting Fees - General': 'Salaries and Other compensation',
    'Mailing List':'Mail',
    'Media - Graphic Design':'Media',
    'Fundraiser - Hall Rental': 'Fund Raiser',
    'Media - Radio':'Media',
    'Printing - Yard Signs':'Printing and Campaign Materials',
    'Consultant Fees- Campaign workers': 'Salaries and Other compensation',
    'Payroll Company Management Expense':'Salaries and Other compensation',
    'Wages - Employment Taxes': 'Salaries and Other compensation',
    'Utilities - Internet Access ':'Rent and Other Office expenses',
    'Fair Expenses':'Field Expenses',
    'For Close Out Only-Charitable Donation':'Contributions',
    'Printing Misc. (buttons bumper stickers t-shirts)':'Printing and Campaign Materials',
    'Survey/Polls':'Field Expenses',
    'Media - Website Development':'Media',
    'Transfer to Other Registered political Committees':'Contributions',
    'Staff - Mileage': 'Salaries and Other compensation',
    'Fundraiser - Auction Item': 'Fund Raiser',
    'Election -Day workers':'Field Expenses',
    'Staff - Travel': 'Salaries and Other compensation',
    'IT - Campaign Software':'Field Expenses',
    'Staff - Parking': 'Salaries and Other compensation',
    'Staff - Lodging': 'Salaries and Other compensation',
    'IT - Campaign IT Maintenance':'Rent and Other Office expenses',
    'Legal Fees - General':'Rent and Other Office expenses',
    'Staff - Gas ': 'Salaries and Other compensation',
    'IT - Campaign Computer Equip':'Rent and Other Office expenses',
    'Gifts':'Candidate Expenses',
    'Legal Fees - Compliance/Administrative':'Rent and Other Office expenses',
    'Utilities - Gas ':'Rent and Other Office expenses',
    'Office Furniture':'Rent and Other Office expenses',
    'Office - Campaign Office Maintenance':'Rent and Other Office expenses',
    'Professional - Accounting':'Rent and Other Office expenses',
    'Staff - Employee Benefits Costs': 'Salaries and Other compensation',
    'Legal Fees - Campaign Election Relates':'Rent and Other Office expenses',
    'Media - Videos':'Media',
    'Media - Book/Brochure Advertising':'Media',
    'Media – Videos':'Media',
    'Media – Book/Brochure Advertising':'Media',
    'Research - Survey':'Field Expenses',
    'Candidate Expenses - Travel':'Candidate Expenses',
    'Income Tax (Interest Income)': 'Salaries and Other compensation',
    'Staff - Insurance': 'Salaries and Other compensation',
    'Candidate Expenses - Meals':'Candidate Expenses',
    'Phones / Robo calls':'Media',
    'Online Advertising':'Media',
    'Graphic Design':'Media',
    'Tickets to Events':'Field Expenses',
    'CF_ID':'CF_ID',
    'Expend_Total':'Expend_Total',
    'Other':'X',
    'Other Expenses':'X',
    'Transfer':'Candidate Expenses',
    'Reimburse':'Field Expenses'}
# Coarser purpose mapping keyed largely by label prefixes with trailing
# spaces ('Wages ', 'Legal Fees ', 'Media ') — presumably applied after
# detail labels are truncated at a separator; confirm against the cleaning
# code that uses it. Canonical category names also map to themselves so
# already-mapped values pass through unchanged.
purp_dict2= {'Bank Charges':'Fund Raiser',
    'Mailing List': 'Mail',
    'Utilities ': 'Rent and Other Office expenses',
    'Contribution to Committee':'Contributions',
    'Wages ':'Salaries and Other compensation',
    'Meeting Expenses ':'Candidate Expenses',
    'Event or Fair Booth Expenses':'Field Expenses',
    'Total of Expenditures not exceeding $100':'smol',
    'Other':'Other',
    'Postage':'Mail',
    'Fundraiser ':'Fund Raiser',
    'Fund Raiser':'Fund Raiser',
    'Book/Brochure Advertising':'Printing and Campaign Materials',
    'Media ':'Media',
    'Mail':'Mail',
    'Salaries and Other compensation':'Salaries and Other compensation',
    'Contributions':'Contributions',
    'Rent and Other Office expenses':'Rent and Other Office expenses',
    'Election ':'Candidate Expenses',
    'Printing ':'Printing and Campaign Materials',
    'Mailing Service':'Mail',
    'Candidate Expenses':'Candidate Expenses',
    'Field Expenses':'Field Expenses',
    'smol':'smol',
    'Media':'Media',
    'Printing and Campaign Materials':'Printing and Campaign Materials',
    'Billboards / Outdoor Advertising':'Media',
    'Printing Give away items (buttons bumper stickers t':'Printing and Campaign Materials',
    'IT ':'Rent and Other Office expenses',
    'Office Supplies':'Rent and Other Office expenses',
    'Consultant Fees':'Salaries and Other compensation',
    'Volunteer Meals':'Field Expenses',
    'Consulting Fees ':'Salaries and Other compensation',
    'Credit Card Service Processing Charges':'Fund Raiser',
    'Legal Fees ':'Rent and Other Office expenses',
    'Office Rent':'Rent and Other Office expenses',
    'Printing Misc. (buttons bumper stickers t':'Printing and Campaign Materials',
    'Contribution to federal committee':'Contributions',
    'Candidate Expense':'Candidate Expenses',
    'Office ':'Rent and Other Office expenses',
    'Staff ' :'Salaries and Other compensation',
    'Transfer to Other Registered political Committees':'Contributions',
    'Fair Expenses':'Field Expenses' ,
    'Payroll Company Management Expense':'Salaries and Other compensation',
    'Data Conversion':'Data Conversion',
    'Phone Bank':'Field Expenses',
    'For Close Out Only' :'Field Expenses',
    'Office Furniture' :'Rent and Other Office expenses',
    'Professional ':'Salaries and Other compensation',
    'Income Tax (Interest Income)':'Salaries and Other compensation',
    'Survey/Polls' :'Field Expenses',
    'Research ':'Field Expenses'}
# Maps reporting-period labels to sequential integer codes.
# NOTE(review): '2010 8 Day Primary' and '2008 8 Day Primary' both map to 2
# and code 5 is unused — this looks like a data-entry slip; confirm against
# the intended period ordering.
# NOTE(review): `period_dict` is reassigned further down this module (to a
# cycle-key -> code-list mapping), so this mapping is clobbered at import
# time — confirm which definition is intended to survive.
period_dict = {
    '2016 30 Day Primary':13,
    '2015 Annual':12,
    '2014 8 Day Primary':11,
    '2014 30 Day Primary':10,
    '2013 Annual':9,
    '2011 Annual':6,
    '2012 8 Day Primary':8,
    '2012 30 Day Primary':7,
    '2009 Annual':3,
    '2010 30 Day Primary':4,
    '2010 8 Day Primary':2,
    '2008 8 Day Primary':2,
    '2008 30 Day Primary':1,
    '2007 Annual':0,
    '2016 8 Day Primary':14}
# 2018-cycle reporting periods, numbered 31-36 to continue the index codes
# used for `periods2` below (see its trailing #n comments).
period_18_dict ={
    '2017 Annual':31,#31
    '2018 30 Day Primary':32,#32
    '2018 8 Day Primary':33,#33
    '2018 30 Day General':34,#34
    '2018 8 Day General':35,#35
    '2018 Annual':36}
# Raw reporting-period labels as they appear in the source filings,
# one entry per period from 2006 through 2018.
# BUG FIX(review): the original was missing commas after four entries
# (the 2010 8-Day Primary, the 2012 Primary/General pair, and the 2018
# 8-Day Primary), so Python concatenated the adjacent string literals and
# silently dropped entries from the Series.
periods = pd.Series(['2006 Annual',
    '2007 Annual',
    '2008 2008 Primary 01/09/2008 8 Day',
    '2008 2008 General 02/04/2008 30 Day',
    '2008 2008 General 11/4/2008 30 Day',
    '2008 2008 General 11/4/2008 8 Day',
    '2008 Annual',
    '2009 Annual',
    '2010 2010 Primary 09/14/2010 30 Day',
    '2010 2010 Primary 09/14/2010 8 Day',
    '2010 2010 General 11/02/2010 30 Day',
    '2010 2010 General 11/02/2010 8 Day',
    '2010 Annual',
    '2011 Annual',
    '2012 2012 Primary 09/11/2012 30 Day',
    '2012 2012 Primary 09/11/2012 8 Day',
    '2012 2012 General 11/06/2012 30 Day',
    '2012 2012 General 11/06/2012 8 Day',
    '2012 Annual',
    '2013 Annual',
    '2014 2014 Primary 09/09/2014 30 Day',
    '2014 2014 Primary 09/09/2014 8 Day',
    '2014 2014 General 11/4/2014 30 Day',
    '2014 2014 General 11/4/2014 8 Day',
    '2014 Annual',
    '2015 Annual',
    '2016 2016 Primary 09/13/2016 30 Day',
    '2016 2016 Primary 09/13/2016 8 Day',
    '2016 2016 General 11/08/2016 30 Day',
    '2016 2016 General 11/08/2016 8 Day',
    '2016 Annual',
    '2017 Annual',
    '2018 2018 Primary 09/06/2018 30 Day',
    '2018 2018 Primary 09/06/2018 8 Day',
    '2018 2018 General 11/06/2018 30 Day',
    '2018 2018 General 11/06/2018 8 Day',
    '2018 Annual'])
# Normalized period labels; the trailing #n comments give each entry's
# integer code (used by `period_dict` / `period_18_dict`).
# BUG FIX(review): the original was missing commas after the entries
# commented #11, #17 and #23, so Python concatenated each with the
# following string and silently shifted every later code — the sequential
# #n comments show 37 distinct entries (0-36) were intended.
periods2 = pd.Series(['2006 Annual',
    '2007 Annual', #1
    '2008 8 Day Primary', #2
    '2008 30 Day Primary', #3
    '2008 30 Day General', #4
    '2008 8 Day General', #5
    '2008 Annual',#6
    '2009 Annual',#7
    '2010 30 Day Primary',#8
    '2010 8 Day Primary',#9
    '2010 30 Day General',#10
    '2010 8 Day General',#11
    '2010 Annual',#12
    '2011 Annual',#13
    '2012 30 Day Primary',#14
    '2012 8 Day Primary',#15
    '2012 30 Day General',#16
    '2012 8 Day General',#17
    '2012 Annual', #18
    '2013 Annual',#19
    '2014 30 Day Primary',#20
    '2014 8 Day Primary',#21
    '2014 30 Day General',#22
    '2014 8 Day General',#23
    '2014 Annual',#24
    '2015 Annual',#25
    '2016 30 Day Primary',#26
    '2016 8 Day Primary',#27
    '2016 30 Day General',#28
    '2016 8 Day General', #29
    '2016 Annual',#30
    '2017 Annual',#31
    '2018 30 Day Primary',#32
    '2018 8 Day Primary',#33
    '2018 30 Day General',#34
    '2018 8 Day General',#35
    '2018 Annual'] )#36
# For each election cycle: 'YYYYall' -> the period codes covering the whole
# cycle, 'YYYYp' -> the primary-season codes only (codes index `periods2`).
# NOTE(review): this reassigns `period_dict`, overwriting the
# label -> code mapping defined earlier in this module — confirm which
# definition is intended to survive.
period_dict = {
    '2008all':[1,2,3,4,5,6],
    '2008p':[1,2,3],
    '2010all':[7,8,9,10,11,12],
    '2010p':[7,8,9],
    '2012all':[13,14,15,16,17,18],
    '2012p':[13,14,15],
    '2014all':[19,20,21,22,23,24],
    '2014p':[19,20,21],
    '2016all':[25,26,27,28,29,30],
    '2016p':[25,26,27],
    '2018all':[31,32,33,34,35,36],
    '2018p':[31,32,33],
    }
# Normalizes office names as they appear in filings (both bare and
# parenthesized forms) to canonical uppercase ballot names; 'X' marks
# offices excluded from the analysis.
# NOTE(review): the '(N)' and '(K)' suffixes presumably denote county-level
# offices (e.g. New Castle and Kent) — confirm against the source data docs.
office_dict = {'Governor':'GOVERNOR',
    '(Governor)':'GOVERNOR',
    'Lieutenant Governor':'LIEUTENANT GOVERNOR',
    '(Lieutenant Governor)':'LIEUTENANT GOVERNOR',
    'Insurance Commissioner':'INSURANCE COMMISSIONER',
    '(Insurance Commissioner)':'INSURANCE COMMISSIONER',
    'Attorney General':'ATTORNEY GENERAL',
    '(Attorney General)':'ATTORNEY GENERAL',
    'State Treasurer':'STATE TREASURER',
    '(State Treasurer)':'STATE TREASURER',
    'Auditor of Accounts':'AUDITOR OF ACCOUNTS',
    '(Auditor of Accounts)':'AUDITOR OF ACCOUNTS',
    'District 01 (State Senator)':'STATE SENATOR DISTRICT 1',
    'District 02 (State Senator)':'STATE SENATOR DISTRICT 2',
    'District 03 (State Senator)':'STATE SENATOR DISTRICT 3',
    'District 04 (State Senator)':'STATE SENATOR DISTRICT 4',
    'District 05 (State Senator)':'STATE SENATOR DISTRICT 5',
    'District 06 (State Senator)':'STATE SENATOR DISTRICT 6',
    'District 07 (State Senator)':'STATE SENATOR DISTRICT 7',
    'District 08 (State Senator)':'STATE SENATOR DISTRICT 8',
    'District 09 (State Senator)':'STATE SENATOR DISTRICT 9',
    'District 10 (State Senator)':'STATE SENATOR DISTRICT 10',
    'District 11 (State Senator)':'STATE SENATOR DISTRICT 11',
    'District 12 (State Senator)':'STATE SENATOR DISTRICT 12',
    'District 13 (State Senator)':'STATE SENATOR DISTRICT 13',
    'District 14 (State Senator)':'STATE SENATOR DISTRICT 14',
    'District 15 (State Senator)':'STATE SENATOR DISTRICT 15',
    'District 16 (State Senator)':'STATE SENATOR DISTRICT 16',
    'District 17 (State Senator)':'STATE SENATOR DISTRICT 17',
    'District 18 (State Senator)': 'STATE SENATOR DISTRICT 18',
    'District 19 (State Senator)':'STATE SENATOR DISTRICT 19',
    'District 20 (State Senator)':'STATE SENATOR DISTRICT 20',
    'District 21 (State Senator)':'STATE SENATOR DISTRICT 21',
    'District 01 (State Representative)':'STATE REPRESENTATIVE DISTRICT 1',
    'District 02 (State Representative)':'STATE REPRESENTATIVE DISTRICT 2',
    'District 03 (State Representative)':'STATE REPRESENTATIVE DISTRICT 3',
    'District 04 (State Representative)':'STATE REPRESENTATIVE DISTRICT 4',
    'District 05 (State Representative)':'STATE REPRESENTATIVE DISTRICT 5',
    'District 06 (State Representative)':'STATE REPRESENTATIVE DISTRICT 6',
    'District 07 (State Representative)':'STATE REPRESENTATIVE DISTRICT 7',
    'District 08 (State Representative)':'STATE REPRESENTATIVE DISTRICT 8',
    'District 09 (State Representative)':'STATE REPRESENTATIVE DISTRICT 9',
    'District 10 (State Representative)':'STATE REPRESENTATIVE DISTRICT 10',
    'District 11 (State Representative)':'STATE REPRESENTATIVE DISTRICT 11',
    'District 12 (State Representative)':'STATE REPRESENTATIVE DISTRICT 12',
    'District 13 (State Representative)':'STATE REPRESENTATIVE DISTRICT 13',
    'District 14 (State Representative)':'STATE REPRESENTATIVE DISTRICT 14',
    'District 15 (State Representative)':'STATE REPRESENTATIVE DISTRICT 15',
    'District 16 (State Representative)':'STATE REPRESENTATIVE DISTRICT 16',
    'District 17 (State Representative)':'STATE REPRESENTATIVE DISTRICT 17',
    'District 18 (State Representative)':'STATE REPRESENTATIVE DISTRICT 18',
    'District 19 (State Representative)':'STATE REPRESENTATIVE DISTRICT 19',
    'District 20 (State Representative)':'STATE REPRESENTATIVE DISTRICT 20',
    'District 21 (State Representative)':'STATE REPRESENTATIVE DISTRICT 21',
    'District 22 (State Representative)':'STATE REPRESENTATIVE DISTRICT 22',
    'District 23 (State Representative)':'STATE REPRESENTATIVE DISTRICT 23',
    'District 24 (State Representative)':'STATE REPRESENTATIVE DISTRICT 24',
    'District 25 (State Representative)':'STATE REPRESENTATIVE DISTRICT 25',
    'District 26 (State Representative)':'STATE REPRESENTATIVE DISTRICT 26',
    'District 27 (State Representative)':'STATE REPRESENTATIVE DISTRICT 27',
    'District 28 (State Representative)':'STATE REPRESENTATIVE DISTRICT 28',
    'District 29 (State Representative)':'STATE REPRESENTATIVE DISTRICT 29',
    'District 30 (State Representative)':'STATE REPRESENTATIVE DISTRICT 30',
    'District 31 (State Representative)':'STATE REPRESENTATIVE DISTRICT 31',
    'District 32 (State Representative)':'STATE REPRESENTATIVE DISTRICT 32',
    'District 33 (State Representative)':'STATE REPRESENTATIVE DISTRICT 33',
    'District 34 (State Representative)':'STATE REPRESENTATIVE DISTRICT 34',
    'District 35 (State Representative)':'STATE REPRESENTATIVE DISTRICT 35',
    'District 36 (State Representative)':'STATE REPRESENTATIVE DISTRICT 36',
    'District 37 (State Representative)':'STATE REPRESENTATIVE DISTRICT 37',
    'District 38 (State Representative)':'STATE REPRESENTATIVE DISTRICT 38',
    'District 39 (State Representative)':'STATE REPRESENTATIVE DISTRICT 39',
    'District 40 (State Representative)':'STATE REPRESENTATIVE DISTRICT 40',
    'District 41 (State Representative)':'STATE REPRESENTATIVE DISTRICT 41',
    'County Executive':'COUNTY EXECUTIVE (N)',
    '(County Executive)':'COUNTY EXECUTIVE (N)',
    'President of County Council':'PRESIDENT OF COUNTY COUNCIL (N)',
    '(President of County Council)':'PRESIDENT OF COUNTY COUNCIL (N)',
    'District 06 (County Council)':'COUNTY COUNCIL DISTRICT 6 (N)',
    'District 07 (County Council)':'COUNTY COUNCIL DISTRICT 7 (N)',
    'District 08 (County Council)':'COUNTY COUNCIL DISTRICT 8 (N)',
    'District 09 (County Council)':'COUNTY COUNCIL DISTRICT 9 (N)',
    'District 10 (County Council)':'COUNTY COUNCIL DISTRICT 10 (N)',
    'District 11 (County Council)':'COUNTY COUNCIL DISTRICT 11 (N)',
    'District 12 (County Council)': 'COUNTY COUNCIL DISTRICT 12 (N)',
    'Mayor':'MAYOR',
    '(Mayor)':'MAYOR',
    'President of City Council':'PRESIDENT OF CITY COUNCIL',
    '(President of City Council)':'PRESIDENT OF CITY COUNCIL',
    'District 01 (City Treasurer)':'CITY TREASURER',
    'District 01 City Treasurer':'CITY TREASURER',
    'At Large (City Council)':'CITY COUNCIL AT LARGE',
    'At large (City Council)':'CITY COUNCIL AT LARGE',
    'District 01 City Council':'CITY COUNCIL DISTRICT 1',
    'District 01 (City Council)':'CITY COUNCIL DISTRICT 1',
    'District 02 City Council':'CITY COUNCIL DISTRICT 2',
    'District 02 (City Council)':'CITY COUNCIL DISTRICT 2',
    'District 03 City Council':'CITY COUNCIL DISTRICT 3',
    'District 03 (City Council)':'CITY COUNCIL DISTRICT 3',
    'District 04 City Council':'CITY COUNCIL DISTRICT 4',
    'District 04 (City Council)':'CITY COUNCIL DISTRICT 4',
    'District 05 City Council':'CITY COUNCIL DISTRICT 5',
    'District 05 (City Council)':'CITY COUNCIL DISTRICT 5',
    'District 06 City Council':'CITY COUNCIL DISTRICT 6',
    'District 06 (City Council)':'CITY COUNCIL DISTRICT 6',
    'District 07 City Council':'CITY COUNCIL DISTRICT 7',
    'District 07 (City Council)':'CITY COUNCIL DISTRICT 7',
    'District 08 City Council':'CITY COUNCIL DISTRICT 8',
    'District 08 (City Council)':'CITY COUNCIL DISTRICT 8',
    'Levy Court Commissioner at Large': 'LEVY COURT AT LARGE (K)',
    '(Levy Court Commissioner at Large)': 'LEVY COURT AT LARGE (K)',
    'District 01 (District Levy Court Commissioner)':'1ST LEVY COURT DISTRICT (K)',
    'District 02 (District Levy Court Commissioner)':'2ND LEVY COURT DISTRICT (K)',
    'District 03 (District Levy Court Commissioner)':'3RD LEVY COURT DISTRICT (K)',
    'District 04 (District Levy Court Commissioner)':'4TH LEVY COURT DISTRICT (K)',
    'District 05 (District Levy Court Commissioner)':'5TH LEVY COURT DISTRICT (K)',
    'District 06 (District Levy Court Commissioner)':'6TH LEVY COURT DISTRICT (K)',
    'Commissioner':'X',
    'District 01 (Council Member)':'X',
    'Clerk of Peace': 'CLERK OF PEACE',
    'Recorder of Deeds':'RECORDER OF DEEDS',
    '(Recorder of Deeds)':'RECORDER OF DEEDS',
    'School Board Member':'X',
    'Register of Wills': 'REGISTER OF WILLS',
    '(Register of Wills)': 'REGISTER OF WILLS',
    'Comptroller':'X',
    'District 01 (County Council)':'District 01 (County Council)',
    'District 02 (County Council)':'District 02 (County Council)',
    'District 03 (County Council)':'District 03 (County Council)',
    'District 04 (County Council)':'District 04 (County Council)',
    'District 05 (County Council)':'District 05 (County Council)',
    'Sheriff':'SHERIFF',
    'District 06 Council Person':'X',
    'District 01 Council Person':'X',
    'District 03 (Council Member)':'X',
    'District 02 (Council Member)':'X',
    'District 05 Council Person':'X',
    'District 04 (Council Member)':'X',
    '(School Board Member)':'X',
    '(Sheriff)':'SHERIFF',
    '(Clerk of Peace)': 'CLERK OF PEACE',
    'District 05 (Council Person)':'X',
    '(Commissioner)':'X',
    '(City Council)':'X',
    'District E (School Board Member)':'X',
    'District B (School Board Member)':'X',
    '(Mayor_)':'X',
    'At Large (School Board Member)':'X',
    'District C (School Board Member)':'X',
    'District G (School Board Member)':'X'}
# Map the report-type flag to its file-name suffix.
type_dict = {1: 'all', 0: 'p'}


def _districts(fmt, count):
    """Expand *fmt* once per district number 1..count (inclusive)."""
    return [fmt.format(i) for i in range(1, count + 1)]


# Ordinal prefixes used by the Kent County levy-court district names.
_ORDINALS = ['1ST', '2ND', '3RD', '4TH', '5TH', '6TH']

# Canonical (normalized) office names, in ballot order.  Suffixes:
# (N) = New Castle, (K) = Kent, (X) = Sussex county offices.
offices = (
    ['UNITED STATES SENATOR (CLASS 1)',
     'UNITED STATES SENATOR (CLASS 2)',
     'REPRESENTATIVE IN CONGRESS',
     'GOVERNOR',
     'LIEUTENANT GOVERNOR',
     'INSURANCE COMMISSIONER',
     'ATTORNEY GENERAL',
     'STATE TREASURER',
     'AUDITOR OF ACCOUNTS']
    + _districts('STATE SENATOR DISTRICT {}', 21)
    + _districts('STATE REPRESENTATIVE DISTRICT {}', 41)
    + ['COUNTY EXECUTIVE (N)',
       'PRESIDENT OF COUNTY COUNCIL (N)']
    + _districts('COUNTY COUNCIL DISTRICT {} (N)', 12)
    + ['CLERK OF THE PEACE (N)',
       'REGISTER OF WILLS (N)',
       'RECORDER OF DEEDS (N)',
       'SHERIFF (N)',
       'MAYOR',
       'PRESIDENT OF CITY COUNCIL',
       'CITY TREASURER',
       'CITY COUNCIL AT LARGE']
    + _districts('CITY COUNCIL DISTRICT {}', 8)
    + ['LEVY COURT AT LARGE (K)']
    + ['{} LEVY COURT DISTRICT (K)'.format(o) for o in _ORDINALS]
    + ['CLERK OF THE PEACE (K)',
       'REGISTER OF WILLS (K)',
       'RECORDER OF DEEDS (K)',
       'SHERIFF (K)']
    + _districts('COUNTY COUNCIL DISTRICT {} (X)', 5)
    + ['CLERK OF THE PEACE (X)',
       'REGISTER OF WILLS (X)',
       'RECORDER OF DEEDS (X)',
       'SHERIFF (X)']
)
import numpy as np
import numba
import collections
import sklearn as sk
from scipy.sparse import issparse, csr_matrix, coo_matrix
from sklearn.utils import check_array, check_random_state
import fuc
import math
@numba.njit(fastmath=True,nogil=True)# nogil: Numba releases the GIL while inside this compiled function; fastmath: relaxes strict IEEE semantics to cut runtime
def SMM_e_step(a,X_rows,X_cols,X_vals,p_smm,p_smm_w,p_w_BG,probability_threshold=1e-32):
    '''
    E-step of the Simple Mixture Model EM: for every vocabulary word, compute
    the posterior probability that an occurrence of the word was generated by
    the SMM rather than the background model.

    X (X_rows/X_cols/X_vals): dtf matrix N x V in COO form.  N: number of top
        documents, V: vocabulary of the words in the top documents.
        NOTE(review): the matrix arguments are unused here; only p_smm's
        length drives the loop.
    p_smm: length-V current SMM word distribution.
    p_smm_w: length-V output buffer; overwritten in place and returned.
    p_w_BG: background model; the array index is the word id.
    a: background-model mixture weight.
    probability_threshold: threshold 1e-32 -- NOTE(review): currently unused.
    '''
    #norm=0.0
    for w in range(p_smm.shape[0]):
        # posterior = (1-a)*p_smm / ((1-a)*p_smm + a*p_BG)
        fraction=(1-a)*p_smm[w]
        denominator=(1-a)*p_smm[w]+a*p_w_BG[w]
        p_smm_w[w]=fraction/denominator
    '''
    norm+=p_smm_w[w]
    for i in range(len(p_smm_w)):
        if norm>0:
            p_smm_w[i]/=norm
    '''
    return p_smm_w
@numba.njit(fastmath=True,nogil=True)
def SMM_m_step(X_rows,X_cols,X_vals,p_smm,p_smm_w):
    '''
    M-step of the Simple Mixture Model EM: re-estimate the SMM word
    distribution from the expected counts.  p_smm[w] becomes proportional to
    the sum over all non-zero dtf entries of (term frequency * posterior from
    the E-step), normalised so the distribution sums to 1.

    X_cols/X_vals: word id and count of each non-zero dtf entry (COO form).
    p_smm: length-V buffer, zeroed then refilled in place and returned.
    p_smm_w: length-V posteriors computed by SMM_e_step.
    '''
    norm_psmm=0.0
    p_smm[:]=0.0
    for nz in range(X_vals.shape[0]):
        w=X_cols[nz]  # word id of this non-zero entry
        x=X_vals[nz]  # term frequency
        sum_temp=x*p_smm_w[w]
        p_smm[w]+=sum_temp
        norm_psmm+=sum_temp
    for i in range(len(p_smm)):
        if norm_psmm>0:  # guard against an all-zero matrix
            p_smm[i]/=norm_psmm
    return p_smm
@numba.njit(fastmath=True, nogil=True)
def log_likelihood(a, X_rows, X_cols, X_vals, p_smm, p_w_BG):
    '''
    Log-likelihood of the data under the mixture (1-a)*p_smm + a*p_w_BG.

    FIX: the original multiplied raw probabilities with math.pow, which
    underflows to 0.0 for any realistically sized corpus even though the
    function is named *log*_likelihood.  Summing x*log(p) is the standard,
    numerically stable formulation (monotonic in the old value, so it is
    still usable for convergence monitoring).

    Assumes the mixture probability is > 0 for every observed word, which
    holds whenever a > 0 and the background model covers the vocabulary.
    '''
    result = 0.0
    for nz in range(X_vals.shape[0]):
        w = X_cols[nz]   # word id of this non-zero dtf entry
        x = X_vals[nz]   # term frequency
        result += x * math.log((1 - a) * p_smm[w] + a * p_w_BG[w])
    return result
def SMM_fit(X, a, n_iter, p_w_BG):
    """Run n_iter EM iterations of the Simple Mixture Model on dtf matrix X.

    X: N x V term-document matrix (dense or sparse).
    a: background-model mixture weight.
    p_w_BG: length-V background word distribution.
    Returns the word indices sorted by *descending* SMM probability.
    """
    # Random initialisation, normalised to a probability distribution.
    # (The original also allocated a zeros array first, which the random
    # init immediately overwrote -- removed as dead code.)
    p_smm = np.random.random(size=(X.shape[1]))
    fuc.normalize(p_smm)
    p_smm_w = np.empty(X.shape[1], dtype=np.float64)
    # BUG FIX: the original called check_array(term_doc_matrix, ...) --
    # a module-level global -- instead of the X parameter.
    X = check_array(X, accept_sparse="csr")
    if not issparse(X):
        X = csr_matrix(X)
    X = X.tocoo()
    for _ in range(n_iter):
        p_smm_w = SMM_e_step(a,
                             X.row,
                             X.col,
                             X.data,
                             p_smm,
                             p_smm_w,
                             p_w_BG)
        p_smm = SMM_m_step(X.row,
                           X.col,
                           X.data,
                           p_smm,
                           p_smm_w)
        #print(log_likelihood(a,X.row,X.col,X.data,p_smm,p_w_BG))
    # Return word ids ordered from most to least probable under the SMM.
    order = p_smm.argsort()
    return order[::-1]
class SMM():
    """Thin configuration wrapper around SMM_fit.

    n_iter: number of EM iterations to run.
    m: number of expansion terms kept by callers (stored, not used here).
    """

    def __init__(self, n_iter=100, m=50):
        self.m = m
        self.n_iter = n_iter

    def fit(self, X, a, p_w_BG):
        """Fit the mixture model on dtf matrix X with background weight a."""
        return SMM_fit(X, a, self.n_iter, p_w_BG)
if __name__ == '__main__':
    # Query-expansion driver: for each query, fit an SMM over its top-ranked
    # documents and append the 40 most probable expansion words to the query
    # file.  Expects prefit.txt / query_list.txt / doc_list.txt / BGLM.txt
    # plus the Query/ and Document/ directories in the working directory.
    doc_list = []
    top_list = []
    query_list = []
    querys_word = []

    # prefit.txt: one line of top-ranked document ids per query.
    with open("prefit.txt") as f:
        for line in f:
            top_list.append(line.strip('\n').split())
    with open("query_list.txt") as f:
        for line in f:
            query_list.append(line.strip('\n'))
    Q = len(query_list)
    for q in range(Q):
        path_f = "Query/" + query_list[q]
        W_list, Q_dict = fuc.getword_tf(path_f)
        querys_word.append(W_list)
    with open("doc_list.txt") as f:
        for line in f:
            doc_list.append(line.strip('\n'))
    # BGLM.txt: background language model, one "word_id log_prob" per line;
    # exponentiate to get raw probabilities (array index == word id).
    p_w_BG = []
    with open("BGLM.txt") as f:
        for line in f:
            (key, val) = line.split()
            p_w_BG.append(math.exp(float(val)))
    p_w_BG = np.array(p_w_BG, dtype=np.float64)

    for t in range(len(top_list)):
        # Query files are named 40001.query, 40010.query, 40100.query, ...
        if t < 9:
            f = open("Query/4000" + str(t + 1) + ".query", "a")
        elif t >= 9 and t < 99:
            f = open("Query/400" + str(t + 1) + ".query", "a")
        else:
            f = open("Query/40" + str(t + 1) + ".query", "a")
        print("q:" + str(t))
        docs_word = []
        doc_w_index_dict = {}
        # Arbitrarily nested defaultdict for per-document term counts.
        doc = lambda: collections.defaultdict(doc)
        doc_dict = doc()
        for nz in top_list[t]:
            path_f = "Document/" + doc_list[int(nz)]
            W_list, D_dict, doc_w_index_dict = fuc.getword_tf_index(path_f, doc_w_index_dict, True)
            docs_word.append(W_list)
            doc_dict[nz] = D_dict
        N = len(docs_word)
        V = len(doc_w_index_dict)
        term_doc_matrix = np.zeros((N, V), dtype=np.float64)
        p_w_BG1 = np.zeros(V, dtype=np.float64)
        for n in range(len(docs_word)):
            # Deduplicate while preserving first-seen order.
            doc_norepeat_word = list({}.fromkeys(docs_word[n]).keys())
            for w in doc_norepeat_word:
                w_index = doc_w_index_dict.get(w, None)
                if w_index is None:  # fixed: identity check instead of == None
                    print("index=0 error" + w + " w_index:" + str(n))
                count = doc_dict[top_list[t][n]].get(w, 0)
                term_doc_matrix[n][w_index] = count
                p_w_BG1[w_index] = p_w_BG[int(w)]
        # Top-40 expansion word indices by SMM probability.
        p_smm = SMM().fit(term_doc_matrix, 0.5, p_w_BG1)
        p_smm = p_smm[:40]
        # FIX: dict.iteritems() is Python 2 only -- use items().
        # (A dead "p_smm.tolist()" whose result was discarded was removed.)
        doc_w_index_dict = {v: k for k, v in doc_w_index_dict.items()}
        for item in p_smm:
            f.write(doc_w_index_dict.get(item, None) + " ")
        f.write("\n")
        f.close()  # FIX: the original leaked one handle per query
import pandas as p

# Build a tiny demo DataFrame from column-wise lists and display it.
d = {
    'NAME': ['Tom', 'Jack', 'Steve', 'Ricky'],
    'AGE': [28, 34, 29, 42],
}
data = p.DataFrame(d)
print(data)
from odoo import models, fields, api, _
from datetime import date
import datetime
class student_payment_report_wiz(models.TransientModel):
    """Transient wizard that gathers a student's invoices and vouchers for a
    date range, computes running/credit/debit totals, and renders or e-mails
    the resulting payment report."""
    _name = 'student.payment.report.wiz'

    student_id = fields.Many2one('res.partner', string="Student Name")
    date_from = fields.Date(string="Date From")
    # FIX: keyword was misspelled 'sring', so the field label never applied.
    date_to = fields.Date(string="Date To")
    invoice_ids = fields.Many2many('account.invoice', string="Invoices")
    voucher_ids = fields.Many2many('account.voucher', string="Vouchers")
    date_today = fields.Date(string="Todays Date")
    past_balance = fields.Float(string="Past balance")
    user = fields.Many2one('res.users', string="Current User")
    # running_balance_credit=fields.Float(string="Running balance after all creadit lines")
    current_running_balance = fields.Float(string="Current Running balance")
    total_credit_amount = fields.Float(string="Credit")
    total_debit_amount = fields.Float(string="Debit")
    _defaults = {'current_running_balance': 0.0}

    @api.multi
    def calc_current_running_balance(self, total, paid):
        """Accumulate (total - paid) into the running balance, rounded to 2 dp."""
        self.current_running_balance = self.current_running_balance + total - paid
        self.current_running_balance = float("{0:.2f}".format(self.current_running_balance))
        return self.current_running_balance

    @api.multi
    def calc_credit(self, credit):
        """Add a non-zero credit amount to the report's credit total."""
        if credit != 0.0:
            self.total_credit_amount += credit

    @api.multi
    def calc_debit(self, debit):
        """Add a non-zero debit amount to the report's debit total."""
        if debit != 0.0:
            self.total_debit_amount += debit

    @api.multi
    def line_details(self, invoices, vouchers, from_date, to_date):
        """Merge invoices and vouchers into one chronologically sorted list of
        report lines (dicts with date / invoice_number / description / debit /
        credit), updating the credit/debit totals along the way."""
        invoice_voucher_list = []
        final_inv_vouch_list = []
        # Tag each record with its display date so both models can be merged.
        for inv in invoices:
            inv_date = datetime.datetime.strptime(inv.date_invoice, "%Y-%m-%d").strftime("%d/%m/%Y")
            invoice_voucher_list.append({'date': inv_date, 'object': 'account.invoice', 'rec_id': inv})
        for vouch in vouchers:
            vouch_date = datetime.datetime.strptime(vouch.date, "%Y-%m-%d").strftime("%d/%m/%Y")
            invoice_voucher_list.append({'date': vouch_date, 'object': 'account.voucher', 'rec_id': vouch})
        for data in invoice_voucher_list:
            if 'account.invoice' in data['object']:
                if data['rec_id'].type == 'out_refund':
                    # Customer refund: every invoice line counts as credit.
                    for inv_line in data['rec_id'].invoice_line_ids:
                        final_inv_vouch_list.append({'date': data['date'],
                                                     'invoice_number': data['rec_id'].number,
                                                     'description': inv_line.product_id.name,
                                                     'debit': 0.0,
                                                     'credit': inv_line.price_unit})
                        self.calc_credit(inv_line.price_unit)
                else:
                    # Regular customer invoice: lines are debits ...
                    for inv_line in data['rec_id'].invoice_line_ids:
                        final_inv_vouch_list.append({'date': data['date'],
                                                     'invoice_number': data['rec_id'].number,
                                                     'description': inv_line.product_id.name,
                                                     'debit': inv_line.price_unit,
                                                     'credit': 0.0})
                        self.calc_debit(inv_line.price_unit)
                    # ... and its payments are credits (within the range).
                    for pay_line in data['rec_id'].payment_ids:
                        pay_date = datetime.datetime.strptime(pay_line.date, "%Y-%m-%d").strftime("%d/%m/%Y")
                        # flag: the payment move contains a zero debit/credit
                        # line -- presumably a reconciliation artefact.
                        flag = False
                        for move_line in pay_line.move_id.line_id:
                            if move_line.debit == 0.00 and move_line.credit == 0.00:
                                flag = True
                        if flag:
                            for move_line in pay_line.move_id.line_id:
                                if pay_line.date >= from_date and pay_line.date <= to_date:
                                    final_inv_vouch_list.append({'date': pay_date,
                                                                 'invoice_number': data['rec_id'].number,
                                                                 'description': pay_line.ref,
                                                                 'debit': move_line.debit,
                                                                 'credit': move_line.credit})
                                    if move_line.debit != 0.00:
                                        self.calc_debit(move_line.debit)
                                    if move_line.credit != 0.00:
                                        self.calc_credit(move_line.credit)
                        if not flag:
                            # Skip payments that came from a voucher: those are
                            # reported in the voucher branch below.
                            voucher = False
                            for move_line in pay_line.move_id.line_id:
                                if move_line.reconcile_ref and move_line.name:
                                    voucher = True
                            if voucher:
                                continue
                            else:
                                if pay_line.date >= from_date and pay_line.date <= to_date:
                                    final_inv_vouch_list.append({'date': pay_date,
                                                                 'invoice_number': data['rec_id'].number,
                                                                 'description': pay_line.ref,
                                                                 'debit': 0.0,
                                                                 'credit': pay_line.credit})
                                    self.calc_credit(pay_line.credit)
            elif 'account.voucher' in data['object']:
                if len(data['rec_id'].line_cr_ids) == 0 or (len(data['rec_id'].line_cr_ids) != 0 and data['rec_id'].amount != 0.0):
                    # Advance payment received.
                    final_inv_vouch_list.append({'date': data['date'],
                                                 'invoice_number': 'Advance Payment',
                                                 'description': data['rec_id'].number,
                                                 'debit': 0.0,
                                                 'credit': data['rec_id'].amount})
                    self.calc_credit(data['rec_id'].amount)
                    # NOTE(review): pdc_rec is fetched but never used; the
                    # refund entry below is appended unconditionally -- verify
                    # whether it should depend on a matching pdc.detail record.
                    pdc_rec = self.env['pdc.detail'].search([('voucher_id', '=', data['rec_id'].id)])
                    # code to add refund pdc entry on student report
                    final_inv_vouch_list.append({'date': data['date'],
                                                 'invoice_number': 'Refund Advance Payment',
                                                 'description': data['rec_id'].number,
                                                 'credit': 0.0,
                                                 'debit': data['rec_id'].amount})
                    self.calc_debit(data['rec_id'].amount)
        final_inv_vouch_list.sort(key=lambda x: datetime.datetime.strptime(x['date'], '%d/%m/%Y'))
        return final_inv_vouch_list

    @api.multi
    def open_report(self):
        """Populate the wizard (invoices, vouchers, past balance) for the
        selected student/date range and return the PDF report action."""
        invoice_ids = self.env['account.invoice'].search([('partner_id', '=', self.student_id.id), ('state', 'in', ['open', 'paid']), ('date_invoice', '>=', self.date_from), ('date_invoice', '<=', self.date_to)])
        voucher_ids = self.env['account.voucher'].search([('partner_id', '=', self.student_id.id), ('state', '=', 'posted'), ('date', '>=', self.date_from), ('date', '<=', self.date_to)])
        # Opening balance: receivable/payable move lines before the range.
        past_balance = 0.0
        move_line_ids = self.env['account.move.line'].search([('partner_id', '=', self.student_id.id), ('date', '<', self.date_from), ('journal_id.type', '!=', 'situation'), '|', ('account_id.type', '=', 'receivable'), ('account_id.type', '=', 'payable')])
        for move_line in move_line_ids:
            if move_line.credit == 0.0 and move_line.debit == 0.0:
                continue
            elif move_line.credit != 0.0:
                past_balance -= move_line.credit
            elif move_line.debit != 0.0:
                past_balance += move_line.debit
        self.user = self._uid
        self.past_balance = past_balance
        # self.invoice_before_from=[(6,0,invoice_before_from.ids)]
        self.invoice_ids = [(6, 0, invoice_ids.ids)]
        self.voucher_ids = [(6, 0, voucher_ids.ids)]
        self.date_today = date.today()
        value = {
            'type': 'ir.actions.report.xml',
            'report_name': 'edsys_edu_fee.report_student_payment',
            'datas': {
                'model': 'student.payment.report.wiz',
                'id': self.id,
                'ids': [self.id],
                'report_type': 'pdf',
                'report_file': 'edsys_edu_fee.report_student_payment'
            },
            'name': self.student_id.student_id+'_ Payment Report' ,
            'nodestroy': True
        }
        return value

    @api.multi
    def send_student_report(self):
        """E-mail the payment report to the student's primary parent using the
        configured outgoing mail server and the module's mail template."""
        if self.student_id:
            email = self.student_id.parents1_id.parents_email
            # (removed unused local mail_obj = self.env['mail.mail'])
            email_server = self.env['ir.mail_server']
            email_sender = email_server.search([])
            ir_model_data = self.env['ir.model.data']
            template_id = ir_model_data.get_object_reference('edsys_edu_fee', 'email_template_send_student_report_by_email')[1]
            template_rec = self.env['mail.template'].browse(template_id)
            template_rec.write({'email_to' : email,'email_from':email_sender.smtp_user, 'email_cc': ''})
            template_rec.send_mail(self.id, force_send=True)
|
#!/usr/bin/env python2
from collections import defaultdict
import logging
import json
import os
from os import path
import sys
# Cross-check a builder/machine allocation directory: the per-builder files,
# per-machine files, and the global allocated/all list must all agree.
d = sys.argv[1]

allocated = set()
builders = defaultdict(set)
machines = defaultdict(set)

# Load the three views of the allocation.
allocated.update(json.load(open(path.join(d, "allocated", "all")))["machines"])
for m in os.listdir(path.join(d, "machines")):
    for b in json.load(open(path.join(d, "machines", m)))["builders"]:
        machines[m].add(b)
for b in os.listdir(path.join(d, "builders")):
    for m in json.load(open(path.join(d, "builders", b)))["machines"]:
        builders[b].add(m)

# Verify that every builder's machine allocations match the machines'.
# FIX: the original ran this check once, using the leftover loop variable `b`
# from the load loop above -- only the last-loaded builder was ever verified.
for b in builders:
    builder_machines = set()
    for m in machines:
        if b in machines[m]:
            builder_machines.add(m)
    if builders[b] != builder_machines:
        logging.warning("Mismatch in builder machines for %s: %s", b, sorted(builders[b].difference(builder_machines)))

# Verify that machine allocations match the builders', and also obey our other constraints
for m in machines:
    machine_builders = set()
    for b in builders:
        if m in builders[b]:
            machine_builders.add(b)
    if machines[m] != machine_builders:
        logging.warning("Mismatch in machine builders for %s: %s", m, sorted(machines[m].difference(machine_builders)))
    if len(machine_builders) > 2:
        logging.warning("%s has more than 2 builders", m)

# And verify that all of the machines in machines/ are listed in the list of allocated machines
all_machines = set(machines.keys())
if allocated != all_machines:
    logging.warning("Mismatch in allocated/all vs. machines/*: %s, %s", sorted(allocated.difference(all_machines)), sorted(all_machines.difference(allocated)))

# Parenthesized prints work identically under Python 2 and 3.
for m in sorted(machines):
    print("%s is allocated to %s builders" % (m, len(machines[m])))
for b in sorted(builders):
    print("%s has %s machines allocated to it" % (b, len(builders[b])))
|
import cv2 as cv
# Live background subtraction from the default camera using the GMG algorithm.
capture = cv.VideoCapture(0)

nb_frames = 30  # number of frames used to initialize the background models
de_th = 0.9  # Threshold value, above which it is marked foreground, else background.
gmgSubtractor = cv.bgsegm.createBackgroundSubtractorGMG(nb_frames, de_th)

while True:
    ret, frame = capture.read()
    # FIX: read() can fail (no camera / stream ended); frame would be None and
    # GaussianBlur would crash, so stop the loop cleanly instead.
    if not ret:
        break
    frame = cv.GaussianBlur(frame, (5, 5), 0)  # denoise before subtraction
    fgmask = gmgSubtractor.apply(frame)
    cv.imshow('frame', frame)
    cv.imshow('fgmask', fgmask)
    k = cv.waitKey(20) & 0xff
    if k == 27:  # ESC quits
        break

capture.release()
cv.destroyAllWindows()
# Generated by Django 3.2.3 on 2021-06-21 05:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Product`` model with
    foreign keys into the ``brand`` and ``category`` apps."""

    initial = True

    dependencies = [
        ('brand', '0001_initial'),
        ('category', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('display_name', models.CharField(blank=True, max_length=255, null=True)),
                # slug is derived (editable=False) and unique only per creation date
                ('slug', models.SlugField(blank=True, default='', editable=False, max_length=255, unique_for_date='created')),
                ('desc', models.TextField(blank=True, null=True)),
                ('spec', models.TextField(blank=True, null=True)),
                ('is_active', models.BooleanField(default=False)),
                ('is_featured', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                # reverse accessors: brand.products / category.products
                ('brand', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='brand.brand')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='category.category')),
            ],
        ),
    ]
|
# Interactive Armstrong-number checker ('q' quits).  A number is an Armstrong
# number when the sum of its digits, each raised to the number of digits,
# equals the number itself.
while True:
    girdi = input("Sayı girin (Çıkmak için 'q' ya basın.) : ")
    if girdi == "q":
        break
    # FIX: non-numeric input used to crash int(); skip such entries instead.
    if not girdi.isdigit():
        continue
    uzunluk = len(girdi)
    toplam = sum(int(basamak) ** uzunluk for basamak in girdi)
    if toplam == int(girdi):
        print("Girdiğiniz Sayı Bir Armstrong Sayıdır!")
    else:
        print("Girdiğiniz Sayı Armstrong Bir Sayı Değildir! ")
|
# Check whether the second number equals the last digit of the first.
num = int(input("enter the number:"))
num2 = int(input("enter the number:"))

matches_last_digit = (num % 10 == num2)
if matches_last_digit:
    print(num2)
else:
    print("wrong")
|
import math
n = 1000  # test primality for every number from 2 to 1000

# Start by assuming every number is prime (0 and 1 are simply never printed).
array = [True for _ in range(n + 1)]

# Sieve of Eratosthenes: check every number from 2 up to sqrt(n).
# FIX: int(math.sqrt(n) + 1) had the +1 inside the truncation; the correct
# bound is int(math.sqrt(n)) + 1 (they coincide for n=1000 but not generally).
for i in range(2, int(math.sqrt(n)) + 1):
    if array[i]:
        # Mark every multiple of i (except i itself) as composite.
        # Multiples below i*i were already cleared by smaller primes,
        # so starting at j = i skips redundant work.
        j = i
        while i * j <= n:
            array[i * j] = False
            j += 1

# Print all primes.
for i in range(2, n + 1):
    if array[i]:
        print(i, end=' ')
# This code illustrates a closure example with help of a nested function. Prints 9x5=45.
def multipier_of(n):
    """Return a function that multiplies its argument by n (closure over n)."""
    def multipler(number):
        return number * n
    return multipler

solpart = multipier_of(5)
# FIX: 'print solpart (9)' was Python-2-only syntax; the parenthesized call
# below behaves identically under both Python 2 and Python 3.
print(solpart(9))
|
#! /usr/bin/env python3
import sys, re
# Summarise per-customer stats from a log file given on the command line.
with open(sys.argv[1]) as f:
    flines = f.readlines()
# Extract six counters plus the customer name from each JSON-ish line.
# Groups: 1=month, 2=day, 3=nocomment, 4=false, 5=confirmed, 6=total, 7=name.
p = re.compile(r'"month".+?(\d+).+"day".+?(\d+).+"nocomment".+?(\d+).+"false".+?(\d+).+"confirmed".+?(\d+).+"total".+?(\d+).+"name".+?"(.+?)"')
print('customer month/day total confirmed%')
print('---------------------------------------------')
for line in flines:
    r = p.search(line)
    l = []
    if r:
        # l = [name, month, day, nocomment, false, confirmed, total, then the
        # three percentages appended below].  NOTE(review): l only feeds the
        # commented-out debug print; the report line is built from the raw
        # groups directly.
        l.append(r.group(7))
        for i in range(1, 7):
            try:
                l.append(int(r.group(i)))
            except ValueError:
                l.append(r.group(i))
        # Percentages of nocomment/false/confirmed relative to total.
        # NOTE(review): raises ZeroDivisionError when total is 0 -- confirm
        # the input never contains a zero total.
        for i in l[3:6]:
            a = i / l[6]
            l.append(round(a * 100, 2))
        #print(l)
        print('"' + r.group(7) + '"', r.group(1) + "/" + r.group(2), r.group(6), round(int(r.group(5)) / int(r.group(6)) * 100, 2))
|
'''
Created on May 15, 2013
@author: ivano.ras@gmail.com
MIT License
'''
import sys
from time import sleep
from PyQt4.QtGui import qRgb, QVector3D
from PyQt4.QtCore import QThread, SIGNAL, QString
import MathTools
class REngineThread (QThread):
    '''
    (threaded) engine model class.

    Renders the scene onto a QImage by casting camera rays and intersecting
    them with the scene's triangle list; progress and debug geometry are
    reported to the GUI through custom Qt signals.
    '''

    def __init__(self):
        '''
        REngineThread constructor.
        '''
        QThread.__init__ (self)

        self.__Origin = [0, 0, 0]
        self.__OriginWorld = []

        # custom signals
        self.__SIGNAL_Update = SIGNAL ('update(float)')
        self.__SIGNAL_ThreadCompleted = SIGNAL ('thread_completed()')
        self.__SIGNAL_IntersCreated = SIGNAL ('inters_created (PyQt_PyObject, PyQt_PyObject)')
        self.__SIGNAL_VectorCreated = SIGNAL ('vector_created (PyQt_PyObject, PyQt_PyObject, QString)')
        self.__SIGNAL_LineCreated = SIGNAL ('line_created (PyQt_PyObject, PyQt_PyObject, QString)')

        self.__poly_model_e = None
        self.__poly_list_e = []

        # main loop flags + loop state vars
        self.is_stopped = False
        self.is_paused = False
        self.resetLoopReferences ()

    def setImage (self, image):
        '''
        Stores the target QImage and derives width, height and aspect ratio.
        '''
        self.__image = image
        self.__width = self.__image.width ()
        self.__height = self.__image.height ()
        if self.__width > self.__height and self.__height > 0: self.__aspect_ratio = float(self.__width)/float(self.__height)
        elif self.__height > self.__width and self.__width > 0: self.__aspect_ratio = float(self.__height)/float(self.__width)
        elif self.__height == self.__width and self.__width > 0: self.__aspect_ratio = 1.0
        # FIX: sys.exit() already raises SystemExit; the former
        # "raise sys.exit(...)" never reached its raise.
        else: sys.exit ("*** Something wrong with the chosen resolution. Width = " + str(self.__width) + " Height = " + str(self.__height))

    def setCameraNormalMatrix (self, camera_normal_transform_mtx, fovy):
        '''
        Stores the camera's normal transform matrix and derives the math
        helper, the view angle and the camera origin in world space.
        '''
        self.__fovy = fovy
        self.__normal_mtx = camera_normal_transform_mtx
        self.__engine_mtools = MathTools.Tools (self.__normal_mtx)
        self.__angle = self.__engine_mtools.getAngle (self.__fovy)
        self.__world_origin = self.__engine_mtools.cameraToWorldTransform (0, 0, 0)

    def __del__(self):
        '''
        REngineThread destructor. It makes sure the thread stops processing before it gets destroyed.
        '''
        self.wait()

    def run (self):
        '''
        usual thread method 'run'
        '''
        self.render ()
        #self.terminate ()

    def render (self):
        '''
        this method provides the chosen view's camera rays to the core_render method.
        Supports pause (saves loop indices, resumes where it left off) and stop.
        '''
        self.is_stopped = False

        inv_w = 2.0/self.__width
        inv_h = 2.0/self.__height
        h = 0.5    # half-pixel offset: sample through pixel centres
        m = -1.0   # camera-space z of the image plane
        # (removed unused locals a and ff)
        angle_times_aspect_ratio = self.__angle * self.__aspect_ratio  # loop invariant

        while True:
            if self.is_paused:
                sleep (0.25)
            else:
                for j in range (self.j_copy, self.__height):
                    for i in range (self.i_copy, self.__width):
                        # set up basic camera rays.
                        w_param = ((h + i)*inv_w-1) * angle_times_aspect_ratio
                        h_param = (1-(h + j)*inv_h) * self.__angle
                        world_ray = self.__engine_mtools.cameraToWorldTransform (w_param, h_param, m)
                        ray_dir = [world_ray[0] - self.__world_origin[0],
                                   world_ray[1] - self.__world_origin[1],
                                   world_ray[2] - self.__world_origin[2]]
                        ray_dir_norm = self.__engine_mtools.normalise (ray_dir)
                        # core rendering bit.
                        self.core_render_test2 (i, j, ray_dir_norm, ray_dir)
                        # inner loop control
                        if self.is_stopped: break # out of the inner loop
                        if self.is_paused:
                            self.saveLoopReferences (i, j)
                            break # out of the inner loop
                    # mid loop control
                    if self.is_stopped: break # out of the mid loop
                    if self.is_paused:
                        break # out of the mid loop
                    else:
                        self.i_copy = 0 # normal behaviour
                        # update screen every 10 lines
                        # NOTE(review): the unconditional emit below makes the
                        # j%10 emit redundant (kept to preserve behaviour).
                        if j%10==0: self.emit (self.__SIGNAL_Update, float(j)/float(self.__height))
                        self.emit (self.__SIGNAL_Update, float(j)/float(self.__height))
                # outer loop control
                if not self.is_stopped and not self.is_paused: # if and only if the rendering was completed then fire this signal away.
                    self.emit (self.__SIGNAL_ThreadCompleted)
                    self.resetLoopReferences ()
                    break # out of the outer loop
                if self.is_stopped:
                    self.resetLoopReferences ()
                    break # out of the outer loop

    def core_render_test2 (self, i, j, ray_dir_norm, ray_dir):
        '''
        This method plainly displays some camera rays intersections with any polygon in the scene. No optimisation here, just brute force approach.
        '''
        if j%10 == 0 and i%10 == 0: # display to screen every 10 lines 10 pixels apart.
            tmp_isect_param = self.intersectRayTriangles (self.__world_origin, ray_dir_norm)
            if tmp_isect_param is None:
                self.__image.setPixel (i, j, qRgb (0, 0, 0))
            else:
                self.__image.setPixel (i, j, qRgb (255, 255, 0))
                # position = self.__world_origin, orientation = ray_dir_norm
                # fire inters_created signal : payload -> position in space, color
                intersections_pos = [self.__world_origin[0] + ray_dir_norm[0]*tmp_isect_param,
                                     self.__world_origin[1] + ray_dir_norm[1]*tmp_isect_param,
                                     self.__world_origin[2] + ray_dir_norm[2]*tmp_isect_param]
                # fire line_created signal : payload -> line origin in space, line direction, line type
                self.emit (self.__SIGNAL_LineCreated, self.__world_origin, intersections_pos, QString('p'))
                self.emit (self.__SIGNAL_IntersCreated, intersections_pos, [0,0,255])
                # fire vector_created signal : payload -> vector's origin in space, vector direction, vector's type (o:outwards, i:inwards)
                self.emit (self.__SIGNAL_VectorCreated, intersections_pos, ray_dir_norm, QString('i'))

    def intersectRayTriangles (self, orig, dir):
        '''
        method dealing with ray-triangles intersections.
        @param orig 3-list
        @param dir 3-list
        @return closest_intersection_param float or None when nothing is hit
        '''
        closest_intersection_param = None # closest intersection to camera origin.
        orig_v = QVector3D (orig[0], orig[1], orig[2])
        dir_v = QVector3D ( dir[0], dir[1], dir[2])
        intersections_list = []
        for pl in self.__poly_list_e:
            isect_t = self.intersect (orig_v, dir_v, pl)
            if isect_t is not None:
                intersections_list.append ([pl, isect_t])
        # keep only the smallest intersection parameter (nearest hit)
        tmp = 100000
        if len(intersections_list) > 0:
            for isectn in intersections_list:
                if isectn[1] < tmp:
                    tmp = isectn[1]
            closest_intersection_param = tmp
        return closest_intersection_param

    def intersect (self, orig_v, dir_v, pl):
        '''
        method performing ray-triangle intersection (Moller-Trumbore algorithm)
        @param orig_v QVector3D
        @param dir_v QVector3D
        @param pl triangle as a 3-sequence of QVector3D vertices
        @return isect_t float or None
        '''
        e1 = pl[1] - pl[0]
        e2 = pl[2] - pl[0]
        p = QVector3D.crossProduct (dir_v, e2)
        p_dot_e1 = QVector3D.dotProduct (p, e1)
        if p_dot_e1 == 0:
            return None    # ray parallel to the triangle plane
        inv_p_dot_e1 = 1.0 / p_dot_e1
        t = orig_v - pl[0]
        isect_u = inv_p_dot_e1 * QVector3D.dotProduct (p, t)
        if isect_u<0 or isect_u>1:
            return None
        q = QVector3D.crossProduct (t, e1)
        isect_v = inv_p_dot_e1 * QVector3D.dotProduct (q, dir_v)
        if isect_v<0 or isect_u + isect_v>1:
            return None
        isect_t = inv_p_dot_e1 * QVector3D.dotProduct (e2, q)
        return isect_t

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    def setModel (self, model):
        '''
        This method transfers a copy of the model (polygons list) to the REngineThread class instance.
        '''
        self.__poly_model_e = model
        self.__poly_list_e = model.getPolyListCopy ()

    def setIsStoppedFlag (self, boo): self.is_stopped = boo
    def setIsPausedFlag (self, boo): self.is_paused = boo

    def resetLoopReferences (self):
        self.i_copy = 0
        self.j_copy = 0

    def saveLoopReferences (self, i, j):
        self.i_copy = i
        self.j_copy = j

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    def core_render_test1 (self, i, j, ray_dir_norm, ray_dir):
        '''
        This method just "renders" plain vector directions from the center of the camera.
        No fancy user controls. Just a sweep.
        '''
        ff = 255; a = 1; h = 0.5;
        intersections_pos = [self.__world_origin[0] + ray_dir[0],
                             self.__world_origin[1] + ray_dir[1],
                             self.__world_origin[2] + ray_dir[2]]
        self.__image.setPixel (i, j, qRgb ((ff * (a + ray_dir[0]) * h), (ff * (a + ray_dir[1]) * h), 0))
        if j%100 == 0 and i%100 == 0: # display to screen every 10 lines 10 pixels apart.
            # fire line_created signal : payload -> line origin in space, line direction, line type
            # position = self.__world_origin, orientation = world_ray
            self.emit (self.__SIGNAL_LineCreated, self.__world_origin, ray_dir, QString('o'))
            # fire vector_created signal : payload -> vector's origin in space, vector direction, vector's type (o:outwards, i:inwards)
            self.emit (self.__SIGNAL_VectorCreated, self.__world_origin, ray_dir, QString('o'))
            # fire inters_created signal : payload -> position in space, color
            # FIX: the attribute is __SIGNAL_IntersCreated (see __init__);
            # the original's __SIGNAL_IntersectCreated raised AttributeError.
            self.emit (self.__SIGNAL_IntersCreated, intersections_pos, [0,0,ff])
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 28 2019
@author: Stacy Bridges
This script does a few different things:
it uses country matcher on a long text
it analyzes the syntax
it updates doc entities with the matched countries
"""
import spacy
import json
from spacy.lang.en import English
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span
def main():
    """Match country names in a text with a PhraseMatcher, register every
    match as a GPE entity on the doc, and print the results.

    Reads countries.json (a JSON list of country names) and country_text.txt
    from the working directory.
    """
    with open('countries.json') as f:
        COUNTRIES = json.loads(f.read())
    with open('country_text.txt') as f:
        TEXT = f.read()
    nlp = English()
    matcher = PhraseMatcher(nlp.vocab)
    # one pattern Doc per country name
    patterns = list(nlp.pipe(COUNTRIES))
    matcher.add('COUNTRY', None, *patterns)
    # Create a doc and find matches in it
    doc = nlp(TEXT)
    # test print of ents
    print('test print of ents: -----------------------------')
    print([(ent.text, ent.label_) for ent in doc.ents])
    print('\n')
    # Iterate over the matches
    print('iterate over the matches: -----------------------')
    for match_id, start, end in matcher(doc):
        # Create a Span with the label for 'GPE'
        span = Span(doc, start, end, label = 'GPE')
        # Overwrite doc.ents by appending the new span
        doc.ents = list(doc.ents) + [span]
        # Get the span's root head token
        span_root_head = span.root.head
        # Print the text of the span root's
        # head token and the span text
        print(span_root_head.text, '-->', span.text)
    # print spacer
    print('\n')
    # Print the entities in the document
    print('fin print of ents: -----------------------------')
    print([(ent.text, ent.label_) for ent in doc.ents if ent.label_ == 'GPE'])

if __name__ == '__main__' : main()
|
# coding: utf-8
# In[2]:
#taanilan git: valmistelu.ipynb
#https://github.com/taanila/tilastoapu/blob/master/valmistelu.ipynb
import pandas as pd
# In[3]:
df = pd.read_excel('http://taanila.fi/data1.xlsx')
df.head() # first 5 rows
# In[9]:
df.tail() # last 5 rows; the index reveals the total row count
# In[24]:
print( df.shape ) # number of data rows and columns
print (df.columns) # column names
# In[21]:
# Assign a list of new column names (lists always go in square brackets).
# NOTE(review): 'k' appears twice in this list, and later cells use the
# original names ('sukup', 'ikä', ...) -- the notebook cells were evidently
# not run in this order.
df.columns = ['nro', 's', 'i', 'p', 'k', 'pv', 'pk',
       'j', 'tto', 'ty', 'palkkat', 'tt', 'tte', 'l',
       'k', 'h']
print (df.columns)
# In[31]:
df.rename(columns={'työymp':'työymp_id','perhe':'perhe_id'}, inplace=True)
# rename uses a dictionary (always inside curly braces, pairs comma-separated)
# inplace=True stores the change into the data object permanently
df.head()
# In[32]:
df.describe() # summary statistics
#https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.describe.html
# In[34]:
df.corr() # all pairwise correlation coefficients
#https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.corr.html
# In[4]:
df.count() # non-missing count for every column
# In[6]:
df.isnull().sum() # number of missing values per column
# In[7]:
for var in df: # iterate over all variables in the dataframe
    print(var, pd.unique(df[var])) # the unique values found in each column
# In[9]:
df['sukup_teksti'] = df['sukup'].replace({1:'Mies', 2:'Nainen'}) # add a column translating the 'sukup' codes via a dictionary
df.head(6)
# In[11]:
ika_bins = [18, 28, 38, 48, 58, 68]
df['ikäluokka'] = pd.cut(df['ikä'], ika_bins).astype(str) # bin the age values into the predefined intervals
df.head(6)
# In[15]:
df['tyytyväisyys'] = df[['johto','työtov','työymp','palkkat','työteht']].mean(axis=1) # row-wise mean of the selected columns
df[['nro','sukup','ikä','tyytyväisyys']].head(6)
# In[16]:
df['käyttö']= df[['työterv', 'lomaosa', 'kuntosa', 'hieroja']].count(axis=1) # how many of the four benefits each row uses
df.head(6)
# In[17]:
df[4:6] # select rows 4-5
# In[18]:
df[df['palkka']>4000] # filter rows on a single-column condition
# In[19]:
df[(df['sukup']==1) & (df['palkkat']<3)] # filter rows on a condition: men unhappy with their pay
# In[20]:
df[df['käyttö']==0]
# In[24]:
df[['tyytyväisyys', 'käyttö']][df['käyttö']>=3] # pick two columns, filter rows on a third column's condition
# In[26]:
df[['palkka', 'palkkat']].sort_values(by='palkka').head(10) # order the results by a column
# In[28]:
df[['palkka', 'palkkat']].sort_values(by='palkka', ascending=False).head(10)
# In[29]:
df.drop(['nro','ikä','palveluv','palkka'], axis=1).head()
# In[33]:
# ExcelWriter.save() was removed in pandas 2.0; using the writer as a context
# manager closes (and thereby saves) the file automatically.
with pd.ExcelWriter('valmisteltu.xlsx', engine='xlsxwriter') as writer:
    df.to_excel(writer)
|
#!/usr/bin/env python3
import sys, argparse, matplotlib
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import root_scalar, root
from scipy.special import erf
from numpy import pi, sqrt, real, imag
from collections import defaultdict, namedtuple
from functools import partial
from termcolor import colored
parser = argparse.ArgumentParser(
    description='Solver for analyzing the Mullins--Sekerka instability of a multicomponent mixture')
# Execution modes keyed by their single-letter aliases; 'n' means "compute only, no plot".
modes = { 'b': 'bifurcation', 'f': 'fixedGV', 'G': 'diagramVk', 'V': 'diagramGk', '3': 'diagramVGk', 'n': None }
class ParseMode(argparse.Action):
    """Store the full mode name whether the alias or the full name was given.

    The previous `modes[values[0]]` indexed by the first character, which
    raised KeyError for full names such as 'diagramVk' even though they are
    listed in `choices`; aliases behave exactly as before.
    """
    def __call__(self, parser, namespace, values, option_string):
        setattr(namespace, self.dest, modes[values] if values in modes else values)
str2pair = lambda s: [float(item) for item in s.split(':')]  # parse 'a:b' into [a, b]
parser.add_argument('mode', choices=[*modes.keys(), *modes.values()], action=ParseMode, help='Execution mode')
parser.add_argument('-N', type=int, default=100, help='number of points')
parser.add_argument('-k', '--kratio', type=float, default=1, help='k_S/k_L')
parser.add_argument('-K', type=float, default=0.5, help='partition coefficient')
parser.add_argument('-V', type=float, default=0.02, help='capillary length/diffusion length')
# np.inf replaces np.infty: the alias was removed in NumPy 2.0
parser.add_argument('-VD', type=float, default=np.inf, help='solute trapping velocity')
parser.add_argument('-G', type=float, default=0.01, help='capillary length/thermal length')
parser.add_argument('-s', '--figsize', type=str2pair, default='5:4', help='figure size')
parser.add_argument('-a', '--asymptotics', action='store_true', help='plot the asymptotics as well')
parser.add_argument('-l', '--log', action='store_true', help='use log scale for V and G')
parser.add_argument('-w', '--wavelength', action='store_true', help='use wavelength instead of wavenumber')
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
parser.add_argument('-d', '--debug', action='store_true', help='maximum information')
parser.add_argument('--stdin', action='store_true', help='read bunch of (G,V) pairs from stdin')
parser.add_argument('-o', '--output', type=str, default=None, help='PDF filename')
parser.add_argument('--xrange', type=str2pair, default=None, help='range of values along x-axis')
parser.add_argument('--yrange', type=str2pair, default=None, help='range of values along y-axis')
parser.add_argument('--pad', type=float, default=0.1, help='amount of padding around the figures (inches)')
parser.add_argument('--pdf', action='store_true', help='save a PDF file instead')
args = parser.parse_args()
### Alloy properties for bcc Fe-Cr-Ni taken from [Bobadilla, Lacaze & Lesoult 1988]
### Solute trapping length = 5nm is taken from [KGT 1986] for Ag-Cu and used in [Loser & Herlach 1992]
T_M = 1776 # Melting point of the pure solvent
# A solute is described by concentration C, liquidus slope m,
# equilibrium partition coefficient K, and diffusivity D.
Solute = namedtuple('Solute', 'C m K D')
alloy = {
    'Cr': Solute(0.185, -0.67, 0.9, 1.8e-9),
    'Ni': Solute(0.110, -3.13, 0.8, 0.8e-9)
}
Dmean = np.mean([ s.D for s in alloy.values() ]) # mean diffusivity (nondimensionalization scale)
_r = lambda s: s.D/Dmean # diffusivity ratio of solute s
_dT = lambda s: s.m*s.C*(s.K-1)/s.K # freezing-range contribution of solute s
DeltaT = np.sum([ _dT(s) for s in alloy.values() ]) # total freezing range
class Style:
    """Matplotlib style presets shared by all plotting modes."""
    dotted = { 'linestyle': ':', 'linewidth': 0.5, 'color': 'k' }
    dashed = { 'linestyle': '--', 'linewidth': 0.5, 'color': 'k' }
    thin = { 'linestyle': '-', 'linewidth': 0.5, 'color': 'k' }
    thick = { 'linestyle': '-', 'linewidth': 1, 'color': 'k', 'zorder': 10 }
    unstable = { 'label': r'$\mathrm{unstable\ region}$', 'hatch': 'XX', 'color':'none', 'edgecolor': 'gray' }
    gray = { 'color': 'lightgray' }
    point = { 'color': 'black', 'marker': '.', 'linestyle': 'None', 'zorder': 10 }
    surface = { 'rstride': 1, 'cstride': 1, 'edgecolor': 'none', 'alpha': 0.5 }
    annotate = { 'xytext': (0,3), 'textcoords': 'offset points' }
### Global constants
almost_one = 1 - 10*np.finfo(float).eps
small = np.finfo(float).eps
factorY = 1.1 # vertical padding factor for axis limits
kmin, kmax = 1e-5, 1e5 #TODO: provide some reasonable estimates
### Parameter-dependent constants
logN = np.log10(args.N)
### Auxiliary functions in the a_0(k) relation
_q = lambda a,k,s: 1/2 + sqrt(1/4 + _r(s)*a + (_r(s)*k)**2)
_q_a = lambda a,k,s: _r(s)/(2*_q(a,k,s) - 1) # derivative of q w.r.t. a
_q_kk = lambda a,k,s: _r(s)**2/(2*_q(a,k,s) - 1) # derivative of q w.r.t. k^2
_p = lambda k,s: _q(0,k,s)
### Partition coefficient and its derivative
# Velocity-dependent (solute-trapping) partition coefficient; -> s.K as v/VD -> 0
_K = lambda v,s: (s.K + v/args.VD)/(1 + v/args.VD)
_dK = lambda v,s: (1 - s.K)/args.VD/(1 + v/args.VD)**2
### Formulas for calculating calG and I
_calG = lambda g,v: 2*g/(args.kratio+1)/v**2
_I = lambda v,s: _dT(s)/DeltaT/_r(s)/v
_G = lambda calG,v: calG*(args.kratio+1)*v**2/2 # inverse of _calG
### Absolute stability and constitutional undercooling limits
_Vmax_eq = lambda v: np.sum([ _r(s)**2*_I(v,s)/_K(v,s) for s in alloy.values() ], axis=0) - 1
Vmax = root_scalar(_Vmax_eq, bracket=[small, 10/args.K]).root
_Vmin_eq = lambda g,v: np.sum([ _I(v,s) for s in alloy.values() ], axis=0) - _calG(g,v)
_Vmin = np.vectorize(lambda g: root_scalar(partial(_Vmin_eq, g), bracket=[small, 10*Vmax]).root)
_Gmin = np.vectorize(lambda v: root_scalar(_Vmin_eq, args=v, bracket=[small, 10*Vmax]).root)
### A simple estimate for the reference wavenumber (depends on G, V, kratio, K)
_sumI = lambda v: np.sum([ _I(v,s) for s in alloy.values() ], axis=0)
_k0 = lambda g,v: sqrt(max(0, _sumI(v) - _calG(g,v)))
### Dispersion relation
_Iqq = lambda a,k,v,s: _I(v,s)*(_q(a,k,s) - 1 - _r(s)*a)/(_q(a,k,s) - 1 + _K(v,s))
_Iqq_kk = lambda a,k,v,s: _q_kk(a,k,s)*_I(v,s)*(_K(v,s) + _r(s)*a)/(_q(a,k,s) - 1 + _K(v,s))**2
__f = lambda a,k,g,v: np.sum([ _Iqq(a,k,v,s) for s in alloy.values() ], axis=0) - _calG(g,v) - k**2
__f_kk = lambda a,k,g,v: np.sum([ _Iqq_kk(a,k,v,s) for s in alloy.values() ], axis=0) - 1 # d(__f)/d(k^2)
### Formulas for finding the most unstable wavenumber
_most_eq = lambda a,k,g,v: (__f(a,k,g,v), __f_kk(a,k,g,v))
### Formulas for marginal stability (a_0 = 0)
_calG_kv = lambda k,v: np.sum([ _Iqq(0,k,v,s) for s in alloy.values() ], axis=0) - k**2
_G_kv = lambda k,v: _G(_calG_kv(k,v), v)
### Formulas for finding bifurcation points
_V_bif_eq = lambda v,k: np.sum([ _Iqq_kk(0,k,v,s) for s in alloy.values() ], axis=0) - 1
_V_bif = np.vectorize(lambda k: root_scalar(_V_bif_eq, args=k, bracket=[small, Vmax]).root + small)
### Other functions
# Smooth mesh of args.N points on [a,b]; va/vb control the end refinement via erf
interval_mesh = lambda a, b, va, vb: (erf(np.linspace(-va, vb, args.N)) + 1)*(b-a)/2 + a
make_log = lambda f: np.log10(f) if args.log else f
k2k = lambda k,v: 2*pi/k/v if args.wavelength else k # wavenumber -> wavelength when requested
l2k = lambda l,v: 2*pi/l/v
_k2k = lambda k: k2k(k, args.V)
klabel = lambda p='', s='': f'${p}'+r'\hat\lambda'+f'{s}$' if args.wavelength else f'${p}k{s}$'
add_tmargin = lambda a,b: (a, b+(factorY-1)*(b-a)) # extend the top of a (lo, hi) interval
def error(msg):
    """Print *msg* in red to stderr (reports the failure; does not abort)."""
    print(colored(msg, 'red'), file=sys.stderr)
np.seterr(all='raise') # Consider warnings as errors
if args.pdf:
    # Render through the PGF backend so the figures are typeset by LaTeX
    matplotlib.use('pgf')
    params = {
        'axes.labelsize': 11,
        'font.size': 11,
        'legend.fontsize': 10,
        'xtick.labelsize': 10,
        'ytick.labelsize': 10,
        'text.usetex': True,
        'pgf.rcfonts': False,
        'figure.figsize': args.figsize
    }
    plt.rcParams.update(params)
### Calculate Gmax
# Derivative-based marginal quantity used to locate where the marginal and
# bifurcation curves touch
_Iqq0_v = lambda k,v,s: _I(v,s)*(_p(k,s) - 1)*(_p(k,s) - 1 + _K(v,s) + _dK(v,s)*v)/(_p(k,s) - 1 + _K(v,s))**2
_calGmax = lambda k,v: np.sum([ _Iqq0_v(k,v,s) for s in alloy.values() ], axis=0)/2
_k_star_eq = lambda k: _calG_kv(k, _V_bif(k)) - _calGmax(k, _V_bif(k))
k_star = root_scalar(_k_star_eq, bracket=[1e-3, 1e3]).root
v_star = _V_bif(k_star)
Gmax = _G_kv(k_star, v_star) # largest gradient for which instability is possible
if args.verbose:
    print(f'Vmax = {Vmax:.5g} at G = 0 and k -> 0')
    print(f'Gmax = {Gmax:.5g} at V = {v_star:.5g} and k = {k_star:.5g}')
### Dump stability results for given pairs of (V,G) reading from stdin
if args.stdin:
    Vs, Gs, Ks = defaultdict(list), defaultdict(list), defaultdict(list)
    for line in sys.stdin:
        # Each input line has the form 'V:G:label'
        v,g,label = line.rstrip('\n').split(':')
        g,v = [ float(f) for f in (g,v) ]
        # np.inf replaces np.infty (alias removed in NumPy 2.0); the values are identical
        if v > Vmax:
            k_max, status = np.inf, 'absolute_stability'
        elif v < _Vmin(g):
            k_max, status = np.inf, 'constitutional_supercooling'
        elif g > Gmax:
            k_max, status = np.inf, 'too_large_gradient'
        else:
            k0 = _k0(g,v)
            a0 = -__f(0,k0,g,v) # quasi-stationary initial guess for the growth rate
            res = root(lambda x: _most_eq(*x,g,v), [a0,k0], method='lm')
            if not res.success:
                k_max, status = np.inf, 'failed'
            else:
                a_max, k_max = res.x
                k_max, status = np.abs(k_max), f'a={a_max}'
        # Accumulate the points per label so the plotting modes can overlay them
        Vs[label].append(v); Gs[label].append(g); Ks[label].append(k_max)
        print(f'{k2k(k_max,v)}:{status}', flush=True)
### Mode 1: Bifurcation diagram in the (V,G) coordinates
### Stable region corresponds to the MS stability for any wave numbers
if args.mode == modes['b']:
    if args.verbose:
        print(f' -- Plot the (V,G) bifurcation diagram')
    # Asymptotic approximations of v(k) and g(k) for large wavenumbers
    _Vlarge_eq = lambda k,v: np.sum([ _K(v,s)*_I(v,s)/_r(s) for s in alloy.values() ], axis=0) - 2*k**3
    _Vlarge = np.vectorize(lambda k: root_scalar(partial(_Vlarge_eq, k), bracket=[small, 10*Vmax]).root)
    _Glarge_kv = lambda k,v: _G(np.sum([ _I(v,s) for s in alloy.values() ], axis=0) - k**2, v)
    _Glarge = lambda k: _Glarge_kv(k, _Vlarge(k))
    # Bifurcation curve over two decades of k around k_star
    K = np.logspace(-1, 1, args.N)*k_star
    V = _V_bif(K)
    G = _G_kv(K,V)
    if args.verbose:
        # Verbose mode: two panels; the left one shows the k-resolved curves
        fig, axs = plt.subplots(ncols=2, figsize=args.figsize[0]*np.array((2.2,1)))
        axs[0].plot(k2k(K,V), V)
        axs[0].plot(k2k(k_star,v_star), v_star, **Style.point)
        axs[0].set_xlabel(klabel(s='_\mathrm{bif}'))
        axs[0].set_ylabel(r'$\hat{V}$', rotation=0, color='C0')
        ax = axs[0].twinx()
        ax.plot(k2k(K,V), G, color='C1')
        ax.plot(k2k(k_star,v_star), Gmax, **Style.point)
        ax.set_ylabel(r'$\hat{G}$', rotation=0, color='C1')
        if args.log:
            axs[0].loglog(); ax.loglog()
        else:
            axs[0].semilogx(); ax.semilogx()
        if args.asymptotics:
            K1, K2 = np.split(K, 2)
            V1, V2 = np.split(V, 2)
            axs[0].plot(k2k(K2,V2), _Vlarge(K2), **Style.dashed)
            ax.plot(k2k(K2,V2), _Glarge(K2), **Style.dashed)
    # Recompute the curve over (almost) the full k range for the (V,G) diagram
    K = np.logspace(-logN+1, logN-1, args.N)*k_star
    V = _V_bif(K)
    G = _G_kv(K,V)
    # In non-verbose mode draw directly on the pyplot state machine
    ax = axs[1] if args.verbose else plt
    ax.plot(V, G)
    ax.fill_between(V, np.min(G) + 0*G, G, **Style.unstable)
    if args.stdin:
        # Overlay the (V,G) points read from stdin, grouped by label
        for label in Ks:
            ax.plot(Vs[label], Gs[label], label=label)
        if args.verbose:
            ax.set_xlim(left=V[-1])
        else:
            ax.xlim(left=V[-1])
        ax.legend()
    ax.plot(v_star, Gmax, **Style.point)
    ax.annotate(r'$\hat{G}_\mathrm{max}$', (v_star, Gmax), **Style.annotate)
    # pyplot and Axes spell the label/limit setters differently
    if args.verbose:
        ax.set_xlabel(r'$\hat{V}$')
        ax.set_ylabel(r'$\hat{G}$', rotation=0)
    else:
        ax.xlabel(r'$\hat{V}$')
        ax.ylabel(r'$\hat{G}$', rotation=0)
    if args.log:
        ax.loglog()
        ax.margins(0, 0)
    else:
        if args.verbose:
            ax.set_xlim(0, Vmax)
            ax.set_ylim(add_tmargin(0, Gmax))
        else:
            ax.xlim(0, Vmax)
            ax.ylim(add_tmargin(0, Gmax))
    if args.asymptotics:
        # Constitutional-supercooling boundary and the absolute-stability limit
        ax.plot(V, _Gmin(V), **Style.dashed)
        ax.axvline(Vmax, **Style.dashed)
### Mode 2: Amplification rate (a_0) vs wave number (k)
elif args.mode == modes['f']:
if args.verbose:
print(f' -- Plot a0(k) for given G = {args.G} and V = {args.V}')
calG = _calG(args.G, args.V)
omax = 10
k0 = _k0(args.G, args.V)
if args.verbose:
print(f'k_0 = {k0:.5g}')
# Equation for a_0(k) and its derivatives w.r.t. a_0 and k^2
def _a_eq(a,k):
res = calG + k**2
for s in alloy.values():
res -= _I(args.V,s)*(_q(a,k,s)-1-_r(s)*a)/(_q(a,k,s)-1+s.K)
return res
def _a_eq_a(a,k):
res = 0*k
for s in alloy.values():
res -= _I(args.V,s)*(_q_a(a,k,s)*(s.K+_r(s)*a)-_r(s)*(_q(a,k,s)-1+s.K))/(_q(a,k,s)-1+s.K)**2
return res
def _a_eq_kk(a,k):
res = k**0
for s in alloy.values():
res -= _I(args.V,s)*_q_kk(a,k,s)*(s.K+_r(s)*a)/(_q(a,k,s)-1+s.K)**2
return res
### 1. Find the critical point (where a1(k)=a2(k)); a_0 is complex behind this point
if args.G > 0:
# Try the exact solution for Dratio=0 and binary mixture as an initial guess
calG = 1 - calG*args.V
A = calG + 2*_K(args.V) - 1
D = (A-2/args.V)**2 + (2*_K(args.V))**2 - A**2
if D > 0:
B = A-2/args.V + sqrt(D)
if B > 0:
k_crit = sqrt(B/args.V)
_a_crit = lambda k: -k**2 - (1 - (calG-args.V*k**2)**2)/4
a_crit = _a_crit(k_crit)
try:
k_crit
except NameError:
error('Failed to use a good initial guess!')
a_crit, k_crit = 1, 1
if args.verbose:
print(f'Initial guess for the critical point: k = {k_crit:.5g}, a = {a_crit:.5g}')
try:
_ak_crit_eq = lambda x: (_a_eq(*x), _a_eq_a(*x))
res = root(_ak_crit_eq, [a_crit, k_crit])
if not res.success:
raise ValueError(res.message)
a_crit, k_crit = res.x
if args.verbose:
plt.plot(_k2k(k_crit), a_crit, **Style.point)
shift = Style.annotate['xytext'][1]
style = Style.annotate | { 'xytext': (shift, shift) }
plt.annotate(klabel(s='_c'), (_k2k(k_crit), a_crit), **style)
print(f'Critical point: k = {k_crit:.5g}, a = {a_crit:.5g}')
except (ValueError, FloatingPointError) as err:
error(f'Failed to find the critical point: {err}')
else:
k_crit = 0
### 2. Find the extremum point and roots of a1(k)
try:
_ak_max_eq = lambda x: (_a_eq(*x), _a_eq_kk(*x))
a0 = -_a_eq(0, k0) # Initial guess in the quasi-stationary approximation
# NB: Levenberg-Marquardt algorithm is used here as more stable
res = root(_ak_max_eq, [a0, k0], method='lm')
if not res.success:
raise ValueError(res.message)
a_max, k_max = res.x
k_max = np.abs(k_max) # Since a negative solution is equivalent to a positive one
if args.verbose:
plt.plot(_k2k(k_max), a_max, **Style.point)
plt.annotate(klabel(s='_m'), (_k2k(k_max), a_max), **Style.annotate)
print(f'Maximum point: k = {k_max:.5g}, a = {a_max:.5g}')
Kzero = []
if a_max > 0:
Kzero.append(root_scalar(partial(_a_eq, 0), bracket=[k_max,kmax]).root)
if args.G > 0:
Kzero.append(root_scalar(partial(_a_eq, 0), bracket=[kmin, k_max]).root)
Kzero = np.array(Kzero)
if args.verbose:
plt.plot(_k2k(Kzero), np.zeros_like(Kzero), **Style.point)
I = [2,1] if args.G > 0 else [0]
for i,k in zip(I,Kzero):
shift = Style.annotate['xytext'][1]
style = Style.annotate | { 'xytext': (-4*shift if i==1 else 0, shift) }
plt.annotate(klabel(s=f'_{i}'), (_k2k(k), 0), **style)
print(f'Neutral stability points: k =', ', '.join(f'{k:.5g}' for k in Kzero))
else:
print('The planar front is unconditionally stable.')
except (ValueError, FloatingPointError) as err:
error(f'Failed to find a maximum point: {err}')
### 3. Create a mesh and find solutions on it
# Lower boundary for a_0
_amin_ = lambda k,s: -_r(s)*k**2 - 1/4/_r(s)
def _amin(k):
res = 0*k - np.infty
for s in alloy.values():
res = np.maximum(res, _amin_(k,s))
return almost_one*res
# For a binary mixture: _amean = lambda k: -k**2 - (1 - (calG-args.V*k**2)**2)/4
_amean = lambda k: root_scalar(_a_eq_a, args=k, bracket=[_amin(k), omax]).root
K = np.geomspace(*args.xrange, args.N) if args.xrange else np.logspace(-1.5, 0.5, args.N)*k0
if k_crit > K[0]:
# Refine the mesh near the critical point
K = np.r_[K[K<k_crit], interval_mesh(k_crit, K[-1], logN+1, 0)]
m0 = _a_eq_a(_amin(K), K) < 0 # if amin < amean, where a2 < amean < a1
m1 = np.zeros_like(K, dtype=bool) # if a1 (larger) is real
m2 = np.copy(m1) # if a2 (smaller) is real
A1, Amean, A2 = [], [], []
for i, k in enumerate(K):
if m0[i]:
Amean.append(_amean(k))
if _a_eq(Amean[-1], k) <= 0:
m1[i] = True
A1.append(root_scalar(_a_eq, args=k, bracket=[Amean[-1], omax]).root)
if _a_eq(_amin(k), k) >= 0:
m2[i] = True
A2.append(root_scalar(_a_eq, args=k, bracket=[_amin(k), Amean[-1]]).root)
else:
if _a_eq(_amin(k), k) <= 0:
m1[i] = True
A1.append(root_scalar(_a_eq, args=k, bracket=[_amin(k), omax]).root)
A1, Amean, A2 = np.array(A1), np.array(Amean), np.array(A2)
amax = np.max(A1)
### 4. Plot a1(k)
plt.plot(_k2k(K[m1]), A1)
plt.semilogx()
plt.xlabel(klabel())
plt.ylabel(r'$a_0$', rotation=0)
plt.axhline(y=0, **Style.thin)
### 5. Plot a2(k)
if args.verbose:
plt.plot(_k2k(K[m2]), A2)
plt.plot(_k2k(K[m0]), Amean, **Style.dotted)
plt.fill_between(_k2k(K[m0]), _amin(K)[m0], Amean, **Style.gray)
plt.ylim(add_tmargin(np.min(np.r_[A1,A2]), np.max(A1)))
### 6. Find and plot complex a(k) for k < k_crit
if args.verbose and k_crit > K[0]:
_ac = lambda x: x[0] + x[1]*1j
_a_complex_eq = lambda x,k: (real(_a_eq(_ac(x),k)), imag(_a_eq(_ac(x),k)))
Kc = interval_mesh(K[0], k_crit, 1, logN)[::2]
A_guess = np.vectorize(_amean)(Kc)
A_real, A_imag = np.array([ root(_a_complex_eq, [a,-a], args=k).x for a,k in zip(A_guess,Kc) ]).T
plt.plot(Kc, A_real)
plt.ylim(add_tmargin(np.min(np.r_[A1,A2]), np.max(np.r_[A1,A_real])))
### Mode 3a: Stability diagram in the (V,k) coordinates
elif args.mode == modes['G']:
    if args.verbose:
        print(f' -- Plot the (V,k) stability diagram for given G = {args.G}')
    if (args.G < 0 or args.G > Gmax):
        print('The planar front is unconditionally stable.')
        sys.exit()
    ### 1. Find two bifurcation points and create a mesh between them
    if args.G > 0:
        _k_eq = lambda k: _calG_kv(k, _V_bif(k)) - _calG(args.G, _V_bif(k))
        k1 = root_scalar(_k_eq, bracket=[kmin, k_star]).root
        k2 = root_scalar(_k_eq, bracket=[k_star, kmax]).root
        K_bif = np.array([k2, k1])
        V_bif = _V_bif(K_bif)
        V = interval_mesh(*V_bif, logN+2, logN+1)
    else:
        # G = 0: the unstable velocity interval spans (0, Vmax)
        K_bif = [np.inf, 0]
        V_bif = [0, Vmax]
        V = interval_mesh(*V_bif, -1, logN)
    if args.verbose:
        for i,s in zip(range(2), ['Min', 'Max']):
            print(f'{s} unstable V = {V_bif[i]:.5g} with k = {K_bif[i]:.5g}')
    ### 2. Find the most unstable curve
    if args.G > 0:
        # NB: it is crucial that _k_guess(v) is a straight line on a log-log plot
        _k_guess = lambda v: np.exp(np.interp(np.log(v), np.log(V_bif), np.log(K_bif)))
    else:
        # NB: kc is not the best guess since kc~V^1/3, but k_most~V^1/2
        _k_guess = np.vectorize(lambda v: root_scalar(lambda k: _V_bif(k)-v, bracket=[kmin, kmax]).root)
    A_most, K_most = np.array([
        root(lambda x: _most_eq(*x, args.G, v), [0, _k_guess(v)], method='lm').x for v in V ]).T
    ### 3. Find the boundary of the MS instability
    _k_eq = lambda k,v: _calG_kv(k,v) - _calG(args.G, v)
    K2 = np.array([ root_scalar(_k_eq, args=v, bracket=[k, kmax]).root for k,v in zip(K_most,V) ])
    if args.G > 0:
        K1 = np.array([ root_scalar(_k_eq, args=v, bracket=[kmin, k]).root for k,v in zip(K_most,V) ])
    else:
        # No lower marginal branch for G = 0; use a constant floor for plotting
        K1 = 2e-2*np.ones_like(K2)
    if args.wavelength:
        K1 = K1/V
        plt.ylim(np.min(k2k(K2,V))/factorY, k2k(K1,V)[0])
    else:
        plt.ylim(K1[0], factorY*K2[0])
    ### 4. Plot the stability diagram
    plt.plot(V, k2k(K1,V))
    plt.plot(V, k2k(K2,V), color='C0')
    plt.plot(V, k2k(K_most,V), label=r'$\mathrm{the\ most\ unstable}$')
    plt.fill_between(V, k2k(K1,V), k2k(K2,V), **Style.unstable)
    plt.loglog() if args.log else plt.semilogy()
    plt.xlabel(r'$\hat{V}$')
    plt.ylabel(klabel(), rotation=0)
    plt.legend()
    if args.xrange:
        plt.xlim(args.xrange)
    if args.yrange:
        plt.ylim(args.yrange)
    if args.verbose:
        if args.G > 0:
            plt.plot(V_bif, k2k(K_bif,V_bif), **Style.point)
### Mode 3b: Stability diagram in the (G,k) coordinates
elif args.mode == modes['V']:
    if args.verbose:
        print(f' -- Plot the (G,k) stability diagram for given V = {args.V}')
    if (args.V <= 0 or args.V >= Vmax):
        print('The planar front is unconditionally stable.')
        sys.exit()
    ### 1. Find the bifurcation point and create a mesh up to it
    _k_eq = lambda k: _V_bif(k) - args.V
    k_bif = root_scalar(_k_eq, bracket=[kmin, kmax]).root
    G_bif = _G_kv(k_bif, args.V)
    G = interval_mesh(0, G_bif, 1, logN+1)
    if args.verbose:
        print(f'Max unstable G = {G_bif:.5g} with k = {k_bif:.5g}')
    ### 2. Find the most unstable curve (k_bif serves as the initial guess)
    A_most, K_most = np.array([
        root(lambda x: _most_eq(*x, g, args.V), [0, k_bif], method='lm').x for g in G ]).T
    ### 3. Find the boundary of the MS instability
    _k_eq = lambda k,g: _calG_kv(k, args.V) - _calG(g, args.V)
    K1 = np.array([ root_scalar(_k_eq, args=g, bracket=[kmin, k]).root for k,g in zip(K_most,G) ])
    K2 = np.array([ root_scalar(_k_eq, args=g, bracket=[k, kmax]).root for k,g in zip(K_most,G) ])
    ### 4. Plot the stability diagram
    plt.plot(G, _k2k(K1))
    plt.plot(G, _k2k(K2), color='C0')
    plt.fill_between(G, _k2k(K1), _k2k(K2), **Style.unstable)
    plt.plot(G, _k2k(K_most), label=r'$\mathrm{the\ most\ unstable}$')
    plt.loglog() if args.log else plt.semilogy()
    plt.xlabel(r'$\hat{G}$')
    plt.ylabel(klabel(), rotation=0)
    plt.legend()
    if args.verbose:
        plt.plot(G_bif, _k2k(k_bif), **Style.point)
### Mode 3c: Stability diagram in the (V,G,k) coordinates
elif args.mode == modes['3']:
    if args.verbose:
        print(f' -- Plot the (V,G,k) stability diagram')
    from mpl_toolkits import mplot3d
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ### 1. Create a 2D mesh in the (G,t) coordinates,
    ### where t is a normalized distance between the bifurcation points
    G = interval_mesh(0, Gmax, 1, logN+1)
    T = interval_mesh(0, 1, logN+2, logN+1)
    GG, TT = np.meshgrid(G, T)
    ### 2. Find two bifurcation curves and create meshes between them
    _k_eq = lambda k,g: _calG_kv(k, _V_bif(k)) - _calG(g, _V_bif(k))
    K1 = np.array([ root_scalar(_k_eq, args=g, bracket=[kmin, k_star]).root for g in G ])
    K2 = np.array([ root_scalar(_k_eq, args=g, bracket=[k_star, kmax]).root for g in G ])
    K12 = np.array([K2,K1])
    V12 = _V_bif(K12)
    _t2v = lambda t,v1,v2: np.interp(t, [0,1], [v1,v2])
    VV = np.array([ _t2v(T, *v12) for v12 in V12.T ]).T
    ### 3. Find the most unstable surface
    # Log-log interpolation between the two bifurcation branches as initial guess
    _k_guess = lambda v, v12, k12: np.exp(np.interp(np.log(v), np.log(v12), np.log(k12)))
    KK_guess = np.array([ _k_guess(_t2v(T, *v12), v12, k12) for k12,v12 in zip(K12.T,V12.T) ]).T
    _most_eq_gvk = np.vectorize(lambda g,v,k_guess:
        tuple(root(lambda x: _most_eq(*x,g,v), [0,k_guess], method='lm').x))
    AA_most, KK_most = _most_eq_gvk(GG, VV, KK_guess)
    ### 4. Find the boundary of the MS instability
    _k_eq = lambda k,g,v: _calG_kv(k,v) - _calG(g,v)
    _k1_eq_gvk = np.vectorize(lambda g,v,k_most:
        root_scalar(_k_eq, args=(g,v), bracket=[k_most, kmax]).root)
    _k2_eq_gvk = np.vectorize(lambda g,v,k_most:
        root_scalar(_k_eq, args=(g,v), bracket=[kmin, k_most]).root)
    KK1 = _k1_eq_gvk(GG, VV, KK_most)
    KK2 = _k2_eq_gvk(GG, VV, KK_most)
    ### 5. Set logarithmic scales
    # NB: due to some bug in Matplotlib, ax.set_xscale('log') doesn't work for all axes;
    # therefore, we have to transform the data manually.
    pKK_most = np.log10(k2k(KK_most,VV)); pKK1 = np.log10(k2k(KK1,VV)); pKK2 = np.log10(k2k(KK2,VV))
    pVV = make_log(VV); pGG = make_log(GG)
    ax.set_zlabel(klabel(p=r'\log_{10}'))
    if args.log:
        ax.set_xlabel(r'$\log_{10}\hat{V}$'); ax.set_ylabel(r'$\log_{10}\hat{G}$')
    else:
        ax.set_xlabel(r'$\hat{V}$'); ax.set_ylabel(r'$\hat{G}$')
    ### 6. Plot the 3D stability diagram
    ax.plot_surface(pVV, pGG, pKK1, **Style.surface, color='gray')
    ax.plot_surface(pVV, pGG, pKK2, **Style.surface, color='gray')
    p = ax.plot_surface(pVV, pGG, pKK_most, cmap='viridis')
    fig.colorbar(p)
    if args.verbose:
        # Mark the (v_star, Gmax) point and the two bifurcation curves
        # (NB: G, K1, K2 are rebound to log-scaled values here)
        v0 = make_log(v_star); g0 = make_log(Gmax); k0 = np.log10(k2k(k_star,v_star))
        ax.plot3D(v0, g0, k0, **Style.point)
        V1 = make_log(_V_bif(K1)); V2 = make_log(_V_bif(K2)); G = make_log(G)
        K1 = np.log10(k2k(K1,_V_bif(K1))); K2 = np.log10(k2k(K2,_V_bif(K2)))
        ax.plot3D(V1, G, K1, **Style.thick)
        ax.plot3D(V2, G, K2, **Style.thick)
# Finalize: save to PDF or show interactively (mode 'n'/None skips plotting)
if args.mode:
    filename = args.output if args.output else f'{args.mode}.pdf'
    plt.tight_layout(pad=1)
    if args.pdf:
        if args.verbose:
            # Report the actual output file name (the f-string previously
            # contained a literal placeholder instead of {filename})
            print(f' -- Save to {filename}')
        plt.savefig(filename, bbox_inches='tight', pad_inches=args.pad)
    else:
        plt.show()
|
# coding=utf-8
__author__ = 'xyc'
import functools
import sys
# The Chain of Responsibility Pattern is designed to decouple the sender of a
# request from the recipient that processes the request.
# 将能处理请求的对象连成一条链,并沿着这条链传递该请求,直到有一个对象处理请求为止,避免请求的发送者和接收者之间的耦合关系。
def coroutine(function):
    """Decorator that primes a generator function.

    Calling the wrapped function creates the generator and immediately
    advances it to its first ``yield`` so callers can use ``.send()`` at once.
    """
    @functools.wraps(function)
    def primed(*args, **kwargs):
        gen = function(*args, **kwargs)
        next(gen)
        return gen
    return primed
@coroutine
def key_handler(successor=None):
    """Chain link: consume KEYPRESS events; forward everything else to *successor*.

    NOTE(review): ``Event`` is not defined or imported in this module's
    visible code -- confirm where it comes from.
    """
    while True:
        event = (yield)
        if event.kind == Event.KEYPRESS:
            print("Press: {}".format(event))
        elif successor is not None:
            # Not ours: pass the event down the chain of responsibility
            successor.send(event)
@coroutine
def mouse_handler(successor=None):
    """Chain link: consume mouse events; forward everything else to *successor*.

    NOTE(review): ``Event.Mouse`` is cased differently from ``Event.KEYPRESS``
    above -- presumably ``Event.MOUSE`` was intended; confirm against the
    Event class definition.
    """
    while True:
        event = (yield)
        if event.kind == Event.Mouse:
            print("Click: {}".format(event))
        elif successor is not None:
            # Not ours: pass the event down the chain of responsibility
            successor.send(event)
@coroutine
def debug_handler(successor, file=sys.stdout):
    """Chain link that logs every event to *file*, then always forwards it."""
    while True:
        event = (yield)
        file.write("*DEBUG*: {}\n".format(event))
        successor.send(event)
# In this example, the value will first be
# sent to the key_handler() coroutine, which will either handle the event or pass it on.
pipeline = key_handler(mouse_handler())
# Wrap the chain so every event is logged before normal handling
pipeline = debug_handler(pipeline)
while True:
    event = Event.next()
    if event.kind == Event.TERMINATE:
        break
    pipeline.send(event)
from gol import GOL_DICTIONARY
def show_topic(question,options_dictionary):
    """Print *question*, then each option key with its text in sorted key order."""
    print question
    for option in sorted(options_dictionary):
        print option, options_dictionary[option]
def get_next_topic(topic, sub_topic):
    """Prompt for a menu choice and return the selected topic key, upper-cased.

    Looks the user's selection up in the *current* topic's options instead of
    always consulting GOL_DICTIONARY["INTRO"] -- the parameters were previously
    accepted but ignored, which only worked because the caller happened to
    reset the topic to "INTRO" before every call.
    """
    user_input = raw_input(">> ").upper()
    return GOL_DICTIONARY[topic][sub_topic][user_input].upper()
def get_answer_key():
    """Prompt the player and return the chosen answer key, upper-cased."""
    return raw_input(">> ").upper()
def keep_going():
keep_going_input = raw_input("Do you want to play again? ").lower()
if keep_going_input == "yes":
return True
else:
print "Bye Felicia!"
return False
# Entry point: a simple menu loop that walks the GOL_DICTIONARY topic tree.
print "Welcome to the game of life! The goal of the game is to get to know Alice a little better."
topic = "INTRO"
sub_topic = "Options"
while True:
    # Show the intro menu and let the player pick a topic
    show_topic("What are you interested in learning about?", GOL_DICTIONARY[topic][sub_topic])
    topic = get_next_topic(topic, sub_topic)
    sub_topic = "Options"
    # Show the chosen topic's questions and print the matching answer
    show_topic("Awesome! What would you like to know about that topic?", GOL_DICTIONARY[topic][sub_topic])
    answer_key = get_answer_key()
    print(GOL_DICTIONARY[topic]['Answers'][answer_key])
    if keep_going() == True:
        # Restart from the intro menu
        topic = "INTRO"
        sub_topic = "Options"
        continue
    else:
        break
# lets_play = raw_input("Are you ready to play the game of Alice's life? ")
# if lets_play == "yes":
# print ("Welcome to the game of Alice's life. The object of the game is to "
# "get to know Alice a little better and have fun. ")
# while True:
# main_menu = raw_input("Where do you want to start?\n"
# "A. Traveling experiences\n"
# "B. Career decisions\n"
# "C. Extracurricular activities\n"
# "D. Nevermind, I don't want to play right now\n"
# )
# if main_menu == "D":
# break
# elif main_menu == "A":
# traveling_menu = raw_input("""Awesome! I've been lucky to have some
# amazing traveling experiences, which do you want to learn more about?
# 1. Volunteering in Ghana
# 2. Studying in Prague
# 3. Teaching in Thailand
# 4. Opps, I made the wrong choice, take me back to the main menu
# """)
# if traveling_menu == "1":
# print """Cool! Let me tell you about Ghana..."""
# elif traveling_menu == "2":
# print """Prague! Where to begin..."""
# elif traveling_menu == "3":
# print """Thailand! Such a beautiful place"""
# else:
# main_menu
# elif main_menu == "B":
# career_menu = raw_input("""/nWork, work, work, work, work. What choice do you want to
# dive into?
# 1. To teach or not to teach
# 2. Nonprofit or tech
# 3. Product vs. sales
# """)
# if career_menu == "1":
# print """Learning to teach the hard way..."""
# elif career_menu == "2":
# print """Finding a job where you can give back and have intellectual stimulation."""
# elif career_menu == "3":
# print """Getting sold on product"""
# else:
# main_menu
# elif main_menu == "C":
# extracurricular_menu = raw_input("""These are a few of my favorite things! Which do you want
# to know more about?
# # 1. Big Brothers Big Sisters
# # 2. Women's rights
# # 3. Product vs. sales
# # """)
# # if extracurricular_menu == "1":
# # print """Envee is the coolest chick you'll ever meet."""
# # elif extracurricular_menu == "2":
# # print """I'm from a family of four women. I'm a proud feminist!"""
# # elif extracurricular_menu == "3":
# # print """Coding is how I made this game!"""
# # else:
# # main_menu
|
import pandas as pd
import numpy as np
import tldextract
import dateparser
from cleanco import prepare_terms, basename
import unidecode
import re
import string
class Preprocessing():
    def __init__(self, df):
        """Split the matched-apps frame by store and prepare empty output frames.

        :param df: DataFrame of matched apps with a 'store' column
                   (0 = Google Play, 1 = App Store).
        """
        self.df_apps_match = df
        # Raw per-store slices of the input frame
        self.google_play_df = self.df_apps_match[self.df_apps_match['store'].values.astype(int) == 0]
        self.app_store_df = self.df_apps_match[self.df_apps_match['store'].values.astype(int) == 1]
        # Preprocessed output frames, filled column-by-column by pipeline()
        self.google_play_df_after_eda = pd.DataFrame()
        self.app_store_df_after_eda = pd.DataFrame()
        # Hold-out frames (presumably filled by divide_data_80_20 -- confirm)
        self.google_play_df_after_eda_10 = pd.DataFrame()
        self.app_store_df_after_eda_10 = pd.DataFrame()
    def pipeline(self):
        """Run the preprocessing steps in order and persist the results to CSV."""
        # self.clean_columns()
        #self.create_matched_tables() # todo: for the tensor
        self.add_id()
        self.preprocessing_maincategory()
        self.preprocessing_titles()
        self.preprocessing_author()
        self.preprocessing_devsite()
        # self.preprocessing_releasedate()  # release-date parsing currently disabled
        self.preprocessing_description()
        self.add_matching()
        self.divide_data_80_20()
        self.save_csvs()
def add_id(self):
self.google_play_df_after_eda['id'] = self.google_play_df['id']
self.app_store_df_after_eda['id'] = self.app_store_df['id']
    def preprocessing_maincategory(self):
        """Map both stores' raw category values onto a shared category vocabulary."""
        # Change from apple catagories ids to string catagories
        self.app_store_df_after_eda['maincategory'] = self.app_store_df.loc[:, 'maincategory'].replace(
            ['6000', '6001', '6002', '6003', '6004', '6005', '6006', '6007', '6008',
             '6009', '6010', '6011', '6012', '6013', '6014', '6015', '6016', '6017',
             '6018', '6020', '6021', '6023', '6024', '6025', '6026', '6027'],
            ['Business', 'Weather', 'Utilities', 'Travel', 'Sports',
             'Social Networking', 'Reference', 'Productivity', 'Photo Video', 'News',
             'Navigation', 'Music', 'Lifestyle', 'Health and Fitness', 'Games',
             'Finance', 'Entertainment', 'Education',
             'Books', 'Medical', 'Magazines and Newspapers', 'Food and Drink',
             'Shopping', 'Stickers', 'Developer Tools', 'Graphics and Design'])
        # Change from google play catagories to apple catagories
        # (all GAME_* subcategories collapse into 'Games')
        self.google_play_df_after_eda['maincategory'] = self.google_play_df.loc[:, 'maincategory'].replace(
            ['BOOKS_AND_REFERENCE', 'BUSINESS',
             'EDUCATION', 'ENTERTAINMENT', 'FINANCE', 'FOOD_AND_DRINK',
             'GAME_ACTION', 'GAME_ADVENTURE', 'GAME_ARCADE', 'GAME_BOARD', 'GAME_CARD', 'GAME_CASINO',
             'GAME_CASUAL', 'GAME_EDUCATIONAL', 'GAME_MUSIC', 'GAME_PUZZLE', 'GAME_RACING', 'GAME_ROLE_PLAYING',
             'GAME_SIMULATION', 'GAME_SPORTS', 'GAME_STRATEGY', 'GAME_TRIVIA', 'GAME_WORD',
             'HEALTH_AND_FITNESS', 'LIFESTYLE', 'MAPS_AND_NAVIGATION', 'MEDICAL', 'MUSIC_AND_AUDIO',
             'NEWS_AND_MAGAZINES',
             'PHOTOGRAPHY', 'PRODUCTIVITY', 'SHOPPING', 'SOCIAL', 'SPORTS', 'SPORTS_GAMES', 'TRAVEL_AND_LOCAL',
             'VIDEO_PLAYERS', 'WEATHER'],
            [
             'Books', 'Business', 'Education', 'Entertainment', 'Finance', 'Food and Drink',
             'Games', 'Games', 'Games', 'Games', 'Games', 'Games',
             'Games', 'Games', 'Games', 'Games', 'Games', 'Games',
             'Games', 'Games', 'Games', 'Games', 'Games',
             'Health and Fitness', 'Lifestyle', 'Navigation', 'Medical', 'Music', 'Magazines and Newspapers',
             'Photo Video', 'Productivity', 'Shopping', 'Social Networking', 'Sports', 'Sports',
             'Travel', 'Photo Video', 'Weather'])
def preprocessing_titles(self):
    """Reduce every title to its first token, lower-cased."""
    def first_token(raw_title):
        # Successively cut at ':', then '-', then the first space.
        token = raw_title.lower().strip()
        for sep in (':', '-', ' '):
            token = token.partition(sep)[0]
        return token
    # todo: ask davis if need it also for author
    self.google_play_df_after_eda['title'] = [first_token(t) for t in self.google_play_df['title']]
    self.app_store_df_after_eda['title'] = [first_token(t) for t in self.app_store_df['title']]
def preprocessing_author(self):
    """Strip legal-entity affixes from author names and keep the first token."""
    def normalise(raw_authors):
        terms = prepare_terms()
        cleaned = []
        for raw in raw_authors:
            # basename() is applied twice so stacked endings such as
            # "Co., Ltd." are removed as well.
            once = basename(raw.lower().strip(), terms,
                            prefix=True, middle=True, suffix=True)
            twice = basename(once, terms, prefix=True, middle=True, suffix=True)
            cleaned.append(twice.partition(' ')[0])
        return cleaned
    self.google_play_df_after_eda['author'] = normalise(self.google_play_df['author'])
    self.app_store_df_after_eda['author'] = normalise(self.app_store_df['author'])
def preprocessing_devsite(self):
    """Reduce developer-site URLs to their bare registered domain."""
    def domain_of(url):
        return tldextract.extract(url.lower().strip()).domain
    # astype(str) turns NaN cells into the literal string 'nan' so
    # tldextract never sees a non-string value.
    google_sites = self.google_play_df['devsite'].values.astype(str)
    apple_sites = self.app_store_df['devsite'].values.astype(str)
    self.google_play_df_after_eda['devsite'] = [domain_of(u) for u in google_sites]
    self.app_store_df_after_eda['devsite'] = [domain_of(u) for u in apple_sites]
def preprocessing_releasedate(self):
    """Parse free-form release-date strings into pandas datetimes."""
    def to_datetime_or_none(value):
        # Non-string cells are always NaN in the raw data - leave them unset.
        if isinstance(value, str):
            return dateparser.parse(value)
        return None
    parsed_google = self.google_play_df['releasedate'].apply(to_datetime_or_none)
    parsed_apple = self.app_store_df['releasedate'].apply(to_datetime_or_none)
    self.google_play_df_after_eda['releasedate'] = pd.to_datetime(parsed_google, errors='coerce')
    self.app_store_df_after_eda['releasedate'] = pd.to_datetime(parsed_apple, errors='coerce')
def preprocessing_description(self):
    """Lower-case descriptions, dropping digits, punctuation and accents."""
    # todo: make this cleaning step better..
    punctuation_table = str.maketrans("", "", string.punctuation)
    def clean(text):
        without_digits = re.sub(r'\d+', '', text)
        ascii_only = unidecode.unidecode(without_digits)
        return ascii_only.lower().translate(punctuation_table).strip()
    self.google_play_df_after_eda['description'] = [clean(d) for d in self.google_play_df['description']]
    self.app_store_df_after_eda['description'] = [clean(d) for d in self.app_store_df['description']]
def add_matching(self):
    """Copy the cross-store match ids onto the post-EDA frames."""
    for source, target in ((self.google_play_df, self.google_play_df_after_eda),
                           (self.app_store_df, self.app_store_df_after_eda)):
        target['id_matched'] = source['id_matched']
def divide_data_80_20(self):
    """Split off a 10% hold-out set of matched apps.

    Every tenth Google Play row (index % 10 == 0) and its matched app-store
    rows are moved into the *_10 frames and dropped from the main post-EDA
    frames.

    Fix: ``DataFrame.append`` was removed in pandas 2.0 - replaced with
    ``pd.concat``. The matched app-store slice is also computed once instead
    of twice.
    """
    for i, google_play_app in self.google_play_df.iterrows():
        if i % 10 == 0:
            # Series -> one-row frame so concat behaves like the old
            # append(..., ignore_index=True).
            self.google_play_df_after_eda_10 = pd.concat(
                [self.google_play_df_after_eda_10, google_play_app.to_frame().T],
                ignore_index=True)
            matched = self.app_store_df_after_eda[
                self.app_store_df_after_eda['id_matched'] == google_play_app['id']]
            self.app_store_df_after_eda_10 = pd.concat(
                [self.app_store_df_after_eda_10, matched])
            self.google_play_df_after_eda.drop(i, inplace=True)
            self.app_store_df_after_eda.drop(matched.index, inplace=True)
def save_csvs(self):
    """Write the four post-EDA frames to CSV files in the working directory."""
    frames_and_paths = (
        (self.google_play_df_after_eda, r'google_play_after_eda.csv'),
        (self.app_store_df_after_eda, r'app_store_after_eda.csv'),
        (self.google_play_df_after_eda_10, r'google_play_after_eda_10.csv'),
        (self.app_store_df_after_eda_10, r'app_store_after_eda_10.csv'),
    )
    for frame, path in frames_and_paths:
        frame.to_csv(path, index=False, header=True)
def clean_columns(self):  # stay with columns that no Nan in app_store
    """Drop every column that is entirely NaN in the app-store frame.

    The same columns are removed from the Google Play frame so both
    tables keep an identical layout.
    """
    all_nan_columns = [column for column in self.app_store_df.columns
                       if self.app_store_df[column].isnull().all()]
    for column in all_nan_columns:
        self.app_store_df = self.app_store_df.drop(column, axis=1)
        self.google_play_df = self.google_play_df.drop(column, axis=1)
def create_matched_tables(self):
    """Build one merged table per store joining each app with its match.

    Renames 'id_matched' to 'ios_id' / 'android_id' first so the join
    columns are unambiguous, then merges each store against the other.
    """
    google_play = self.google_play_df.rename(columns={'id_matched': 'ios_id'})
    app_store = self.app_store_df.rename(columns={'id_matched': 'android_id'})
    self.google_play_df = google_play
    self.app_store_df = app_store
    self.matched_app_store = app_store.merge(
        google_play, left_on='android_id', right_on='id',
        suffixes=('_app_store', '_google_play'))
    self.matched_google_play = google_play.merge(
        app_store, left_on='ios_id', right_on='id',
        suffixes=('_google_play', '_app_store'))
# Script entry: load the small matching dataset and run the full
# preprocessing pipeline over it.
df = pd.read_csv('../matching_small.csv', low_memory=False)
P = Preprocessing(df)
P.pipeline()
|
# Small NumPy demo: array attributes, reshaping (views), broadcasting
# and NaN detection.
import numpy as np
import numpy.core.records as ncr

a = np.array([0, 1, 2, 3, 4, 5, 6, 7, 3, 3])
print(a)
print(ncr.array([0, 1, 2, 3, 4, 5]))
print(a.ndim)
print("Shape --", a.shape)
b = a.reshape((2, 5))
print("Reshape ", b)
# reshape returns a view, so writing through b also changes a[4].
b[0][4] = 77
print("After", b)
print(a * 2)       # element-wise doubling
print(a ** 2)      # element-wise square
print([0, 1, 2, 3, 4, 5, 3, 4, 6, 7, 3, 3] * 2)  # plain list repetition
print(a > 4)       # boolean mask
# Fixes: 'shruti' was an undefined name (NameError), and the np.NAN
# alias was removed in NumPy 2.0 - use np.nan.
c = np.array([1, 2, np.nan, 3, 4])
print(np.isnan(c))
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test OGR VRT driver functionality.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
import os
import sys
import string
sys.path.append( '../pymod' )
import gdaltest
import ogrtest
import ogr
import osr
import gdal
###############################################################################
# Open VRT datasource.
def ogr_vrt_1():
    """Open the shared VRT datasource used by the rest of this suite."""
    gdaltest.vrt_ds = ogr.Open( 'data/vrt_test.vrt' )
    return 'success' if gdaltest.vrt_ds is not None else 'fail'
###############################################################################
# Verify the geometries, in the "test2" layer based on x,y,z columns.
#
# Also tests FID-copied-from-source.
def ogr_vrt_2():
    """Verify geometries of the 'test2' layer built from x,y,z columns.

    Also tests FID-copied-from-source (FIDs 0 and 1 here).
    Returns 'success', 'fail' or 'skip' as the harness expects.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    lyr = gdaltest.vrt_ds.GetLayerByName( 'test2' )
    extent = lyr.GetExtent()
    if extent != (12.5, 100.0, 17.0, 200.0):
        gdaltest.post_reason('wrong extent')
        print(extent)
        return 'fail'
    # The 'other' attribute must list both features, in order.
    expect = ['First', 'Second']
    tr = ogrtest.check_features_against_list( lyr, 'other', expect )
    if not tr:
        return 'fail'
    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat,'POINT(12.5 17 1.2)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    # FID copied from source: the first feature gets 0.
    if feat.GetFID() != 0:
        gdaltest.post_reason( 'Unexpected fid' )
        return 'fail'
    feat.Destroy()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat,'POINT(100 200)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    if feat.GetFID() != 1:
        gdaltest.post_reason( 'Unexpected fid' )
        return 'fail'
    feat.Destroy()
    return 'success'
###############################################################################
# Same test on layer 3 derived from WKT column.
#
# Also tests FID-from-attribute.
def ogr_vrt_3():
    """Same as ogr_vrt_2 but on layer 'test3' derived from a WKT column.

    Also tests FID-from-attribute: FIDs here are 1 and 2 (taken from a
    field), not the 0-based source FIDs.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    lyr = gdaltest.vrt_ds.GetLayerByName( 'test3' )
    expect = ['First', 'Second']
    tr = ogrtest.check_features_against_list( lyr, 'other', expect )
    if not tr:
        return 'fail'
    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat,'POINT(12.5 17 1.2)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    # FID comes from an attribute column, so it starts at 1.
    if feat.GetFID() != 1:
        gdaltest.post_reason( 'Unexpected fid' )
        return 'fail'
    feat.Destroy()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat,'POINT(100 200)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    if feat.GetFID() != 2:
        gdaltest.post_reason( 'Unexpected fid' )
        return 'fail'
    feat.Destroy()
    return 'success'
###############################################################################
# Test a spatial query.
def ogr_vrt_4():
    """Test a rectangular spatial query against layer 'test3'."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    layer = gdaltest.vrt_ds.GetLayerByName( 'test3' )
    layer.ResetReading()
    layer.SetSpatialFilterRect( 90, 90, 300, 300 )
    # Only the second feature falls inside the filter rectangle.
    if not ogrtest.check_features_against_list( layer, 'other', ['Second'] ):
        return 'fail'
    layer.ResetReading()
    feature = layer.GetNextFeature()
    if ogrtest.check_feature_geometry(feature, 'POINT(100 200)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    feature.Destroy()
    layer.SetSpatialFilter( None )
    return 'success'
###############################################################################
# Test an attribute query.
def ogr_vrt_5():
    """Test an attribute query ('x < 50') against layer 'test3'."""
    # Fix: guard added for consistency with every other test in this file;
    # without it the test raises AttributeError instead of skipping when
    # the shared datasource failed to open in ogr_vrt_1.
    if gdaltest.vrt_ds is None:
        return 'skip'
    lyr = gdaltest.vrt_ds.GetLayerByName( 'test3' )
    lyr.ResetReading()
    lyr.SetAttributeFilter( 'x < 50' )
    # Only the first feature has x below 50.
    expect = ['First']
    tr = ogrtest.check_features_against_list( lyr, 'other', expect )
    if not tr:
        return 'fail'
    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat,'POINT(12.5 17 1.2)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    feat.Destroy()
    lyr.SetAttributeFilter( None )
    return 'success'
###############################################################################
# Test GetFeature() on layer with FID coming from a column.
def ogr_vrt_6():
    """Test GetFeature() on a layer whose FID comes from a column."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    layer = gdaltest.vrt_ds.GetLayerByName( 'test3' )
    layer.ResetReading()
    # FID 2 maps to the second source record via the FID column.
    feature = layer.GetFeature( 2 )
    if feature.GetField( 'other' ) != 'Second':
        gdaltest.post_reason( 'GetFeature() did not work properly.' )
        return 'fail'
    feature.Destroy()
    return 'success'
###############################################################################
# Same as test 3, but on the result of an SQL query.
#
def ogr_vrt_7():
    """Same checks as ogr_vrt_3, but on layer 'test4', the result of an
    SQL query (SrcSQL) instead of a plain source layer."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    lyr = gdaltest.vrt_ds.GetLayerByName( 'test4' )
    expect = ['First', 'Second']
    tr = ogrtest.check_features_against_list( lyr, 'other', expect )
    if not tr:
        return 'fail'
    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat,'POINT(12.5 17 1.2)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    # FID from attribute: first feature gets 1.
    if feat.GetFID() != 1:
        gdaltest.post_reason( 'Unexpected fid' )
        return 'fail'
    feat.Destroy()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat,'POINT(100 200)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    if feat.GetFID() != 2:
        gdaltest.post_reason( 'Unexpected fid' )
        return 'fail'
    feat.Destroy()
    return 'success'
###############################################################################
# Similar test, but now we put the whole VRT contents directly into the
# "filename".
#
def ogr_vrt_8():
    """Similar test, but the whole VRT document is passed inline as the
    'filename' given to ogr.Open() instead of living on disk."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    vrt_xml = '<OGRVRTDataSource><OGRVRTLayer name="test4"><SrcDataSource relativeToVRT="0">data/flat.dbf</SrcDataSource><SrcSQL>SELECT * FROM flat</SrcSQL><FID>fid</FID><GeometryType>wkbPoint</GeometryType><GeometryField encoding="PointFromColumns" x="x" y="y" z="z"/></OGRVRTLayer></OGRVRTDataSource>'
    ds = ogr.Open( vrt_xml )
    lyr = ds.GetLayerByName( 'test4' )
    expect = ['First', 'Second']
    tr = ogrtest.check_features_against_list( lyr, 'other', expect )
    if not tr:
        return 'fail'
    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat,'POINT(12.5 17 1.2)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    # FID comes from the 'fid' column declared in the inline XML.
    if feat.GetFID() != 1:
        gdaltest.post_reason( 'Unexpected fid' )
        return 'fail'
    feat.Destroy()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat,'POINT(100 200)',
                                      max_error = 0.000000001 ) != 0:
        return 'fail'
    if feat.GetFID() != 2:
        gdaltest.post_reason( 'Unexpected fid' )
        return 'fail'
    feat.Destroy()
    ds.Destroy()
    ds = None
    return 'success'
###############################################################################
# Test that attribute filters are passed through to an underlying layer.
def ogr_vrt_9():
    """Check that attribute filters are passed through to the source layer."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    lyr = gdaltest.vrt_ds.GetLayerByName( 'test3' )
    lyr.SetAttributeFilter( 'other = "Second"' )
    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if feat.GetField( 'other' ) != 'Second':
        gdaltest.post_reason( 'attribute filter did not work.' )
        return 'fail'
    feat.Destroy()
    # OpenShared re-uses the already-open source datasource, so the feature
    # count observed here reflects the filter the VRT layer installed on it.
    sub_ds = ogr.OpenShared( 'data/flat.dbf' )
    sub_layer = sub_ds.GetLayerByName( 'flat' )
    sub_layer.ResetReading()
    if sub_layer.GetFeatureCount() != 1:
        print(sub_layer.GetFeatureCount())
        gdaltest.post_reason( 'attribute filter not passed to sublayer.' )
        return 'fail'
    lyr.SetAttributeFilter( None )
    sub_ds.Release()
    sub_ds = None
    return 'success'
###############################################################################
# Test capabilities
#
def ogr_vrt_10():
    """A VRT layer must report the same capabilities as its source layer."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    vrt_xml = '<OGRVRTDataSource><OGRVRTLayer name="test"><SrcDataSource relativeToVRT="0">data/testpoly.shp</SrcDataSource><SrcLayer>testpoly</SrcLayer></OGRVRTLayer></OGRVRTDataSource>'
    vrt_ds = ogr.Open( vrt_xml )
    vrt_lyr = vrt_ds.GetLayerByName( 'test' )
    src_ds = ogr.Open('data/testpoly.shp')
    src_lyr = src_ds.GetLayer(0)
    # Each capability must be forwarded unchanged from the source layer.
    for capability in (ogr.OLCFastFeatureCount,
                       ogr.OLCFastGetExtent,
                       ogr.OLCRandomRead):
        if vrt_lyr.TestCapability(capability) != src_lyr.TestCapability(capability):
            return 'fail'
    vrt_ds.Destroy()
    vrt_ds = None
    src_ds.Destroy()
    src_ds = None
    return 'success'
###############################################################################
# Test VRT write capabilities with PointFromColumns geometries
# Test also the reportGeomSrcColumn attribute
def ogr_vrt_11():
    """Test VRT write support with PointFromColumns geometries.

    Also exercises reportSrcColumn="false": the x/y source columns must
    not be exposed as attribute fields.

    Fixes: files are written with 'with' so they are closed even on
    error, and the bare 'except:' around os.remove() is narrowed to
    OSError so real failures are no longer swallowed.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    with open('tmp/test.csv', 'wb') as f:
        f.write('x,val1,y,val2\n'.encode('ascii'))
        f.write('2,"val11",49,"val12"\n'.encode('ascii'))
    try:
        os.remove('tmp/test.csvt')
    except OSError:
        # No leftover .csvt from a previous run: nothing to clean up.
        pass
    vrt_xml = """
<OGRVRTDataSource>
    <OGRVRTLayer name="test">
        <SrcDataSource relativeToVRT="0">tmp/test.csv</SrcDataSource>
        <SrcLayer>test</SrcLayer>
        <GeometryField encoding="PointFromColumns" x="x" y="y" reportSrcColumn="false"/>
    </OGRVRTLayer>
</OGRVRTDataSource>"""
    vrt_ds = ogr.Open( vrt_xml, update = 1 )
    vrt_lyr = vrt_ds.GetLayerByName( 'test' )
    # Only val1 and val2 attributes should be reported
    if vrt_lyr.GetLayerDefn().GetFieldCount() != 2:
        return 'fail'
    if vrt_lyr.GetLayerDefn().GetFieldDefn(0).GetNameRef() != 'val1':
        return 'fail'
    if vrt_lyr.GetLayerDefn().GetFieldDefn(1).GetNameRef() != 'val2':
        return 'fail'
    feat = ogr.Feature(vrt_lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT (3 50)')
    feat.SetGeometryDirectly(geom)
    feat.SetField('val1', 'val21')
    vrt_lyr.CreateFeature(feat)
    feat.Destroy()
    vrt_lyr.ResetReading()
    feat = vrt_lyr.GetFeature(2)
    geom = feat.GetGeometryRef()
    if geom.ExportToWkt() != 'POINT (3 50)':
        return 'fail'
    if feat.GetFieldAsString('val1') != 'val21':
        return 'fail'
    feat.Destroy()
    # The x and y fields are considered as string by default, so spatial
    # filter cannot be turned into attribute filter
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    vrt_lyr.SetSpatialFilterRect(0, 40, 10, 49.5)
    ret = vrt_lyr.GetFeatureCount()
    gdal.PopErrorHandler()
    if gdal.GetLastErrorMsg().find('not declared as numeric fields') == -1:
        return 'fail'
    if ret != 1:
        return 'fail'
    vrt_ds.Destroy()
    vrt_ds = None
    # Add a .csvt file to specify the x and y columns as reals
    with open('tmp/test.csvt', 'wb') as f:
        f.write('Real,String,Real,String\n'.encode('ascii'))
    vrt_ds = ogr.Open( vrt_xml, update = 1 )
    vrt_lyr = vrt_ds.GetLayerByName( 'test' )
    vrt_lyr.SetSpatialFilterRect(0, 40, 10, 49.5)
    if vrt_lyr.GetFeatureCount() != 1:
        return 'fail'
    if gdal.GetLastErrorMsg() != '':
        return 'fail'
    vrt_lyr.SetAttributeFilter("1 = 1")
    if vrt_lyr.GetFeatureCount() != 1:
        return 'fail'
    vrt_lyr.SetAttributeFilter("1 = 0")
    if vrt_lyr.GetFeatureCount() != 0:
        return 'fail'
    vrt_ds.Destroy()
    vrt_ds = None
    os.remove('tmp/test.csv')
    os.remove('tmp/test.csvt')
    return 'success'
###############################################################################
# Test VRT write capabilities with WKT geometries
def ogr_vrt_12():
    """Test VRT write support with WKT-encoded geometries.

    Fix: the source CSV is written with a 'with' block so the file is
    closed even if a write fails.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    with open('tmp/test.csv', 'wb') as f:
        f.write('wkt_geom,val1,val2\n'.encode('ascii'))
        f.write('POINT (2 49),"val11","val12"\n'.encode('ascii'))
    vrt_xml = """
<OGRVRTDataSource>
    <OGRVRTLayer name="test">
        <SrcDataSource relativeToVRT="0">tmp/test.csv</SrcDataSource>
        <SrcLayer>test</SrcLayer>
        <GeometryField encoding="WKT" field="wkt_geom"/>
    </OGRVRTLayer>
</OGRVRTDataSource>"""
    vrt_ds = ogr.Open( vrt_xml, update = 1 )
    vrt_lyr = vrt_ds.GetLayerByName( 'test' )
    # Write a second feature through the VRT layer ...
    feat = ogr.Feature(vrt_lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT (3 50)')
    feat.SetGeometryDirectly(geom)
    feat.SetField('val1', 'val21')
    vrt_lyr.CreateFeature(feat)
    feat.Destroy()
    # ... and read it back to verify geometry and attribute round-trip.
    vrt_lyr.ResetReading()
    feat = vrt_lyr.GetFeature(2)
    geom = feat.GetGeometryRef()
    if geom.ExportToWkt() != 'POINT (3 50)':
        return 'fail'
    if feat.GetFieldAsString('val1') != 'val21':
        return 'fail'
    feat.Destroy()
    vrt_ds.Destroy()
    vrt_ds = None
    os.remove('tmp/test.csv')
    return 'success'
###############################################################################
# Test VRT write capabilities with WKB geometries
def ogr_vrt_13():
    """Test VRT write support with WKB-encoded geometries.

    The source CSV starts empty (header only); one feature is written
    through the VRT layer and read back.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    f = open('tmp/test.csv', 'wb')
    f.write('wkb_geom,val1,val2\n'.encode('ascii'))
    f.close()
    vrt_xml = """
<OGRVRTDataSource>
    <OGRVRTLayer name="test">
        <SrcDataSource relativeToVRT="0">tmp/test.csv</SrcDataSource>
        <SrcLayer>test</SrcLayer>
        <GeometryField encoding="WKB" field="wkb_geom"/>
    </OGRVRTLayer>
</OGRVRTDataSource>"""
    vrt_ds = ogr.Open( vrt_xml, update = 1 )
    vrt_lyr = vrt_ds.GetLayerByName( 'test' )
    feat = ogr.Feature(vrt_lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT (3 50)')
    feat.SetGeometryDirectly(geom)
    feat.SetField('val1', 'val21')
    vrt_lyr.CreateFeature(feat)
    feat.Destroy()
    # Read the feature back and verify the WKB round-trip.
    vrt_lyr.ResetReading()
    feat = vrt_lyr.GetFeature(1)
    geom = feat.GetGeometryRef()
    if geom.ExportToWkt() != 'POINT (3 50)':
        return 'fail'
    if feat.GetFieldAsString('val1') != 'val21':
        return 'fail'
    feat.Destroy()
    vrt_ds.Destroy()
    vrt_ds = None
    os.remove('tmp/test.csv')
    return 'success'
###############################################################################
# Test SrcRegion element for VGS_Direct
def ogr_vrt_14():
    """Test the SrcRegion element with VGS_Direct geometries.

    Builds a 4-point shapefile (only one point inside the SrcRegion
    polygon) and checks extent, feature count and spatial filtering.

    Fixes: the bare 'except:' around DeleteDataSource is narrowed to
    Exception, the stray semicolon after ExecuteSQL is dropped, and the
    four duplicated feature-creation stanzas are folded into a helper.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    # Remove any leftover shapefile from a previous run; errors are
    # expected when it does not exist, so they are silenced.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    try:
        ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/test.shp')
    except Exception:
        pass
    gdal.PopErrorHandler()
    shp_ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/test.shp')
    shp_lyr = shp_ds.CreateLayer('test')

    def add_point(wkt):
        # One shapefile feature per WKT point.
        feat = ogr.Feature(shp_lyr.GetLayerDefn())
        feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(wkt))
        shp_lyr.CreateFeature(feat)
        feat.Destroy()

    add_point('POINT (-10 49)')
    add_point('POINT (-10 49)')
    add_point('POINT (2 49)')   # the only point inside the SrcRegion below
    add_point('POINT (-10 49)')
    shp_ds.ExecuteSQL('CREATE SPATIAL INDEX on test')
    shp_ds.Destroy()
    vrt_xml = """
<OGRVRTDataSource>
    <OGRVRTLayer name="mytest">
        <SrcDataSource relativeToVRT="0">tmp/test.shp</SrcDataSource>
        <SrcLayer>test</SrcLayer>
        <SrcRegion>POLYGON((0 40,0 50,10 50,10 40,0 40))</SrcRegion>
    </OGRVRTLayer>
</OGRVRTDataSource>"""
    vrt_ds = ogr.Open( vrt_xml )
    vrt_lyr = vrt_ds.GetLayerByName( 'mytest' )
    if vrt_lyr.TestCapability(ogr.OLCFastSpatialFilter) != 1:
        gdaltest.post_reason( 'Fast filter not set.' )
        return 'fail'
    extent = vrt_lyr.GetExtent()
    if extent != (2.0, 2.0, 49.0, 49.0):
        gdaltest.post_reason('wrong extent')
        print(extent)
        return 'fail'
    if vrt_lyr.GetFeatureCount() != 1:
        gdaltest.post_reason( 'Feature count not one as expected.' )
        return 'fail'
    feat = vrt_lyr.GetNextFeature()
    if feat.GetFID() != 2:
        gdaltest.post_reason( 'did not get fid 2.' )
        return 'fail'
    geom = feat.GetGeometryRef()
    if geom.ExportToWkt() != 'POINT (2 49)':
        gdaltest.post_reason( 'did not get expected point geometry.' )
        return 'fail'
    feat.Destroy()
    vrt_lyr.SetSpatialFilterRect(1, 41, 3, 49.5)
    if vrt_lyr.GetFeatureCount() != 1:
        # SrcRegion intersection requires GEOS; skip when it is absent.
        if gdal.GetLastErrorMsg().find('GEOS support not enabled') != -1:
            ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/test.shp')
            return 'skip'
        print(vrt_lyr.GetFeatureCount())
        gdaltest.post_reason( 'did not get one feature on rect spatial filter.' )
        return 'fail'
    vrt_lyr.SetSpatialFilterRect(1, 41, 3, 48.5)
    if vrt_lyr.GetFeatureCount() != 0:
        gdaltest.post_reason( 'Did not get expected zero feature count.')
        return 'fail'
    vrt_lyr.SetSpatialFilter(None)
    if vrt_lyr.GetFeatureCount() != 1:
        gdaltest.post_reason( 'Did not get expected one feature count with no filter.')
        return 'fail'
    vrt_ds.Destroy()
    vrt_ds = None
    ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/test.shp')
    return 'success'
###############################################################################
# Test SrcRegion element for VGS_WKT
def ogr_vrt_15():
    """Test the SrcRegion element with VGS_WKT geometries.

    Four WKT points are written to a CSV; only one lies inside the
    SrcRegion polygon, so all counts below expect a single feature.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    f = open('tmp/test.csv', 'wb')
    f.write('wkt_geom,val1,val2\n'.encode('ascii'))
    f.write('POINT (-10 49),,\n'.encode('ascii'))
    f.write('POINT (-10 49),,\n'.encode('ascii'))
    f.write('POINT (2 49),,\n'.encode('ascii'))
    f.write('POINT (-10 49),,\n'.encode('ascii'))
    f.close()
    vrt_xml = """
<OGRVRTDataSource>
    <OGRVRTLayer name="test">
        <SrcDataSource relativeToVRT="0">tmp/test.csv</SrcDataSource>
        <SrcLayer>test</SrcLayer>
        <GeometryField encoding="WKT" field="wkt_geom"/>
        <SrcRegion>POLYGON((0 40,0 50,10 50,10 40,0 40))</SrcRegion>
    </OGRVRTLayer>
</OGRVRTDataSource>"""
    vrt_ds = ogr.Open( vrt_xml )
    vrt_lyr = vrt_ds.GetLayerByName( 'test' )
    # WKT geometries cannot be filtered quickly, unlike VGS_Direct.
    if vrt_lyr.TestCapability(ogr.OLCFastSpatialFilter) != 0:
        return 'fail'
    if vrt_lyr.GetFeatureCount() != 1:
        return 'fail'
    feat = vrt_lyr.GetNextFeature()
    if feat.GetFID() != 3:
        return 'fail'
    geom = feat.GetGeometryRef()
    if geom.ExportToWkt() != 'POINT (2 49)':
        return 'fail'
    feat.Destroy()
    vrt_lyr.SetSpatialFilterRect(1, 41, 3, 49.5)
    if vrt_lyr.GetFeatureCount() != 1:
        return 'fail'
    vrt_lyr.SetSpatialFilterRect(1, 41, 3, 48.5)
    if vrt_lyr.GetFeatureCount() != 0:
        return 'fail'
    vrt_lyr.SetSpatialFilter(None)
    if vrt_lyr.GetFeatureCount() != 1:
        return 'fail'
    vrt_ds.Destroy()
    vrt_ds = None
    os.remove('tmp/test.csv')
    return 'success'
###############################################################################
# Test SrcRegion element for VGS_PointFromColumns
def ogr_vrt_16():
    """Test the SrcRegion element with VGS_PointFromColumns geometries.

    A .csvt declares x/y as Real so they can feed the geometry; only one
    of the four points lies inside the SrcRegion polygon.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    f = open('tmp/test.csvt', 'wb')
    f.write('Real,Real,String,String\n'.encode('ascii'))
    f.close()
    f = open('tmp/test.csv', 'wb')
    f.write('x,y,val1,val2\n'.encode('ascii'))
    f.write('-10,49,,\n'.encode('ascii'))
    f.write('-10,49,,\n'.encode('ascii'))
    f.write('2,49,,\n'.encode('ascii'))
    f.write('-10,49,,\n'.encode('ascii'))
    f.close()
    vrt_xml = """
<OGRVRTDataSource>
    <OGRVRTLayer name="test">
        <SrcDataSource relativeToVRT="0">tmp/test.csv</SrcDataSource>
        <SrcLayer>test</SrcLayer>
        <GeometryField encoding="PointFromColumns" x="x" y="y"/>
        <SrcRegion>POLYGON((0 40,0 50,10 50,10 40,0 40))</SrcRegion>
    </OGRVRTLayer>
</OGRVRTDataSource>"""
    vrt_ds = ogr.Open( vrt_xml )
    vrt_lyr = vrt_ds.GetLayerByName( 'test' )
    if vrt_lyr.TestCapability(ogr.OLCFastSpatialFilter) != 0:
        return 'fail'
    if vrt_lyr.GetFeatureCount() != 1:
        return 'fail'
    feat = vrt_lyr.GetNextFeature()
    if feat.GetFID() != 3:
        return 'fail'
    geom = feat.GetGeometryRef()
    if geom.ExportToWkt() != 'POINT (2 49)':
        return 'fail'
    feat.Destroy()
    vrt_lyr.SetSpatialFilterRect(1, 41, 3, 49.5)
    if vrt_lyr.GetFeatureCount() != 1:
        # SrcRegion intersection requires GEOS; skip when it is absent.
        if gdal.GetLastErrorMsg().find('GEOS support not enabled') != -1:
            vrt_ds.Destroy()
            os.remove('tmp/test.csv')
            os.remove('tmp/test.csvt')
            return 'skip'
        return 'fail'
    vrt_lyr.SetSpatialFilterRect(1, 41, 3, 48.5)
    if vrt_lyr.GetFeatureCount() != 0:
        return 'fail'
    vrt_lyr.SetSpatialFilter(None)
    if vrt_lyr.GetFeatureCount() != 1:
        return 'fail'
    vrt_ds.Destroy()
    vrt_ds = None
    os.remove('tmp/test.csv')
    os.remove('tmp/test.csvt')
    return 'success'
###############################################################################
# Test explicit field definitions.
def ogr_vrt_17():
    """Test explicit <Field> definitions (name, src, type, width, precision).

    Fix: the comparison against None now uses identity ('is not None')
    instead of '!= None', per PEP 8.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    vrt_xml = """
<OGRVRTDataSource>
    <OGRVRTLayer name="test">
        <SrcDataSource relativeToVRT="0">data/prime_meridian.csv</SrcDataSource>
        <SrcLayer>prime_meridian</SrcLayer>
        <Field name="pm_code" src="PRIME_MERIDIAN_CODE" type="integer" width="4" />
        <Field name="prime_meridian_name" width="24" />
        <Field name="new_col" type="Real" width="12" precision="3" />
    </OGRVRTLayer>
</OGRVRTDataSource>"""
    vrt_ds = ogr.Open( vrt_xml )
    vrt_lyr = vrt_ds.GetLayerByName( 'test' )
    if vrt_lyr.GetLayerDefn().GetFieldCount() != 3:
        gdaltest.post_reason( 'unexpected field count.' )
        return 'fail'
    # Renamed + retyped source column.
    flddef = vrt_lyr.GetLayerDefn().GetFieldDefn(0)
    if flddef.GetName() != 'pm_code' \
       or flddef.GetType() != ogr.OFTInteger \
       or flddef.GetWidth() != 4 \
       or flddef.GetPrecision() != 0:
        gdaltest.post_reason( 'pm_code field definition wrong.' )
        return 'fail'
    # Source column kept under its own name, width overridden.
    flddef = vrt_lyr.GetLayerDefn().GetFieldDefn(1)
    if flddef.GetName() != 'prime_meridian_name' \
       or flddef.GetType() != ogr.OFTString \
       or flddef.GetWidth() != 24 \
       or flddef.GetPrecision() != 0:
        gdaltest.post_reason( 'prime_meridian_name field definition wrong.' )
        return 'fail'
    # Field with no source column at all: always NULL.
    flddef = vrt_lyr.GetLayerDefn().GetFieldDefn(2)
    if flddef.GetName() != 'new_col' \
       or flddef.GetType() != ogr.OFTReal \
       or flddef.GetWidth() != 12 \
       or flddef.GetPrecision() != 3:
        gdaltest.post_reason( 'new_col field definition wrong.' )
        return 'fail'
    feat = vrt_lyr.GetNextFeature()
    if feat.GetField(0) != 8901 or feat.GetField(1) != "Greenwich" \
       or feat.GetField(2) is not None:
        gdaltest.post_reason( 'did not get expected field value(s).' )
        return 'fail'
    feat.Destroy()
    vrt_ds.Destroy()
    vrt_ds = None
    return 'success'
###############################################################################
# Test that attribute filters are *not* passed to sublayer by default
# when explicit fields are defined.
def ogr_vrt_18():
    """Attribute filters must NOT be forwarded to the sublayer by default
    when explicit <Field> definitions are present - they are evaluated on
    the VRT layer itself instead."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    vrt_xml = """
<OGRVRTDataSource>
    <OGRVRTLayer name="test">
        <SrcDataSource relativeToVRT="0">data/prime_meridian.csv</SrcDataSource>
        <SrcLayer>prime_meridian</SrcLayer>
        <Field name="pm_code" src="PRIME_MERIDIAN_CODE" type="integer" width="4" />
        <Field name="prime_meridian_name" width="24" />
        <Field name="new_col" type="Real" width="12" precision="3" />
    </OGRVRTLayer>
</OGRVRTDataSource>"""
    datasource = ogr.Open( vrt_xml )
    layer = datasource.GetLayerByName( 'test' )
    layer.SetAttributeFilter( 'pm_code=8904' )
    feature = layer.GetNextFeature()
    if feature.GetField(0) != 8904:
        gdaltest.post_reason( 'Attribute filter not working properly' )
        return 'fail'
    feature.Destroy()
    datasource.Destroy()
    datasource = None
    return 'success'
###############################################################################
# Run test_ogrsf (optimized path)
def ogr_vrt_19_optimized():
    """Run the external test_ogrsf utility over the optimized VRT path."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    import test_cli_utilities
    ogrsf_path = test_cli_utilities.get_test_ogrsf_path()
    if ogrsf_path is None:
        return 'skip'
    output = gdaltest.runexternal(ogrsf_path + ' -ro data/poly_vrt.vrt')
    # The utility must report INFO lines and no ERROR lines.
    if 'INFO' not in output or 'ERROR' in output:
        print(output)
        return 'fail'
    return 'success'
###############################################################################
# Run test_ogrsf (non optimized path)
def ogr_vrt_19_nonoptimized():
    """Run the external test_ogrsf utility over the non-optimized VRT path."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    import test_cli_utilities
    ogrsf_path = test_cli_utilities.get_test_ogrsf_path()
    if ogrsf_path is None:
        return 'skip'
    output = gdaltest.runexternal(ogrsf_path + ' -ro data/poly_nonoptimized_vrt.vrt')
    # The utility must report INFO lines and no ERROR lines.
    if 'INFO' not in output or 'ERROR' in output:
        print(output)
        return 'fail'
    return 'success'
###############################################################################
# Test VGS_Direct
def ogr_vrt_20():
    """Test VGS_Direct geometry passthrough without a SrcRegion.

    Builds a 4-point shapefile and verifies that the VRT layer forwards
    fast feature count / spatial filter / extent capabilities and honours
    rectangular spatial filters.

    Fixes: the bare 'except:' around DeleteDataSource is narrowed to
    Exception, the stray semicolon after ExecuteSQL is dropped, and the
    four duplicated feature-creation stanzas are folded into a helper.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'
    # Remove any leftover shapefile from a previous run; errors are
    # expected when it does not exist, so they are silenced.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    try:
        ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/test.shp')
    except Exception:
        pass
    gdal.PopErrorHandler()
    shp_ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/test.shp')
    shp_lyr = shp_ds.CreateLayer('test')

    def add_point(wkt):
        # One shapefile feature per WKT point.
        feat = ogr.Feature(shp_lyr.GetLayerDefn())
        feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(wkt))
        shp_lyr.CreateFeature(feat)
        feat.Destroy()

    add_point('POINT (-10 45)')
    add_point('POINT (-10 49)')
    add_point('POINT (2 49)')
    add_point('POINT (-10 49)')
    shp_ds.ExecuteSQL('CREATE SPATIAL INDEX on test')
    shp_ds.Destroy()
    vrt_xml = """
<OGRVRTDataSource>
    <OGRVRTLayer name="mytest">
        <SrcDataSource relativeToVRT="0">tmp/test.shp</SrcDataSource>
        <SrcLayer>test</SrcLayer>
    </OGRVRTLayer>
</OGRVRTDataSource>"""
    vrt_ds = ogr.Open( vrt_xml )
    vrt_lyr = vrt_ds.GetLayerByName( 'mytest' )
    if vrt_lyr.TestCapability(ogr.OLCFastFeatureCount) != 1:
        gdaltest.post_reason( 'Fast feature count not set.' )
        return 'fail'
    if vrt_lyr.TestCapability(ogr.OLCFastSpatialFilter) != 1:
        gdaltest.post_reason( 'Fast filter not set.' )
        return 'fail'
    if vrt_lyr.TestCapability(ogr.OLCFastGetExtent) != 1:
        gdaltest.post_reason( 'Fast extent not set.' )
        return 'fail'
    extent = vrt_lyr.GetExtent()
    if extent != (-10.0, 2.0, 45.0, 49.0):
        gdaltest.post_reason('wrong extent')
        print(extent)
        return 'fail'
    if vrt_lyr.GetFeatureCount() != 4:
        gdaltest.post_reason( 'Feature count not 4 as expected.' )
        return 'fail'
    vrt_lyr.SetSpatialFilterRect(1, 48.5, 3, 49.5)
    if vrt_lyr.GetFeatureCount() != 1:
        # Spatial filtering requires GEOS; skip when it is absent.
        if gdal.GetLastErrorMsg().find('GEOS support not enabled') != -1:
            ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/test.shp')
            return 'skip'
        print(vrt_lyr.GetFeatureCount())
        gdaltest.post_reason( 'did not get one feature on rect spatial filter.' )
        return 'fail'
    if vrt_lyr.TestCapability(ogr.OLCFastFeatureCount) != 1:
        gdaltest.post_reason( 'Fast feature count not set.' )
        return 'fail'
    if vrt_lyr.TestCapability(ogr.OLCFastGetExtent) != 1:
        gdaltest.post_reason( 'Fast extent not set.' )
        return 'fail'
    extent = vrt_lyr.GetExtent()
    # the shapefile driver currently doesn't change the extent even in the
    # presence of a spatial filter, so that could change in the future
    if extent != (-10.0, 2.0, 45.0, 49.0):
        gdaltest.post_reason('wrong extent')
        print(extent)
        return 'fail'
    vrt_lyr.SetSpatialFilterRect(1, 48, 3, 48.5)
    if vrt_lyr.GetFeatureCount() != 0:
        gdaltest.post_reason( 'Did not get expected zero feature count.')
        return 'fail'
    vrt_lyr.SetSpatialFilter(None)
    if vrt_lyr.GetFeatureCount() != 4:
        gdaltest.post_reason( 'Feature count not 4 as expected with no filter.')
        return 'fail'
    vrt_ds.Destroy()
    vrt_ds = None
    ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/test.shp')
    return 'success'
###############################################################################
# Test lazy initialization with valid layer
def ogr_vrt_21_internal():
    """Check lazy initialization of a valid VRT layer ('test3').

    Every probe reopens the datasource so that the probed method is the
    very first call made on a freshly created, uninitialized layer.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'

    def probe(action):
        # Open a fresh datasource, run the probe on the lazily initialized
        # layer, then drop the datasource reference.
        ds = ogr.Open('data/vrt_test.vrt')
        lyr = ds.GetLayerByName('test3')
        result = action(lyr)
        ds = None
        return result

    # Read-side probes: each must hold on a fresh layer.  Action-only
    # entries (ResetReading, SetSpatialFilter) always evaluate to True.
    read_checks = [
        lambda lyr: lyr.GetName() == 'test3',
        lambda lyr: lyr.GetGeomType() == ogr.wkbPoint,
        lambda lyr: lyr.GetSpatialRef().ExportToWkt().find('84') != -1,
        lambda lyr: (lyr.ResetReading(), True)[1],
        lambda lyr: lyr.GetNextFeature() is not None,
        lambda lyr: lyr.GetFeature(1) is not None,
        lambda lyr: lyr.GetFeatureCount() != 0,
        lambda lyr: lyr.SetNextByIndex(1) == 0,
        lambda lyr: lyr.GetLayerDefn().GetFieldCount() != 0,
        lambda lyr: lyr.SetAttributeFilter('') == 0,
        lambda lyr: (lyr.SetSpatialFilter(None), True)[1],
        lambda lyr: lyr.TestCapability(ogr.OLCFastFeatureCount) == 1,
        lambda lyr: lyr.GetExtent() is not None,
        lambda lyr: lyr.GetFIDColumn() == 'fid',
    ]
    for check in read_checks:
        if not probe(check):
            return 'fail'

    # Write-side probes: return codes are deliberately ignored, only the
    # absence of a crash on a lazily initialized layer matters.
    feat = ogr.Feature(ogr.FeatureDefn())
    write_probes = [
        lambda lyr: lyr.CreateFeature(feat),
        lambda lyr: lyr.SetFeature(feat),
        lambda lyr: lyr.DeleteFeature(0),
        lambda lyr: lyr.SyncToDisk(),
    ]
    for action in write_probes:
        probe(action)
    return 'success'
def ogr_vrt_21():
    """Run ogr_vrt_21_internal with GDAL errors silenced.

    The lazy-initialization probes intentionally trigger GDAL errors; the
    quiet handler suppresses them.  Any Python exception raised by the
    internal test is turned into a 'fail' result.
    """
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    try:
        # except Exception (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate and can abort the whole test run.
        ret = ogr_vrt_21_internal()
    except Exception:
        ret = 'fail'
    finally:
        # Guarantee the handler is popped even on unexpected exits.
        gdal.PopErrorHandler()
    return ret
###############################################################################
# Test lazy initialization with invalid layer
def ogr_vrt_22_internal():
    """Check lazy initialization of an invalid VRT layer ('test5').

    Same probing scheme as ogr_vrt_21_internal, but with inverted
    expectations: the broken layer must report empty/failed results
    without crashing.
    """
    if gdaltest.vrt_ds is None:
        return 'skip'

    def probe(action):
        # Open a fresh datasource, run the probe on the lazily initialized
        # layer, then drop the datasource reference.
        ds = ogr.Open('data/vrt_test.vrt')
        lyr = ds.GetLayerByName('test5')
        result = action(lyr)
        ds = None
        return result

    # Read-side probes; action-only entries always evaluate to True.
    read_checks = [
        lambda lyr: lyr.GetName() == 'test5',
        lambda lyr: lyr.GetGeomType() == ogr.wkbPoint,
        lambda lyr: lyr.GetSpatialRef().ExportToWkt().find('84') != -1,
        lambda lyr: (lyr.ResetReading(), True)[1],
        lambda lyr: lyr.GetNextFeature() is None,
        lambda lyr: lyr.GetFeature(1) is None,
        lambda lyr: lyr.GetFeatureCount() == 0,
        lambda lyr: lyr.SetNextByIndex(1) != 0,
        lambda lyr: lyr.GetLayerDefn().GetFieldCount() == 0,
        lambda lyr: lyr.SetAttributeFilter('') != 0,
        lambda lyr: (lyr.SetSpatialFilter(None), True)[1],
        lambda lyr: lyr.TestCapability(ogr.OLCFastFeatureCount) == 0,
        lambda lyr: lyr.GetFIDColumn() == '',
        lambda lyr: (lyr.GetExtent(), True)[1],
    ]
    for check in read_checks:
        if not probe(check):
            return 'fail'

    # Write/transaction probes: results ignored, they must simply not
    # crash on the invalid, lazily initialized layer.
    feat = ogr.Feature(ogr.FeatureDefn())
    write_probes = [
        lambda lyr: lyr.CreateFeature(feat),
        lambda lyr: lyr.SetFeature(feat),
        lambda lyr: lyr.DeleteFeature(0),
        lambda lyr: lyr.SyncToDisk(),
        lambda lyr: lyr.StartTransaction(),
        lambda lyr: lyr.CommitTransaction(),
        lambda lyr: lyr.RollbackTransaction(),
    ]
    for action in write_probes:
        probe(action)
    return 'success'
def ogr_vrt_22():
    """Run ogr_vrt_22_internal with GDAL errors silenced.

    The invalid-layer probes intentionally trigger GDAL errors; the quiet
    handler suppresses them.  Any Python exception raised by the internal
    test is turned into a 'fail' result.
    """
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    try:
        # except Exception (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate and can abort the whole test run.
        ret = ogr_vrt_22_internal()
    except Exception:
        ret = 'fail'
    finally:
        # Guarantee the handler is popped even on unexpected exits.
        gdal.PopErrorHandler()
    return ret
###############################################################################
# Test anti-recursion mechanism
def ogr_vrt_23(shared_ds_flag = ''):
    """Test the anti-recursion mechanism: two VRTs referencing each other."""
    if int(gdal.VersionInfo('VERSION_NUM')) < 1900:
        gdaltest.post_reason('would crash')
        return 'skip'

    # Write two in-memory VRTs that each declare the other one as source.
    for this_name, other_name in (('rec1', 'rec2'), ('rec2', 'rec1')):
        xml = """<OGRVRTDataSource>
    <OGRVRTLayer name="%s">
        <SrcDataSource%s>/vsimem/%s.vrt</SrcDataSource>
        <SrcLayer>%s</SrcLayer>
    </OGRVRTLayer>
</OGRVRTDataSource>""" % (this_name, shared_ds_flag, other_name, other_name)
        gdal.FileFromMemBuffer('/vsimem/%s.vrt' % this_name, xml)

    ds = ogr.Open('/vsimem/rec1.vrt')
    if ds is None:
        return 'fail'

    # Forcing layer initialization must raise a GDAL error instead of
    # recursing forever.
    gdal.ErrorReset()
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds.GetLayer(0).GetLayerDefn()
    ds.GetLayer(0).GetFeatureCount()
    gdal.PopErrorHandler()
    if gdal.GetLastErrorMsg() == '':
        gdaltest.post_reason('error expected !')
        return 'fail'

    gdal.Unlink('/vsimem/rec1.vrt')
    gdal.Unlink('/vsimem/rec2.vrt')
    return 'success'
###############################################################################
# Test anti-recursion mechanism on shared DS
def ogr_vrt_24():
    """Test the anti-recursion mechanism again, with shared datasources."""
    return ogr_vrt_23(shared_ds_flag=' shared="1"')
###############################################################################
# Test GetFIDColumn() (#4637)
def ogr_vrt_25():
    """Test GetFIDColumn() on VRT layers (#4637)."""
    ds = ogr.Open('data/vrt_test.vrt')
    expectations = [
        # test3 declares fid and uses implicit fields (all source fields
        # become VRT fields), so the fid column can be reported.
        ('test3', 'fid'),
        # test6 declares fid but lists explicit fields without the fid
        # column, so it can *not* be reported.
        ('test6', ''),
        # test2 does not declare fid and the source layer has no fid
        # column, so there is nothing to report.
        ('test2', ''),
    ]
    for layer_name, expected_fid in expectations:
        if ds.GetLayerByName(layer_name).GetFIDColumn() != expected_fid:
            return 'fail'
    ds = None
    return 'success'
###############################################################################
# Test transaction support
def ogr_vrt_26():
    """Test transaction support through a VRT layer backed by SQLite."""
    sqlite_driver = ogr.GetDriverByName('SQLite')
    if sqlite_driver is None:
        return 'skip'
    sqlite_ds = sqlite_driver.CreateDataSource('/vsimem/ogr_vrt_26.db')
    if sqlite_ds is None:
        return 'skip'
    src_lyr = sqlite_ds.CreateLayer('test')
    src_lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
    src_lyr = None

    vrt_ds = ogr.Open("""<OGRVRTDataSource>
    <OGRVRTLayer name="test">
        <SrcDataSource>/vsimem/ogr_vrt_26.db</SrcDataSource>
    </OGRVRTLayer>
</OGRVRTDataSource>""", update = 1)
    lyr = vrt_ds.GetLayer(0)
    if lyr.TestCapability(ogr.OLCTransactions) == 0:
        return 'fail'

    def insert(value):
        # Insert one feature carrying the given 'foo' value.
        new_feat = ogr.Feature(lyr.GetLayerDefn())
        new_feat.SetField(0, value)
        lyr.CreateFeature(new_feat)

    # A rolled-back insertion must not be visible afterwards.
    lyr.StartTransaction()
    insert('foo')
    if lyr.GetFeatureCount() != 1:
        return 'fail'
    lyr.RollbackTransaction()
    if lyr.GetFeatureCount() != 0:
        return 'fail'

    # A committed insertion must persist.
    lyr.StartTransaction()
    insert('bar')
    lyr.CommitTransaction()
    if lyr.GetFeatureCount() != 1:
        return 'fail'

    vrt_ds = None
    sqlite_ds = None
    ogr.GetDriverByName('SQLite').DeleteDataSource('/vsimem/ogr_vrt_26.db')
    return 'success'
###############################################################################
# Test shapebin geometry
def ogr_vrt_27():
    """Test the 'shape' geometry encoding (ESRI shapebin blobs in a CSV field)."""
    csv = """dummy,shapebin
"dummy","01000000000000000000F03F0000000000000040"
"dummy","0300000000000000000008400000000000001040000000000000144000000000000018400100000002000000000000000000000000000840000000000000104000000000000014400000000000001840"
"dummy","0500000000000000000000000000000000000000000000000000F03F000000000000F03F010000000500000000000000000000000000000000000000000000000000000000000000000000000000F03F000000000000F03F000000000000F03F000000000000F03F000000000000000000000000000000000000000000000000"
"""
    gdal.FileFromMemBuffer('/vsimem/ogr_vrt_27.csv', csv)

    ds = ogr.Open("""<OGRVRTDataSource>
    <OGRVRTLayer name="ogr_vrt_27">
        <SrcDataSource relativeToVRT="0" shared="0">/vsimem/ogr_vrt_27.csv</SrcDataSource>
        <GeometryField encoding="shape" field="shapebin"/>
        <Field name="foo"/>
    </OGRVRTLayer>
</OGRVRTDataSource>""")
    if ds is None:
        return 'fail'

    lyr = ds.GetLayer(0)
    # The three blobs must decode to a point, a linestring and a polygon.
    expected_wkts = [ 'POINT (1 2)', 'LINESTRING (3 4,5 6)', 'POLYGON ((0 0,0 1,1 1,1 0,0 0))' ]
    idx = 0
    feat = lyr.GetNextFeature()
    while feat is not None:
        if ogrtest.check_feature_geometry(feat, expected_wkts[idx]) != 0:
            return 'fail'
        idx += 1
        feat = lyr.GetNextFeature()
    ds = None
    gdal.Unlink('/vsimem/ogr_vrt_27.csv')
    return 'success'
###############################################################################
# Invalid VRT testing
def ogr_vrt_28():
    """Invalid VRT testing.

    Layers of data/invalid.vrt must raise GDAL errors when read; fully
    invalid datasources must fail to open at all.
    """
    ds = ogr.Open("data/invalid.vrt")
    if ds is None:
        return 'fail'
    for idx in range(ds.GetLayerCount()):
        gdal.ErrorReset()
        gdal.PushErrorHandler('CPLQuietErrorHandler')
        feat = ds.GetLayer(idx).GetNextFeature()
        gdal.PopErrorHandler()
        if gdal.GetLastErrorMsg() == '':
            gdaltest.post_reason('expected failure for layer %d of datasource %s' % (idx, ds.GetName()))
            return 'fail'
    ds = None

    # These datasources are invalid enough that Open() itself must fail.
    for invalid_src in ("<OGRVRTDataSource><OGRVRTLayer/></OGRVRTDataSource>",
                        "data/invalid2.vrt",
                        "data/invalid3.vrt"):
        gdal.PushErrorHandler('CPLQuietErrorHandler')
        ds = ogr.Open(invalid_src)
        gdal.PopErrorHandler()
        if ds is not None:
            gdaltest.post_reason('expected datasource opening failure')
            return 'fail'
    return 'success'
###############################################################################
# Test OGRVRTWarpedLayer
def ogr_vrt_29():
    """Test OGRVRTWarpedLayer (on-the-fly reprojection to EPSG:32631).

    Creates a 5x5 point grid shapefile in WGS84, wraps it in a warped VRT
    targeting UTM 31N, then checks reprojection on read (extent, geometry)
    and on write (SetFeature back-projects into the source SRS).
    """

    def _remove_shapefile_parts():
        # Delete each component independently so that one missing part does
        # not abort removal of the others, and only swallow OS-level errors
        # (the previous bare 'except' around a single try block hid real
        # bugs and stopped at the first missing file).
        for ext in ('shp', 'shx', 'dbf', 'prj'):
            try:
                os.unlink('tmp/ogr_vrt_29.' + ext)
            except OSError:
                pass

    _remove_shapefile_parts()

    # Build a 5x5 grid of points in WGS84 around (2, 49).
    ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/ogr_vrt_29.shp')
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(4326)
    lyr = ds.CreateLayer('ogr_vrt_29', srs = sr)
    lyr.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))
    for i in range(5):
        for j in range(5):
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField(0, i * 5 + j)
            feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%f %f)' % (2 + i / 5.0, 49 + j / 5.0)))
            lyr.CreateFeature(feat)
            feat = None
    ds = None

    f = open('tmp/ogr_vrt_29.vrt', 'wt')
    f.write("""<OGRVRTDataSource>
    <OGRVRTWarpedLayer>
        <OGRVRTLayer name="ogr_vrt_29">
            <SrcDataSource relativetoVRT="1">ogr_vrt_29.shp</SrcDataSource>
        </OGRVRTLayer>
        <TargetSRS>EPSG:32631</TargetSRS>
    </OGRVRTWarpedLayer>
</OGRVRTDataSource>\n""")
    f.close()

    # Check reprojection in both directions
    ds = ogr.Open('tmp/ogr_vrt_29.vrt', update = 1)
    lyr = ds.GetLayer(0)
    sr = lyr.GetSpatialRef()
    got_wkt = sr.ExportToWkt()
    if got_wkt.find('32631') == -1:
        gdaltest.post_reason('did not get expected WKT')
        print(got_wkt)
        return 'fail'
    bb = lyr.GetExtent()
    expected_bb = (426857.98771727527, 485607.2165091355, 5427475.0501426803, 5516873.8591036052)
    for i in range(4):
        if abs(bb[i] - expected_bb[i]) > 1:
            gdaltest.post_reason('did not get expected extent')
            print(bb)
            return 'fail'
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat, 'POINT(426857.987717275274917 5427937.523466162383556)') != 0:
        gdaltest.post_reason('fail')
        feat.DumpReadable()
        return 'fail'
    # Write a projected point through the warped layer; it must be stored
    # back-projected in the source shapefile (verified below).
    feat = lyr.GetNextFeature()
    feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(500000 0)'))
    lyr.SetFeature(feat)
    feat = None
    lyr.SetSpatialFilterRect(499999,-1,500001,1)
    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat, 'POINT(500000 0)') != 0:
        gdaltest.post_reason('fail')
        feat.DumpReadable()
        return 'fail'
    ds = None

    # Check in .shp file
    ds = ogr.Open('tmp/ogr_vrt_29.shp', update = 1)
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat, 'POINT(3.0 0.0)') != 0:
        gdaltest.post_reason('fail')
        feat.DumpReadable()
        return 'fail'
    ds = None

    # Check with test_ogrsf
    import test_cli_utilities
    if test_cli_utilities.get_test_ogrsf_path() is not None:
        ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro tmp/ogr_vrt_29.vrt')
        if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
            gdaltest.post_reason('fail')
            print(ret)
            return 'fail'

    _remove_shapefile_parts()
    os.unlink('tmp/ogr_vrt_29.vrt')
    return 'success'
###############################################################################
# Test OGRVRTUnionLayer
def ogr_vrt_30():
    """Test OGRVRTUnionLayer (union of two shapefile layers).

    Exercises SRS/extent/feature-count/schema merging, per-source FID
    renumbering, capabilities, spatial/attribute filters, and the optional
    union attributes (SourceLayerFieldName, PreserveSrcFID, FieldStrategy,
    GeometryType, LayerSRS, FeatureCount, Extent*), including writes.
    Each 'check' iteration reopens the VRT so every aspect is probed on a
    freshly initialized layer.
    """
    # Remove any leftovers from a previous run.
    for filename in [ 'tmp/ogr_vrt_30_1.shp',
                      'tmp/ogr_vrt_30_1.shx',
                      'tmp/ogr_vrt_30_1.dbf',
                      'tmp/ogr_vrt_30_1.prj',
                      'tmp/ogr_vrt_30_1.qix',
                      'tmp/ogr_vrt_30_2.shp',
                      'tmp/ogr_vrt_30_2.shx',
                      'tmp/ogr_vrt_30_2.dbf',
                      'tmp/ogr_vrt_30_2.prj',
                      'tmp/ogr_vrt_30_2.qix' ]:
        try:
            os.unlink(filename)
        except:
            pass
    # First source layer: 25 points around (2, 49), fields id1/id2.
    ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/ogr_vrt_30_1.shp')
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(4326)
    lyr = ds.CreateLayer('ogr_vrt_30_1', srs = sr)
    lyr.CreateField(ogr.FieldDefn('id1', ogr.OFTInteger))
    lyr.CreateField(ogr.FieldDefn('id2', ogr.OFTInteger))
    for i in range(5):
        for j in range(5):
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField(0, i * 5 + j)
            feat.SetField(1, 100 + i * 5 + j)
            feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%f %f)' % (2 + i / 5.0, 49 + j / 5.0)))
            lyr.CreateFeature(feat)
            feat = None
    ds.ExecuteSQL('CREATE SPATIAL INDEX ON ogr_vrt_30_1')
    ds = None
    # Second source layer: 25 points around (4, 49), fields id2/id3.
    ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/ogr_vrt_30_2.shp')
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(4326)
    lyr = ds.CreateLayer('ogr_vrt_30_2', srs = sr)
    lyr.CreateField(ogr.FieldDefn('id2', ogr.OFTInteger))
    lyr.CreateField(ogr.FieldDefn('id3', ogr.OFTInteger))
    for i in range(5):
        for j in range(5):
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField(0, 200 + i * 5 + j)
            feat.SetField(1, 300 + i * 5 + j)
            feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%f %f)' % (4 + i / 5.0, 49 + j / 5.0)))
            lyr.CreateFeature(feat)
            feat = None
    ds.ExecuteSQL('CREATE SPATIAL INDEX ON ogr_vrt_30_2')
    ds = None
    # Minimal union of the two layers: all defaults.
    f = open('tmp/ogr_vrt_30.vrt', 'wt')
    f.write("""<OGRVRTDataSource>
    <OGRVRTUnionLayer name="union_layer">
        <OGRVRTLayer name="ogr_vrt_30_1">
            <SrcDataSource relativetoVRT="1">ogr_vrt_30_1.shp</SrcDataSource>
        </OGRVRTLayer>
        <OGRVRTLayer name="ogr_vrt_30_2">
            <SrcDataSource relativetoVRT="1">ogr_vrt_30_2.shp</SrcDataSource>
        </OGRVRTLayer>
    </OGRVRTUnionLayer>
</OGRVRTDataSource>\n""")
    f.close()
    # Check
    for check in range(10):
        ds = ogr.Open('tmp/ogr_vrt_30.vrt', update = 1)
        lyr = ds.GetLayer(0)
        if check == 0:
            # SRS is taken from the first source layer.
            sr = lyr.GetSpatialRef()
            got_wkt = sr.ExportToWkt()
            if got_wkt.find('GEOGCS["GCS_WGS_1984"') == -1:
                gdaltest.post_reason('did not get expected WKT')
                print(got_wkt)
                return 'fail'
        elif check == 1:
            # Extent is the union of both source extents.
            bb = lyr.GetExtent()
            expected_bb = (2.0, 4.7999999999999998, 49.0, 49.799999999999997)
            for i in range(4):
                if abs(bb[i] - expected_bb[i]) > 1:
                    gdaltest.post_reason('did not get expected extent')
                    print(bb)
                    return 'fail'
        elif check == 2:
            feat_count = lyr.GetFeatureCount()
            if feat_count != 2 * 5 * 5:
                gdaltest.post_reason('did not get expected feature count')
                print(feat_count)
                return 'fail'
        elif check == 3:
            # Default field strategy: union of the schemas (id1, id2, id3).
            if lyr.GetLayerDefn().GetFieldCount() != 3:
                gdaltest.post_reason('did not get expected field count')
                return 'fail'
        elif check == 4:
            # Features from layer 1 come first with renumbered FIDs; fields
            # absent from a feature's source layer must stay unset.
            feat = lyr.GetNextFeature()
            i = 0
            while feat is not None:
                if i < 5 * 5:
                    if feat.GetFID() != i:
                        gdaltest.post_reason('did not get expected value')
                        print(feat.GetFID())
                        return 'fail'
                    if feat.GetFieldAsInteger("id1") != i:
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                    if feat.GetFieldAsInteger("id2") != 100 + i:
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                    if feat.IsFieldSet("id3"):
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                    if ogrtest.check_feature_geometry(feat, 'POINT(%f %f)' % (2 + int(i / 5) / 5.0, 49 + int(i % 5) / 5.0)) != 0:
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                else:
                    if feat.GetFID() != i:
                        gdaltest.post_reason('did not get expected value')
                        print(feat.GetFID())
                        return 'fail'
                    if feat.IsFieldSet("id1"):
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                    if feat.GetFieldAsInteger("id2") != 200 + i - 5 * 5:
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                    if feat.GetFieldAsInteger("id3") != 300 + i - 5 * 5:
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                    if ogrtest.check_feature_geometry(feat, 'POINT(%f %f)' % (4 + int((i - 5 * 5) / 5) / 5.0, 49 + int((i - 5 * 5) % 5) / 5.0)) != 0:
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                i = i + 1
                feat = lyr.GetNextFeature()
        elif check == 5:
            if lyr.GetGeomType() != ogr.wkbPoint:
                gdaltest.post_reason('did not get expected geom type')
                return 'fail'
        elif check == 6:
            # Fast capabilities are advertised, but the default union is
            # read-only (no random/sequential write).
            if lyr.TestCapability(ogr.OLCFastFeatureCount) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCFastGetExtent) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCFastSpatialFilter) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCIgnoreFields) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCRandomWrite) != 0:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCSequentialWrite) != 0:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
        elif check == 7:
            # Spatial filter straddling both source layers.
            lyr.SetSpatialFilterRect(2.49, 49.29, 4.49, 49.69)
            if lyr.GetFeatureCount() != 10:
                print(lyr.GetFeatureCount())
                gdaltest.post_reason('did not get expected feature count')
                return 'fail'
        elif check == 8:
            lyr.SetAttributeFilter('id1 = 0')
            if lyr.GetFeatureCount() != 1:
                print(lyr.GetFeatureCount())
                gdaltest.post_reason('did not get expected feature count')
                return 'fail'
        elif check == 9:
            # CreateFeature() should fail
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField('id2', 12345)
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            ret = lyr.CreateFeature(feat)
            gdal.PopErrorHandler()
            if ret == 0:
                gdaltest.post_reason('should have failed')
                return 'fail'
            feat = None
            # SetFeature() should fail
            lyr.ResetReading()
            feat = lyr.GetNextFeature()
            feat.SetField('id2', 45321)
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            ret = lyr.SetFeature(feat)
            gdal.PopErrorHandler()
            if ret == 0:
                gdaltest.post_reason('should have failed')
                return 'fail'
            feat = None
            # Test feature existence : should fail
            lyr.SetAttributeFilter('id2 = 12345 or id2 = 45321')
            lyr.ResetReading()
            feat = lyr.GetNextFeature()
            if feat is not None:
                gdaltest.post_reason('should have failed')
                return 'fail'
        ds = None
    # Check with test_ogrsf
    import test_cli_utilities
    if test_cli_utilities.get_test_ogrsf_path() is not None:
        ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro tmp/ogr_vrt_30.vrt --config OGR_VRT_MAX_OPENED 1')
        if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
            gdaltest.post_reason('fail')
            print(ret)
            return 'fail'
        ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' tmp/ogr_vrt_30.vrt')
        if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
            gdaltest.post_reason('fail')
            print(ret)
            return 'fail'
    # Test various optional attributes
    f = open('tmp/ogr_vrt_30.vrt', 'wt')
    f.write("""<OGRVRTDataSource>
    <OGRVRTUnionLayer name="union_layer">
        <OGRVRTLayer name="ogr_vrt_30_1">
            <SrcDataSource relativetoVRT="1">ogr_vrt_30_1.shp</SrcDataSource>
        </OGRVRTLayer>
        <OGRVRTLayer name="ogr_vrt_30_2">
            <SrcDataSource relativetoVRT="1">ogr_vrt_30_2.shp</SrcDataSource>
        </OGRVRTLayer>
        <SourceLayerFieldName>source_layer</SourceLayerFieldName>
        <PreserveSrcFID>ON</PreserveSrcFID>
        <FieldStrategy>Intersection</FieldStrategy>
        <GeometryType>wkbPoint25D</GeometryType>
        <LayerSRS>WGS72</LayerSRS>
        <FeatureCount>100</FeatureCount>
        <ExtentXMin>-180</ExtentXMin>
        <ExtentYMin>-90</ExtentYMin>
        <ExtentXMax>180</ExtentXMax>
        <ExtentYMax>90</ExtentYMax>
    </OGRVRTUnionLayer>
</OGRVRTDataSource>\n""")
    f.close()
    for check in range(9):
        ds = ogr.Open('tmp/ogr_vrt_30.vrt', update = 1)
        lyr = ds.GetLayer(0)
        if check == 0:
            # LayerSRS overrides the source SRS.
            sr = lyr.GetSpatialRef()
            got_wkt = sr.ExportToWkt()
            if got_wkt.find('WGS 72') == -1:
                gdaltest.post_reason('did not get expected WKT')
                print(got_wkt)
                return 'fail'
        elif check == 1:
            # Declared Extent* values take precedence over computed ones.
            bb = lyr.GetExtent()
            expected_bb = (-180.0, 180.0, -90.0, 90.0)
            for i in range(4):
                if abs(bb[i] - expected_bb[i]) > 1:
                    gdaltest.post_reason('did not get expected extent')
                    print(bb)
                    return 'fail'
        elif check == 2:
            # Declared FeatureCount takes precedence.
            if lyr.GetFeatureCount() != 100:
                gdaltest.post_reason('did not get expected feature count')
                return 'fail'
        elif check == 3:
            # Intersection strategy: only id2 is common; plus source_layer.
            if lyr.GetLayerDefn().GetFieldCount() != 2:
                gdaltest.post_reason('did not get expected field count')
                return 'fail'
        elif check == 4:
            # With PreserveSrcFID, source FIDs are kept (so they repeat).
            feat = lyr.GetNextFeature()
            i = 0
            while feat is not None:
                if i < 5 * 5:
                    if feat.GetFID() != i:
                        gdaltest.post_reason('did not get expected value')
                        print(feat.GetFID())
                        return 'fail'
                    if feat.GetFieldAsString("source_layer") != 'ogr_vrt_30_1':
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                    if feat.GetFieldAsInteger("id2") != 100 + i:
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                else:
                    if feat.GetFID() != i - 5 * 5:
                        gdaltest.post_reason('did not get expected value')
                        print(feat.GetFID())
                        return 'fail'
                    if feat.GetFieldAsString("source_layer") != 'ogr_vrt_30_2':
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                    if feat.GetFieldAsInteger("id2") != 200 + i - 5 * 5:
                        gdaltest.post_reason('did not get expected value')
                        return 'fail'
                i = i + 1
                feat = lyr.GetNextFeature()
        elif check == 5:
            # Declared GeometryType takes precedence.
            if lyr.GetGeomType() != ogr.wkbPoint25D:
                gdaltest.post_reason('did not get expected geom type')
                return 'fail'
        elif check == 6:
            # With PreserveSrcFID, write capabilities become available.
            if lyr.TestCapability(ogr.OLCFastFeatureCount) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCFastGetExtent) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCFastSpatialFilter) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCIgnoreFields) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCRandomWrite) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
            if lyr.TestCapability(ogr.OLCSequentialWrite) != 1:
                gdaltest.post_reason('did not get expected capability')
                return 'fail'
        elif check == 7:
            lyr.SetSpatialFilterRect(2.49, 49.29, 4.49, 49.69)
            if lyr.GetFeatureCount() != 10:
                gdaltest.post_reason('did not get expected feature count')
                return 'fail'
        elif check == 8:
            # invalid source_layer name with CreateFeature()
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField('source_layer', 'random_name')
            feat.SetField('id2', 12345)
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            ret = lyr.CreateFeature(feat)
            gdal.PopErrorHandler()
            if ret == 0:
                gdaltest.post_reason('should have failed')
                return 'fail'
            feat = None
            # unset source_layer name with CreateFeature()
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField('id2', 12345)
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            ret = lyr.CreateFeature(feat)
            gdal.PopErrorHandler()
            if ret == 0:
                gdaltest.post_reason('should have failed')
                return 'fail'
            feat = None
            # FID set with CreateFeature()
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetFID(999999)
            feat.SetField('source_layer', 'ogr_vrt_30_2')
            feat.SetField('id2', 12345)
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            ret = lyr.CreateFeature(feat)
            gdal.PopErrorHandler()
            if ret == 0:
                gdaltest.post_reason('should have failed')
                return 'fail'
            feat = None
            # CreateFeature() OK
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField('source_layer', 'ogr_vrt_30_2')
            feat.SetField('id2', 12345)
            if lyr.CreateFeature(feat) != 0:
                gdaltest.post_reason('should have succeeded')
                return 'fail'
            # SetFeature() OK
            feat.SetField('id2', 45321)
            if lyr.SetFeature(feat) != 0:
                gdaltest.post_reason('should have succeeded')
                return 'fail'
            # invalid source_layer name with SetFeature()
            feat.SetField('source_layer', 'random_name')
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            ret = lyr.SetFeature(feat)
            gdal.PopErrorHandler()
            if ret == 0:
                gdaltest.post_reason('should have failed')
                return 'fail'
            # unset source_layer name with SetFeature()
            feat.UnsetField('source_layer')
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            ret = lyr.SetFeature(feat)
            gdal.PopErrorHandler()
            if ret == 0:
                gdaltest.post_reason('should have failed')
                return 'fail'
            # FID unset with SetFeature()
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField('source_layer', 'ogr_vrt_30_2')
            feat.SetField('id2', 12345)
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            ret = lyr.SetFeature(feat)
            gdal.PopErrorHandler()
            if ret == 0:
                gdaltest.post_reason('should have failed')
                return 'fail'
            feat = None
            # Test feature existence (with passthru)
            lyr.SetAttributeFilter('id2 = 45321 AND OGR_GEOMETRY IS NULL')
            if lyr.TestCapability(ogr.OLCFastFeatureCount) != 1:
                gdaltest.post_reason('should have returned 1')
                return 'fail'
            lyr.ResetReading()
            feat = lyr.GetNextFeature()
            if feat is None:
                gdaltest.post_reason('should have succeeded')
                return 'fail'
            # Test feature existence (without passthru)
            lyr.SetAttributeFilter("id2 = 45321 AND OGR_GEOMETRY IS NULL AND source_layer = 'ogr_vrt_30_2'")
            if lyr.TestCapability(ogr.OLCFastFeatureCount) != 0:
                gdaltest.post_reason('should have returned 0')
                return 'fail'
            lyr.ResetReading()
            feat = lyr.GetNextFeature()
            if feat is None:
                gdaltest.post_reason('should have succeeded')
                return 'fail'
            # Test SyncToDisk()
            if lyr.SyncToDisk() != 0:
                gdaltest.post_reason('should have succeeded')
                return 'fail'
        ds = None
    # Clean up the generated shapefiles and the VRT itself.
    for filename in [ 'tmp/ogr_vrt_30_1.shp',
                      'tmp/ogr_vrt_30_1.shx',
                      'tmp/ogr_vrt_30_1.dbf',
                      'tmp/ogr_vrt_30_1.prj',
                      'tmp/ogr_vrt_30_1.qix',
                      'tmp/ogr_vrt_30_2.shp',
                      'tmp/ogr_vrt_30_2.shx',
                      'tmp/ogr_vrt_30_2.dbf',
                      'tmp/ogr_vrt_30_2.prj',
                      'tmp/ogr_vrt_30_2.qix' ]:
        try:
            os.unlink(filename)
        except:
            pass
    os.unlink('tmp/ogr_vrt_30.vrt')
    return 'success'
###############################################################################
# Test anti-recursion mechanism with union layer
def ogr_vrt_31(shared_ds_flag = ''):
    """Test the anti-recursion mechanism with mutually recursive union layers."""
    # Write two in-memory VRT union layers that reference each other (and
    # themselves through the other file).
    for this_name, other_name in (('rec1', 'rec2'), ('rec2', 'rec1')):
        xml = """<OGRVRTDataSource>
    <OGRVRTUnionLayer name="%s">
        <OGRVRTLayer name="%s">
            <SrcDataSource%s>/vsimem/%s.vrt</SrcDataSource>
        </OGRVRTLayer>
        <OGRVRTLayer name="%s">
            <SrcDataSource%s>/vsimem/%s.vrt</SrcDataSource>
        </OGRVRTLayer>
    </OGRVRTUnionLayer>
</OGRVRTDataSource>""" % (this_name, other_name, shared_ds_flag, other_name,
                          this_name, shared_ds_flag, this_name)
        gdal.FileFromMemBuffer('/vsimem/%s.vrt' % this_name, xml)

    ds = ogr.Open('/vsimem/rec1.vrt')
    if ds is None:
        return 'fail'

    # Forcing layer initialization must raise a GDAL error instead of
    # recursing forever.
    gdal.ErrorReset()
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds.GetLayer(0).GetLayerDefn()
    ds.GetLayer(0).GetFeatureCount()
    gdal.PopErrorHandler()
    if gdal.GetLastErrorMsg() == '':
        gdaltest.post_reason('error expected !')
        return 'fail'

    gdal.Unlink('/vsimem/rec1.vrt')
    gdal.Unlink('/vsimem/rec2.vrt')
    return 'success'
###############################################################################
# Test anti-recursion mechanism on shared DS
def ogr_vrt_32():
    """Test the union-layer anti-recursion mechanism with shared datasources."""
    return ogr_vrt_31(shared_ds_flag=' shared="1"')
###############################################################################
#
def ogr_vrt_cleanup():
    """Release the cached VRT datasource and remove leftover in-memory files."""
    if gdaltest.vrt_ds is None:
        return 'skip'
    for path in ('/vsimem/rec1.vrt', '/vsimem/rec2.vrt'):
        gdal.Unlink(path)
    gdaltest.vrt_ds.Destroy()
    gdaltest.vrt_ds = None
    return 'success'
# Ordered registry of the test callables executed by the gdaltest runner.
gdaltest_list = [
    ogr_vrt_1,
    ogr_vrt_2,
    ogr_vrt_3,
    ogr_vrt_4,
    ogr_vrt_5,
    ogr_vrt_6,
    ogr_vrt_7,
    ogr_vrt_8,
    ogr_vrt_9,
    ogr_vrt_10,
    ogr_vrt_11,
    ogr_vrt_12,
    ogr_vrt_13,
    ogr_vrt_14,
    ogr_vrt_15,
    ogr_vrt_16,
    ogr_vrt_17,
    ogr_vrt_18,
    ogr_vrt_19_optimized,
    ogr_vrt_19_nonoptimized,
    ogr_vrt_20,
    ogr_vrt_21,
    ogr_vrt_22,
    ogr_vrt_23,
    ogr_vrt_24,
    ogr_vrt_25,
    ogr_vrt_26,
    ogr_vrt_27,
    ogr_vrt_28,
    ogr_vrt_29,
    ogr_vrt_30,
    ogr_vrt_31,
    ogr_vrt_32,
    ogr_vrt_cleanup ]

if __name__ == '__main__':
    # Standard gdaltest entry point: register the suite, run it, summarize.
    gdaltest.setup_run( 'ogr_vrt' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
|
import functools
import operator
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import average_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer.utils import conv
from chainer.utils import conv_nd
import chainerx
def _get_conv_slices(
        size, k, s, p, cover_all=False, d=1, include_pad=True, dtype='l'):
    """Compute per-patch start/end indices along a single axis.

    Returns:
        A tuple of two 1-D :class:`numpy.ndarray`\\ s holding, for every
        output position, the starting and the ending index of its patch.
        With ``include_pad=False`` the indices are clipped to
        ``[0, size]`` so cells lying in the padding are excluded.
    """
    n_out = conv.get_conv_outsize(size, k, s, p, cover_all, d)
    begins = numpy.arange(n_out, dtype=dtype) * s - p
    finishes = begins + k
    if include_pad:
        return begins, finishes
    return numpy.maximum(begins, 0), numpy.minimum(finishes, size)
class AveragePoolingND(pooling_nd._PoolingND):
    """Average pooling over a set of N-dimensional planes.
    .. warning::
        This feature is experimental. The interface can change in the future.
    """
    def __init__(
            self, ndim, ksize, stride=None, pad=0, cover_all=False,
            pad_value=0):
        # Only two averaging modes exist: pad_value=0 counts padded zeros in
        # the mean, pad_value=None ignores the padded region entirely.
        if not (pad_value is None or pad_value == 0):
            raise ValueError(
                'pad_value must be either 0 or None, not {}.'.format(
                    pad_value))
        # TODO(takagi) Support cover_all mode.
        if cover_all is True:
            raise ValueError('`cover_all` mode is not supported yet.')
        super(AveragePoolingND, self).__init__(
            ndim, ksize, stride=stride, pad=pad, cover_all=cover_all)
        self.pad_value = pad_value
    def _get_pooling_width(self, xp, dims, dtype):
        """Count the valid (non-padded) input elements under each window.

        Per-axis valid widths are combined with outer products, so the
        result broadcasts over the N output spatial axes.
        """
        width = None
        for d, k, s, p in six.moves.zip(
                dims, self.ksize, self.stride, self.pad):
            starts, ends = _get_conv_slices(
                d, k, s, p, cover_all=self.cover_all, include_pad=False,
                dtype=dtype)
            w = ends - starts
            if width is None:
                width = w
            else:
                # Outer product accumulates the widths across axes.
                width = numpy.tensordot(width[..., None], w[None, ...], axes=1)
        if xp is cuda.cupy:
            # Computed on host with numpy; move to the device when required.
            width = cuda.cupy.array(width)
        return width
    def forward_chainerx(self, inputs):
        # Forward via the native ChainerX op when it supports this config.
        ndim = self.ndim
        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        pad_value = self.pad_value
        x, = inputs
        if x.device.backend.name == 'cuda' and ndim not in (2, 3):
            # ChainerX on CUDA only covers 2-D/3-D pooling; fall back.
            return chainer.Fallback
        if pad_value == 0:
            pad_mode = 'zero'
        elif pad_value is None:
            pad_mode = 'ignore'
        else:
            assert False
        y = chainerx.average_pool(x, ksize, stride, pad, pad_mode)
        return y,
    def forward_cpu(self, inputs):
        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        pad_value = self.pad_value
        cover_all = self.cover_all
        x, = inputs
        in_shape = x.shape
        in_dtype = x.dtype
        col = conv_nd.im2col_nd_cpu(x, ksize, stride, pad, cover_all=cover_all)
        # mean along (_, _, k_1, k_2, ..., k_N, _, ..., _)
        y_axis = tuple(six.moves.range(2, 2 + len(ksize)))
        if pad_value is None:
            # 'ignore' mode: divide each window sum by its valid-element count.
            dims = x.shape[2:]
            width = self._get_pooling_width(numpy, dims, x.dtype)
            y = col.sum(axis=y_axis) / width
        else:
            assert pad_value == 0
            y = col.mean(axis=y_axis)
            width = None
        # Stash state that AveragePoolingNDGrad reads in backward.
        self.width = width
        self._in_shape = in_shape
        self._in_dtype = in_dtype
        return y,
    def forward_gpu(self, inputs):
        if chainer.should_use_cudnn('>=auto') and 2 <= self.ndim <= 3:
            # With cuDNN v3 or greater, use cuDNN implementation for inputs
            # with spatial dimensions of two or more.
            return self.forward_cudnn(inputs)
        ndim = self.ndim
        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        pad_value = self.pad_value
        cover_all = self.cover_all
        x, = inputs
        in_shape = x.shape
        in_dtype = x.dtype
        n, c = in_shape[:2]
        idims = in_shape[2:]
        odims = tuple(
            conv.get_conv_outsize(d, k, s, p, cover_all=cover_all)
            for (d, k, s, p) in six.moves.zip(idims, ksize, stride, pad))
        # (n, c, y_1, y_2, ..., y_N)
        y_shape = (n, c) + odims
        y = cuda.cupy.empty(y_shape, dtype=x.dtype)
        if pad_value is None:
            # Per-window reciprocal of the valid-element count.
            coeff = self._get_pooling_width(cuda.cupy, idims, x.dtype)
            coeff = cuda.cupy.reciprocal(coeff, out=coeff)
        else:
            assert pad_value == 0
            # Constant coefficient: 1 / prod(ksize).
            coeff = 1. / functools.reduce(operator.mul, ksize)
        in_params, out_params, operation, name = \
            average_pooling_nd_kernel.AveragePoolingNDKernelForward.generate(
                ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            x.reduced_view(),
            *(idims + odims + ksize + stride + pad + (coeff, y)))
        # Stash state that AveragePoolingNDGrad reads in backward.
        self.coeff = coeff
        self._in_shape = in_shape
        self._in_dtype = in_dtype
        return y,
    def backward(self, indexes, gy):
        # Delegate to a dedicated FunctionNode so double backward works.
        return AveragePoolingNDGrad(self).apply(gy)
    def get_cudnn_pool_mode(self):
        # Map pad_value semantics onto the matching cuDNN averaging mode.
        if self.pad_value is None:
            return cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING
        else:
            assert self.pad_value == 0
            return cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING
class AveragePoolingNDGrad(function_node.FunctionNode):
    """Gradient of :class:`AveragePoolingND` (supports double backward)."""
    def __init__(self, apoolnd):
        # Keep the forward node so its cached state (_in_shape, _in_dtype,
        # width/coeff, cuDNN flags) can be reused here.
        self.func = apoolnd
    def forward_cpu(self, gys):
        func = self.func
        pad_value = func.pad_value
        ksize = func.ksize
        stride = func.stride
        pad = func.pad
        in_shape = func._in_shape
        gy, = gys
        idims = in_shape[2:]
        odims = gy.shape[2:]
        colon = slice(None, None, None)
        is_pad_value_none = pad_value is None
        if is_pad_value_none:
            # 'ignore' mode: rescale by the per-window valid-element count
            # (in place on gy).
            numpy.divide(gy, func.width, out=gy)
        # Broadcast gy over the kernel axes, then fold back to input layout.
        gy_index = (colon, colon) + (None,) * len(idims)
        gcol_reps = (1, 1) + ksize + (1,) * len(odims)
        gcol = numpy.tile(gy[gy_index], gcol_reps)
        gx = conv_nd.col2im_nd_cpu(gcol, stride, pad, idims)
        if not is_pad_value_none:
            # 'zero' mode: constant divisor prod(ksize).
            gx /= functools.reduce(operator.mul, ksize)
        return gx,
    def forward_gpu(self, gys):
        func = self.func
        if func.is_cudnn_used:
            return func.backward_cudnn(gys)
        ndim = func.ndim
        pad_value = func.pad_value
        ksize = func.ksize
        stride = func.stride
        pad = func.pad
        in_shape = func._in_shape
        in_dtype = func._in_dtype
        is_pad_value_none = pad_value is None
        gy, = gys
        n, c = in_shape[:2]
        idims = in_shape[2:]
        odims = gy.shape[2:]
        if is_pad_value_none:
            # This conversion from chainerx to cupy exists here for
            # double backward of chainerx on cuda.
            coeff = backend.from_chx(func.coeff)
            gy *= coeff
        gx = cuda.cupy.empty(in_shape, in_dtype)
        in_params, out_params, operation, name = \
            average_pooling_nd_kernel.AveragePoolingNDKernelBackward.generate(
                ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            gy.reduced_view(),
            *(idims + odims + ksize + stride + pad + (gx,)))
        if not is_pad_value_none:
            gx /= functools.reduce(operator.mul, ksize)
        return gx,
    def backward(self, indexes, grad_outputs):
        # The gradient of the gradient is another average pooling of the
        # same geometry.
        func = self.func
        ndim = func.ndim
        pad_value = func.pad_value
        ksize = func.ksize
        stride = func.stride
        pad = func.pad
        return AveragePoolingND(
            ndim, ksize, stride, pad, cover_all=False, pad_value=pad_value
        ).apply(grad_outputs)
def average_pooling_nd(x, ksize, stride=None, pad=0, pad_value=0):
    """N-dimensionally spatial average pooling function.

    .. warning::
        This feature is experimental. The interface can change in the future.

    N-dimensional generalization of
    :func:`~chainer.functions.average_pooling_2d`: for each channel it takes
    the mean over every spatial patch, without any learned parameter.

    Args:
        x (~chainer.Variable): Input variable.
        ksize (int or tuple of ints): Pooling window size; an int ``k`` means
            ``(k, k, ..., k)``.
        stride (int or tuple of ints or None): Stride of the pooling
            applications; ``None`` means the same as the window size.
        pad (int or tuple of ints): Spatial padding width.
        pad_value (0 or None): Value filling the padded region for the
            average. ``None`` makes the padded region ignored; the default
            ``0`` biases the averages towards zero.

    Returns:
        ~chainer.Variable: Output variable.

    .. note::
        ``cover_all`` mode (as in :func:`max_pooling_nd`) is not supported;
        average pooling runs in non-cover-all mode.
    """
    spatial_ndim = len(x.shape) - 2
    pooling = AveragePoolingND(
        spatial_ndim, ksize, stride=stride, pad=pad, pad_value=pad_value)
    return pooling.apply((x,))[0]
def average_pooling_1d(x, ksize, stride=None, pad=0, pad_value=0):
    """1-dimensional spatial average pooling function.

    .. warning::
        This feature is experimental. The interface can change in the future.

    .. note::
        Thin wrapper around :func:`~chainer.functions.average_pooling_nd`
        that insists on exactly one spatial dimension; see that function's
        documentation for the detailed behavior.
    """
    spatial_ndim = len(x.shape) - 2
    if spatial_ndim != 1:
        raise ValueError(
            'The number of dimensions under channel dimension of the input '
            '\'x\' should be 1. But the actual ndim was {}.'.format(
                spatial_ndim))
    return average_pooling_nd(x, ksize, stride, pad, pad_value)
def average_pooling_3d(x, ksize, stride=None, pad=0, pad_value=0):
    """3-dimensional spatial average pooling function.

    .. warning::
        This feature is experimental. The interface can change in the future.

    .. note::
        Thin wrapper around :func:`~chainer.functions.average_pooling_nd`
        that insists on exactly three spatial dimensions; see that function's
        documentation for the detailed behavior.
    """
    spatial_ndim = len(x.shape) - 2
    if spatial_ndim != 3:
        raise ValueError(
            'The number of dimensions under channel dimension of the input '
            '\'x\' should be 3. But the actual ndim was {}.'.format(
                spatial_ndim))
    return average_pooling_nd(x, ksize, stride, pad, pad_value)
|
numbers = [15, 32, 199, 22, 11]
# Simple guessing loop: 'q' quits, anything non-numeric is rejected,
# numeric guesses are checked against the list.
while True:
    answer = input("Guess a number:")
    if answer == "q":
        break
    try:
        answer = int(answer)
    except ValueError:
        # Bug fix: without 'continue' the unconverted string fell through to
        # the membership test below and also printed "Incorrect!".
        print("Error! Please type a number!")
        continue
    if answer in numbers:
        print("Correct")
    else:
        print("Incorrect!")
# -*- coding: utf-8 -*-
#####################################################################
########### Fields for WorkBook 'Capital Working' ##########
#####################################################################
from openerp import models, fields, api
class capital_working_sub(models.Model):
    # One working-capital sheet per (client, business) pair; its detail
    # lines live in capital_working_sub_sub.
    _name = 'capital_working_sub.capital_working_sub'
    business = fields.Char(string = "Business", required=True)
    all_years = fields.Boolean(string="All Years", default=False)
    name = fields.Many2one('res.partner','Client Name', required=True)
    # Records display by their business label rather than by 'name'.
    _rec_name = 'business'
    # NOTE(review): the two One2many fields below point at the same co-model
    # and the same inverse field, so they always hold identical lines — one
    # looks redundant; confirm against the XML views before removing either.
    capital_working_id = fields.One2many('capital_working_sub_sub.capital_working_sub_sub', 'capital_working_sub_id')
    capital_working_ids = fields.One2many('capital_working_sub_sub.capital_working_sub_sub', 'capital_working_sub_id')
class capital_working_sub_sub(models.Model):
    # Detail line of a working-capital sheet: one labelled amount per year
    # from 2005 through 2020.
    _name = 'capital_working_sub_sub.capital_working_sub_sub'
    description = fields.Char(required=True)
    y2005 = fields.Float(string = "2005")
    y2006 = fields.Float(string = "2006")
    y2007 = fields.Float(string = "2007")
    y2008 = fields.Float(string = "2008")
    y2009 = fields.Float(string = "2009")
    y2010 = fields.Float(string = "2010")
    y2011 = fields.Float(string = "2011")
    y2012 = fields.Float(string = "2012")
    y2013 = fields.Float(string = "2013")
    y2014 = fields.Float(string = "2014")
    y2015 = fields.Float(string = "2015")
    y2016 = fields.Float(string = "2016")
    y2017 = fields.Float(string = "2017")
    y2018 = fields.Float(string = "2018")
    y2019 = fields.Float(string = "2019")
    y2020 = fields.Float(string = "2020")
    sequence = fields.Integer(string ='Sequence')
    # Lines are listed in manual 'sequence' order.
    _order = 'sequence'
    # Deleting the parent sheet cascades to its detail lines.
    capital_working_sub_id = fields.Many2one('capital_working_sub.capital_working_sub',
        ondelete='cascade', string="Capital Working Sub", required=True)
class capital_working(models.Model):
    """Working-capital line on a 'comparative.wealth' sheet.

    Selecting a business pulls the yearly totals of that business's detail
    lines into this record via the onchange handler below.
    """
    _name = 'capital_working.capital_working'
    business = fields.Many2one('capital_working_sub.capital_working_sub', domain="[('name','=',parent.name)]", string = "Business", required=True)
    y2005 = fields.Float(string = "2005")
    y2006 = fields.Float(string = "2006")
    y2007 = fields.Float(string = "2007")
    y2008 = fields.Float(string = "2008")
    y2009 = fields.Float(string = "2009")
    y2010 = fields.Float(string = "2010")
    y2011 = fields.Float(string = "2011")
    y2012 = fields.Float(string = "2012")
    y2013 = fields.Float(string = "2013")
    y2014 = fields.Float(string = "2014")
    y2015 = fields.Float(string = "2015")
    y2016 = fields.Float(string = "2016")
    y2017 = fields.Float(string = "2017")
    y2018 = fields.Float(string = "2018")
    y2019 = fields.Float(string = "2019")
    y2020 = fields.Float(string = "2020")
    sequence = fields.Integer(string ='Sequence')
    _order = 'sequence'
    capital_working_id = fields.Many2one('comparative.wealth',
        ondelete='cascade', string="Capital Working", required=True)

    @api.onchange('business')
    def _onchange_business(self):
        """Recompute the per-year totals when the selected business changes."""
        # Truthiness is the idiomatic emptiness test for a Many2one value
        # (was: len(self.business) > 0).
        if self.business:
            source = self.env['capital_working_sub.capital_working_sub'].search(
                [('business', '=', self.business.business),
                 ('id', '=', self.business.id)])
            # One loop replaces sixteen copy-pasted sum assignments.
            for year in range(2005, 2021):
                field_name = 'y%d' % year
                setattr(self, field_name, sum(
                    getattr(line, field_name)
                    for line in source.capital_working_ids))
from django.views import generic
from books.models import Book
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.shortcuts import get_object_or_404
class BookListView(generic.ListView):
    """Unpaginated list of all books, ordered by publication date."""
    model = Book
    context_object_name = 'book_list'
    template_name = 'book_list.html'
    # Query the database, sort the books by date, hand them to the template.
    def get_queryset(self, **kwargs):
        return Book.objects.order_by('pub_date')
class BookListViewSort(generic.ListView):
    """One-book-per-page view with prev/next links keyed by the URL date."""
    model = Book
    context_object_name = 'book_list'
    template_name = 'book_list.html'
    paginate_by = 1 # enable pagination with a step of one book per page
    # Query the database, sort the books by date, hand them to the template.
    def get_queryset(self, **kwargs):
        return Book.objects.order_by('pub_date')
    # Add context data derived from the date captured in the URL.
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        list_books = Book.objects.order_by('pub_date')
        cur_book = Book.objects.filter(name=get_object_or_404(Book, pub_date=self.kwargs['date']).name)
        paginator = Paginator(list_books, self.paginate_by)
        # Page number = 1-based position of the current book in the ordering.
        page = (list(list_books).index(cur_book[0]) + 1)
        try:
            pagess = paginator.page(page)
            if pagess.has_next():
                context['next_page'] = str(list(list_books)[page].pub_date)
            if pagess.has_previous():
                context['previous_page'] = str(list(list_books)[page - 2].pub_date)
        except PageNotAnInteger:
            pagess = paginator.page(1)
        except EmptyPage:
            pagess = paginator.page(paginator.num_pages)
        # Had to override the default pagination object — it would not pick
        # up the page computed from the URL date.
        context['page_obj'] = pagess
        context['book_list'] = cur_book
        return context
|
# Rock-paper-scissors-lizard-Spock template
# The key idea of this program is to equate the strings
# "rock", "paper", "scissors", "lizard", "Spock" to numbers
# as follows:
#
# 0 - rock
# 1 - Spock
# 2 - paper
# 3 - lizard
# 4 - scissors
# helper functions
import random
def name_to_number(name):
    """Map a choice name to its RPSLS number (0-4).

    Bug fix: the game's spec and the test calls use "Spock" with a capital
    S, but the old comparison only matched lowercase "spock", so
    rpsls("Spock") was silently scored as scissors. Both spellings are
    accepted now.
    """
    if name == "rock":
        return 0
    elif name == "Spock" or name == "spock":
        return 1
    elif name == "paper":
        return 2
    elif name == "lizard":
        return 3
    else:
        # Anything else (including "scissors") maps to 4, as before.
        return 4
def number_to_name(number):
    """Map an RPSLS number (0-4) back to its choice name.

    Fixes: removed the dead 'pass' statement that preceded the real body,
    and return "Spock" capitalized as the spec comments (1 - Spock) and
    name_to_number's accepted input require.
    """
    if number == 0:
        return "rock"
    elif number == 1:
        return "Spock"
    elif number == 2:
        return "paper"
    elif number == 3:
        return "lizard"
    else:
        # Any remaining number (4) is scissors, as before.
        return "scissors"
def rpsls(player_choice):
    """Play one RPSLS round: player_choice vs. a random computer choice.

    Python 2 module (print statements). The winner is decided by the
    difference of the two choice numbers modulo 5: a difference of 1 or 2
    means the computer wins, 3 or 4 means the player wins, 0 is a tie.
    """
    # delete the following pass statement and fill in your code below
    #pass
    # print a blank line to separate consecutive games
    print ""
    # print out the message for the player's choice
    print "Player chooses ",player_choice
    # convert the player's choice to player_number using the function name_to_number()
    player_number = name_to_number(player_choice)
    # compute random guess for comp_number using random.randrange()
    comp_number = random.randrange(5)
    # convert comp_number to comp_choice using the function number_to_name()
    comp_choice = number_to_name(comp_number)
    # print out the message for computer's choice
    print "Computer chooses ",comp_choice
    # compute difference of comp_number and player_number modulo five
    diff = (int(comp_number)-player_number)%5
    # use if/elif/else to determine winner, print winner message
    if diff == 1 or diff == 2:
        print "Computer wins!"
    elif diff == 3 or diff == 4:
        print "Player wins!"
    else:
        print "Player and Computer tie"
# test your code - THESE CALLS MUST BE PRESENT IN YOUR SUBMITTED CODE
# Each call plays one full round and prints its transcript.
rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
# always remember to check your completed program against the grading rubric
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/05/07 17:48
# @Author :
# @File : company.py
# @Software: PyCharm
# @Desc :
"""
"""
from typing import Optional
from fastapi import APIRouter
from app.core.quotations_fix.eastmoney_req import EastMoneyRequest
from app.utils import response_code
router = APIRouter()
@router.get("/search", summary="检索")
async def top(words: str, count: Optional[int] = 10):
    """
    Search East Money for *words*. \n
    return: at most *count* search results
    """
    es_req = EastMoneyRequest()
    results = es_req.search(words)
    # Trim to the requested number of hits before wrapping them in the
    # standard HTTP-200 response envelope.
    return response_code.resp_200(results[0:count])
|
#!/usr/bin/env python
#
# Pydget -
# Depositfiles Handler
#
import os, sys, time
import urllib, urllib2, urlparse
import BeautifulSoup
import form_grabber
def prepare_download(opener, page_url):
    """Walk depositfiles.com's free-download flow and build the final request.

    Fetches the landing page, "clicks" the free-download button, waits the
    mandatory 60 seconds with a console countdown, then scrapes the real
    download form. Returns a (urllib2.Request, form_action_url) pair ready
    to be opened by the caller. Python 2 module.
    """
    # Get web page
    sys.stdout.write("[+] Grabbing web page, ")
    sys.stdout.flush()
    page = opener.open(page_url)
    print "Done"
    # "click" the free download button
    sys.stdout.write("[+] Requesting free download, ")
    sys.stdout.flush()
    form_action = page.geturl()
    data = {"gateway_result": "1"}
    data = urllib.urlencode(data)
    request = urllib2.Request(form_action, data)
    response = opener.open(request).read()
    print "Done"
    # Parse for file download URL
    # The URL is embedded in a jQuery load('...') call in the returned HTML.
    file_download_url = response.split("('#download_container').load('")[1]
    file_download_url = file_download_url.split("'")[0]
    file_download_url = urlparse.urljoin("http://depositfiles.com", file_download_url)
    # Wait the requisite 60 seconds for free downloads
    print "[+] Waiting the required 60 seconds"
    last_line_length = 0
    for i in range(60, -1, -1):
        line = "[+] Time remaining: %d seconds" % i
        # Pad with spaces so a shorter line fully overwrites the previous one.
        while len(line) < last_line_length:
            line += " "
        last_line_length = len(line)
        sys.stdout.write("\r%s" % line)
        sys.stdout.flush()
        time.sleep(1)
    print
    # get the ACTUAL file download link
    sys.stdout.write("[+] Building download request, ")
    sys.stdout.flush()
    page = opener.open(file_download_url).read()
    soup = BeautifulSoup.BeautifulSoup(page)
    form_action, data = form_grabber.process_form(soup, file_download_url)
    data["submit"] = "Download the file"
    data = urllib.urlencode(data)
    request = urllib2.Request(form_action, data)
    print "Done"
    return request, form_action
from __future__ import absolute_import
import requests
from systemalib.utils.functional import dmerge
from systemalib.utils import json
def response_status(response):
    """Return the HTTP status code carried by *response*."""
    code = response.status_code
    return code
def response_json(response):
    # Decode the raw response body as JSON (via the project's json wrapper).
    return json.loads(response.content)
def response_header(key, response):
    """Return the value of header *key* on *response* (KeyError if absent)."""
    header_map = response.headers
    return header_map[key]
def json_data(data, **kwargs):
    """Merge a JSON request body and content-type header into *kwargs*.

    NOTE(review): falsy payloads ({}, [], 0, "") are sent as None because of
    the truthiness test below — confirm that is intended and not a bug.
    """
    return dmerge(kwargs, {
        'data': json.dumps(data) if data else None,
        'headers': {'content-type': 'application/json'}
    })
def headers(data, **kwargs):
    # Merge an explicit headers dict into the request kwargs.
    return dmerge(kwargs, {
        'headers': data,
    })
def raise_status(response):
    """Invoke the response's own 4xx/5xx check, then pass the response through."""
    response.raise_for_status()
    return response
def request(verb, url, **kwargs):
    """Send an HTTP request by dispatching to the requests function named *verb*."""
    handler = getattr(requests, verb.lower())
    return handler(url, **kwargs)
|
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/mypage/me")
def hello():
    # Serve the contact-form page.
    return render_template("form.html")
@app.route("/message/contact", methods=["POST"])
def post_message():
    # Echo back the submitted 'text' form field.
    # NOTE(review): if 'text' is missing, .get returns None and Flask rejects
    # a None response body — confirm the form always posts 'text'.
    return request.form.get("text")
app.run()
|
import urllib2
url = 'https://www.douban.com/accounts/login'
# Fix: the page was downloaded twice (the first response was read never and
# discarded); fetch once and reuse it.
response = urllib2.urlopen(url)
content = response.read()
# 'with' guarantees the file is closed even if write() raises.
with open('begin.php', 'w') as fi:
    fi.write(content)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 15:24:22 2019
User Behavior Analysis and Commodity Recommendation for Point-Earning Apps
Yu-Ching Chen, Chia-Ching Yang, Yan-Jian Liau, Chia-Hui Chang
National Central University
Taoyuan, Taiwan
TAAI2016
Initial_Data_Process_Code
With Time Series Feature
@author: Administrator
"""
import numpy as np
import pandas as pd
# import statsmodels.api as sm
# from statsmodels.formula.api import ols
# from statsmodels.stats.anova import anova_lm
# from statsmodels.graphics.factorplots import interaction_plot
# import matplotlib.pyplot as plt
# from scipy import stats
# import datetime as dt
Order_Data_Part1_Name = "122532-2019-09-10+152846.csv"
# The Order data from 2019-06-01 to 2019-07-20
Order_Data_Part2_Name = "122589-2019-09-10+163353.csv"
# The Order data from 2019-07-21 to 2019-08-31
Order_Data_Part1 = pd.read_csv(Order_Data_Part1_Name, encoding = 'utf-8', sep = ',', error_bad_lines=False)
Order_Data_Part2 = pd.read_csv(Order_Data_Part2_Name, encoding = 'utf-8')
# The data
objs = [Order_Data_Part1, Order_Data_Part2]
Order_Data = pd.concat(objs, axis=0)
# Combine two parts of data together
# Order count per phone number, most active users first.
A = Order_Data.groupby(['phone_no'])['phone_no'].count().sort_values(ascending = False)
# B = A[(A >= 100) and (A <= 200)]
# This is wrong!
# Keep users with between 5 and 10 orders (element-wise boolean masks).
A1 = A >= 5
A2 = A <= 10
B = A[A1 & A2]
C = B.index
Filtered_Order_Data = Order_Data.loc[Order_Data['phone_no'].isin(C)] # .isin: select rows with specific row names
# Filter people with more than 20 buying items
# NOTE(review): the comment above says "more than 20" but the masks keep
# 5-10 orders — confirm which threshold is intended.
Subset_Filtered_Order_Data_Commodity_Code = Filtered_Order_Data[['phone_no', 'two_category_name']]
Subset_Filtered_Order_Data_Commodity_Code = Subset_Filtered_Order_Data_Commodity_Code.reset_index(drop=True)
# Important
DT = pd.to_datetime(Filtered_Order_Data['dt'])
DT = DT.reset_index(drop = True)
DT = pd.DataFrame(DT) # Important
DT['day'] = pd.DataFrame([pd.to_datetime(DT.loc[i,'dt']).day for i in range(len(DT))])
DT['month'] = pd.DataFrame([pd.to_datetime(DT.loc[i,'dt']).month for i in range(len(DT))])
# Binarize: second half of a month (day >= 15) -> 1, first half -> 0.
DT['day'] = pd.DataFrame([1 if DT.loc[i,'day'] >= 15 else 0] for i in range(len(DT)))
def MonthTrans(x, y):
    """Shift the half-month index y by a per-month offset.

    July (x == 7) adds 2, August (x == 8) adds 4, any other month leaves y
    unchanged — yielding a running half-month index across June-August.
    """
    month_offsets = {7: 2, 8: 4}
    return y + month_offsets.get(x, 0)
DT['day'] = pd.DataFrame([MonthTrans(DT.loc[i,'month'],DT.loc[i,'day']) for i in range(len(DT))])
# Create a function, combine it with list generator.
# Scale the half-month index into (0, 1].
D = pd.DataFrame((DT['day']+1)/6, columns = None )
Result_temp = pd.concat([Subset_Filtered_Order_Data_Commodity_Code, D], axis = 1)
User_Item_Table = pd.pivot_table(Result_temp, index = 'phone_no', columns = 'two_category_name', aggfunc = np.sum, values = 'day', fill_value = 0)
# The final table for user-item
# Why need D exist?
User_Item_Table.to_csv('User_Item_subset2.csv', sep=',', header=True, index=True)
# from svmutil import *
# from svm import *
# y, x = [1, -1], [{1: 1, 2: 1}, {1: -1, 2: -1}]
# prob = svm_problem(y, x)
# param = svm_parameter('-t 0 -c 4 -b 1')
# model = svm_train(prob, param)
# yt = [1]
# xt = [{1: 1, 2: 1}]
# p_label, p_acc, p_val = svm_predict(yt, xt, model)
# print(p_label)
Finish_Time_Data_Name = "123859-2019-09-11+164113.csv"
# Table recorded finish_time of each phone_no
Finish_Time_Data = pd.read_csv(Finish_Time_Data_Name, encoding = 'utf-8')
# Result_temp2.isna().sum()
# In total 84817 missing data
Filtered_Finish_Time_Data = Finish_Time_Data.loc[Finish_Time_Data['phone_no'].isin(C)]
Subset_Finish_Time_Data = Filtered_Finish_Time_Data.sort_values(by = ['phone_no', 'finish_time'])
Subset_Finish_Time_Data = Subset_Finish_Time_Data.fillna(method = 'ffill')
Subset_Finish_Time_Data = Subset_Finish_Time_Data.sort_index()
# The nan data has been refilled by the following one time value.
Subset_Finish_Time_Data = Subset_Finish_Time_Data.reset_index(drop = True)
Subset_Finish_Time_Data['Finish_time'] = pd.to_datetime(Subset_Finish_Time_Data['finish_time'])
Subset_Finish_Time_Data = Subset_Finish_Time_Data.drop('finish_time', axis = 1)
Temp = np.tile("aaaaaaaaa", (len(Subset_Finish_Time_Data), 1))
# Create an array where each member has 9-digits capacity.
# Bucket each purchase by its hour of day; hours outside 7-22 keep the
# placeholder string.
for i in range(len(Subset_Finish_Time_Data)):
    temp = Subset_Finish_Time_Data['Finish_time'][i].hour
    if temp >= 7 and temp <= 11:
        Temp[i] = "morning"
    elif temp >= 12 and temp <= 13:
        Temp[i] = "noon"
    elif temp >= 14 and temp <= 16:
        Temp[i] = "afternoon"
    elif temp >= 17 and temp <= 18:
        Temp[i] = "evening"
    elif temp >= 19 and temp <= 22:
        Temp[i] = "night"
Subset_Finish_Time_Data['DATE2'] = Temp
# Create new features related to date
Subset_Filtered_Order_Data_User_Two_Category = Filtered_Order_Data[['phone_no', 'two_category_name']]
def sub_max(arr, n):
    """Return a 0/1 float array marking the positions of the n largest values.

    Bug fix: the old version mutated the caller's array in place — the
    'arr_ = arr' assignment was only an alias, not a copy — and clobbered
    entries with the running minimum. This version is side-effect free.
    Ties keep the earlier index, matching np.argmax's behaviour.
    """
    values = np.asarray(arr, dtype=float)
    indicator = np.zeros(len(values))
    # Stable sort of the negated values yields the n largest, first index
    # winning on ties.
    top_indices = np.argsort(-values, kind='stable')[:n]
    indicator[top_indices] = 1
    return indicator
E = np.tile(1, (len(Subset_Filtered_Order_Data_User_Two_Category), 1))
E = pd.DataFrame(E)
Subset_Filtered_Order_Data_User_Two_Category = Subset_Filtered_Order_Data_User_Two_Category.reset_index(drop=True)
Subset_Filtered_Order_Data_User_Two_Category = pd.concat([Subset_Filtered_Order_Data_User_Two_Category, E], axis = 1)
User_Two_Category_Item = pd.pivot_table(Subset_Filtered_Order_Data_User_Two_Category, index = 'phone_no', columns = 'two_category_name', aggfunc = np.sum, fill_value = 0)
# Keep only each user's four most-bought categories as 1/0 indicators.
for i in range(len(User_Two_Category_Item)):
    Value = User_Two_Category_Item.iloc[i].values
    ARR = sub_max(Value, 4)
    User_Two_Category_Item.iloc[i] = ARR
#User_Two_Category_Item.to_csv('User_Two_Category_Item.csv', sep=',', header=True, index=True)
# Select the bought two_category items of each member
Subset_Finish_Time_Data_Time_Consumption = Subset_Finish_Time_Data[['phone_no', 'DATE2']]
E = np.tile(1, (len(Subset_Finish_Time_Data_Time_Consumption), 1))
E = pd.DataFrame(E)
Subset_Finish_Time_Data_Time_Consumption = Subset_Finish_Time_Data_Time_Consumption.reset_index(drop=True)
Subset_Finish_Time_Data_Time_Consumption = pd.concat([Subset_Finish_Time_Data_Time_Consumption, E], axis = 1)
User_Time_Consumption_Item = pd.pivot_table(Subset_Finish_Time_Data_Time_Consumption, index = 'phone_no', columns = 'DATE2', aggfunc = np.sum, fill_value = 0)
# User_Time_Consumption_Item = User_Time_Consumption_Item.drop((0,'aaaaaaaaa'), axis = 1)
# Drop the additional useless feature aaaaaaaaa
for i in range(len(User_Time_Consumption_Item)):
    Value = User_Time_Consumption_Item.iloc[i].values
    ARR = sub_max(Value, 2)
    User_Time_Consumption_Item.iloc[i] = ARR
# Indicate the largest two elements as 1 and otherwise 0.
User_Time_Consumption_Item.to_csv('User_Time_Consumption_Item_subset.csv', sep=',', header=True, index=True)
Result_temp = pd.concat([User_Two_Category_Item, User_Time_Consumption_Item], axis = 1)
User_Info_Data_Name = "124822-2019-09-12+152056.csv"
# Basic user info
User_Info_Data = pd.read_csv(User_Info_Data_Name, encoding = 'utf-8')
User_Info_Data = User_Info_Data.drop('id', axis = 1)
Filtered_User_Info_Data = User_Info_Data[User_Info_Data['phone_no'].isin(C)] # .isin: select rows with specific row names
# G = Filtered_Order_Data.index
# Filtered_User_Info_Data = Filtered_User_Info_Data.loc[Filtered_User_Info_Data['phone_no'].isin(G)] # .isin: select rows with specific row names
Filtered_User_Info_Data = Filtered_User_Info_Data.drop_duplicates(subset=None, keep='first', inplace=False)
Filtered_User_Info_Data = Filtered_User_Info_Data.reset_index(drop=True)
Filtered_User_Info_Data.sort_values(by = 'phone_no')
# Shift 1/2-coded categoricals down to 0/1.
Filtered_User_Info_Data['sex'] -= 1
Filtered_User_Info_Data['property'] -= 1
User_Profile = pd.merge(Result_temp, Filtered_User_Info_Data, on='phone_no')
User_Profile = User_Profile.fillna(method = 'ffill')
# User_Profile = User_Profile.drop('id', axis = 1)
User_Profile.to_csv('User_Profile_subset2.csv', sep=',', header=True, index=True)
Item_Profile_Data = Filtered_Order_Data[['commodity_code', 'two_category_name']]
Item_Profile_Data = Item_Profile_Data.reset_index(drop=True)
# D = np.tile(1, (len(Item_Profile_Data), 1))
# D = pd.DataFrame(D)
Item_Profile_Data = Item_Profile_Data.reset_index(drop=True)
# NOTE(review): D here is the scaled half-month column built earlier, while
# the commented-out lines above suggest a column of ones was intended —
# confirm which D the item profile should use.
Item_Profile_Data = pd.concat([Item_Profile_Data, D], axis = 1)
Item_Profile = pd.pivot_table(Item_Profile_Data, index = 'two_category_name', columns = 'commodity_code', aggfunc = np.sum, fill_value = 0)
Item_Profile.to_csv('Item_Profile_subset2.csv', sep=',', header=True, index=True)
# Item_Profile.values: shows the data matrix without row and column
|
marks = int(input("enter your marks:"))
# Grade bands: A 85-100, B 70-84, C 50-69, D 35-49, fail 0-34.
if marks > 100 or marks < 0:
    # Bug fix: out-of-range input used to be misreported — negative marks
    # printed 'fail' and the catch-all message only mentioned marks above
    # 100 ("below 100") even though 100 itself is valid.
    print("enter valid marks between 0 and 100")
elif marks >= 85:
    print('A')
elif marks >= 70:
    print('B')
elif marks >= 50:
    print('C')
elif marks >= 35:
    print('D')
else:
    print('fail')
import crypt
import hashlib
def testPass(cryptopass):
    """Dictionary-attack a hashed password.

    Tries each word in dictionary.txt against *cryptopass* both as a
    crypt(3) hash (the first two characters of the target are the salt) and
    as a hex SHA-512 digest, printing the word on a match.
    """
    salt = cryptopass[0:2]
    with open('dictionary.txt', 'r') as dictfile:
        for word in dictfile:
            word = word.strip('\n')
            cryptword = crypt.crypt(word, salt)
            # Bug fix: the old SHA-512 branch compared a hash *object*
            # against the crypt string, which could never be equal; compare
            # the hex digest against the target hash instead.
            sha_digest = hashlib.sha512(word.encode()).hexdigest()
            if cryptword == cryptopass or sha_digest == cryptopass:
                print('[+] Found password: ' + word + '\n')
                return
    print('[-] Password not found\n')
    return
def main():
    """Read user:hash pairs from password.txt and try to crack each one."""
    with open('password.txt') as passfile:
        for line in passfile.readlines():
            if ':' in line:
                user = line.split(':')[0]
                # Bug fix: was "split(':')[1],strip(' ')" — the comma built a
                # tuple and referenced an undefined name 'strip' (NameError).
                # Also strip the trailing newline so the salt is correct.
                cryptpass = line.split(':')[1].strip()
                print('[*] Cracking Password for: ' + user)
                testPass(cryptpass)
if __name__ == "__main__":
main()
|
# MENU FUNCTIONALITY
def menu():
    """Print the placeholder menu reminder lines."""
    reminders = (
        "Need to remember how i did all this",
        "Think i just stole it from dragon fire",
    )
    for reminder in reminders:
        print(reminder)
|
from __future__ import print_function
from ggplot import *
# Default-binned histogram of diamond prices, then an explicit 50-bin one.
print(ggplot(diamonds, aes(x='price')) + geom_histogram())
print(ggplot(diamonds, aes(x='price')) + geom_histogram(bins=50))
|
import os
# Point Django at the project settings before any models are imported.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ovid_api.settings.base")
import django
django.setup()
import json
from api.models import Author, Work, Book, Poem, Line
# Load the whole corpus JSON into memory.
json_data = open('corpus.json')
data = json.load(json_data)
author = data['author']
works = data['works']
# clear the decks so there are no conflicts
Author.objects.all().delete()
Work.objects.all().delete()
Book.objects.all().delete()
Poem.objects.all().delete()
Line.objects.all().delete()
# start with the author. There's only one to worry about for now.
def add_author(name, full_name, abbreviation, slug):
    """Create and persist an Author in one INSERT; return the saved instance.

    The previous version called create() with no fields and then issued a
    second UPDATE via save(); passing the fields to create() does the same
    work in a single database round trip.
    """
    return Author.objects.create(
        name=name,
        full_name=full_name,
        abbreviation=abbreviation,
        slug=slug,
    )
# NOTE(review): the author JSON is read positionally (a list of one-key
# dicts); this breaks if the export order ever changes — confirm the
# corpus.json format is stable.
name = author[0]['name']
slug = author[1]['slug']
full_name = author[2]['full name']
abbreviation = author[3]['abbreviation']
add_author(name=name, full_name=full_name, abbreviation=abbreviation, slug=slug)
ovid = Author.objects.get(name="Ovid")
# loop through the JSON and create database objects as we go:
# Work -> Book -> Poem -> Line, wiring each level to its parent.
for idx, work in enumerate(works, start=0):
    w = Work()
    # enumerate already yields the item, so 'work' replaces the redundant
    # works[idx] re-indexing of the old code.
    w.title = work["title"]
    w.abbreviation = work["abbreviation"]
    w.slug = work["slug"]
    w.author = ovid
    books = work["books"]
    w.save()
    for book in books:
        b = Book()
        b.book_index = book['book_index']
        b.title = book['book_title']
        b.work = w
        b.save()
        poems = book["poems"]
        for poem in poems:
            p = Poem()
            p.poem_index = poem['poem_index']
            p.title = poem['poem_title']
            p.book = b
            p.save()
            lines = poem['lines']
            for line in lines:
                # Bug fix: the Python 2 'print x' statements were syntax
                # errors under Python 3; single-argument print(...) behaves
                # the same on both.
                print(line['text'])
                print(line['line_index'])
                l = Line()
                l.text = line['text']
                l.meter = line['meter']
                l.line_index = line['line_index']
                l.poem = p
                l.save()
json_data.close()
|
# Generated by Django 2.2.2 on 2019-10-05 08:53
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema change: alters product.end_date.
    dependencies = [
        ('browser', '0020_auto_20191005_1418'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='end_date',
            # NOTE(review): the default is the literal timestamp captured
            # when makemigrations ran; harmless inside a migration, but
            # confirm the model itself uses a callable default rather than
            # a value frozen at import time.
            field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 10, 10, 14, 53, 13, 709055), null=True),
        ),
    ]
|
import keras
from keras.layers import Dense
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPool2D
from keras.layers import Flatten
import numpy as np
from numpy import genfromtxt # processing our csv files.
#Needed for unpacking the files
import shutil
import os
import tensorflow as tf
'''
1. Try to use the base signals (650000 index values).
2. Possibly one-hot encode to 1's and 0's and run the same
3. Use the binary images
'''
#/encoded_record_segments/
#https://www.tensorflow.org/tutorials/images/cnn
#https://stackoverflow.com/questions/46204569/how-to-handle-variable-sized-input-in-cnn-with-keras
#https://blog.goodaudience.com/introduction-to-1d-convolutional-neural-networks-in-keras-for-time-sequences-3a7ff801a2cf
'''
def extract_records(deleteZips=True):
all_zip_files = os.listdir(os.getcwd() + '/encoded_record_segments')
extract_dir = os.getcwd() + '/encoded_record_segments'
for file in all_zip_files:
filename = extract_dir + '/' + file
shutil.unpack_archive(filename, filename[:-4], 'zip')
if deleteZips is True:
os.remove(filename)
'''
def partition_data():
    """Split the ECG signal CSVs into train/test sets and save them as .npy files.

    Record ``100.csv`` holds the positive class; records 101-111 (record 110
    is omitted from the MIT-BIH set) hold the negatives. The last element of
    every row is the label, the rest is the signal. 80% of the positive
    record and 160 rows of each negative record go to training; the rest of
    the positive record and 40 rows per negative record go to test. Labels
    are one-hot encoded and the data reshaped to (samples, features, 1) for
    a 1-D CNN.
    """
    filepath = os.getcwd() + '/all_signals/'

    # Positive examples: 80/20 split of record 100.
    data = genfromtxt(filepath + '100.csv', delimiter=',')
    train_index = int(len(data) * 0.8)
    data_index = len(data[0])
    train_data = [x[0:data_index] for x in data[0:train_index]]
    # FIX: was data[train_index + 1:], which silently dropped row train_index.
    test_data = [x[0:data_index] for x in data[train_index:]]

    # Negative examples: 160 training rows + 40 test rows from each record.
    record_names = [str(x) + '.csv' for x in range(101, 112) if x != 110]
    for record in record_names:
        data = genfromtxt(filepath + record, delimiter=',')
        train_data.extend(x[0:data_index] for x in data[0:160])
        # FIX: was data[161:201], which skipped row 160 entirely.
        test_data.extend(x[0:data_index] for x in data[160:200])

    # Shuffle whole rows BEFORE stripping labels so each row keeps its label.
    np.random.shuffle(train_data)
    np.random.shuffle(test_data)
    train_labels = np.array([int(x[-1]) for x in train_data])
    test_labels = np.array([int(x[-1]) for x in test_data])
    train_data = np.array([x[0:data_index - 1] for x in train_data])
    test_data = np.array([x[0:data_index - 1] for x in test_data])
    train_labels = keras.utils.to_categorical(train_labels)
    test_labels = keras.utils.to_categorical(test_labels)

    # FIX: compute the reshape target instead of hard-coding (2016, 1250, 1)
    # and (503, 1250, 1), so changes in record counts don't break this step.
    train_data = train_data.reshape((len(train_data), data_index - 1, 1))
    test_data = test_data.reshape((len(test_data), data_index - 1, 1))

    # Save binary files so later runs can skip this preprocessing.
    np.save(filepath + 'traindata', train_data)
    np.save(filepath + 'trainlabels', train_labels)
    np.save(filepath + 'testdata', test_data)
    np.save(filepath + 'testlabels', test_labels)
######################################################################################
if __name__ == '__main__':
    #partition_data()
    from cnn_model import model
    filepath = os.getcwd() + '/all_signals/'
    # Load the preprocessed arrays produced by partition_data().
    train_data = np.load(filepath + 'traindata.npy')
    train_labels = np.load(filepath + 'trainlabels.npy')
    test_data = np.load(filepath + 'testdata.npy')
    test_labels = np.load(filepath + 'testlabels.npy')
    #train_labels = keras.utils.to_categorical(train_labels)
    #test_data = keras.utils.to_categorical(test_labels)
    #test_data = np.expand_dims(test_data, axis=2)
    #print(model.summary())
    #print(train_data)
    # Hyper-parameters. Fine-tune then run again.
    BATCH_SIZE = 32
    EPOCHS = 10
    callbacks_list = [
        # NOTE(review): monitor='val_loss' but fit() below has
        # validation_data commented out, so no val_loss is produced —
        # the checkpoint will warn/skip. Also filepath is a directory,
        # not a file name; confirm both are intended.
        keras.callbacks.ModelCheckpoint(
            filepath=os.getcwd(),
            monitor='val_loss',
            save_best_only=True),
        keras.callbacks.EarlyStopping(monitor='acc', patience=1)
    ]
    #print(model.summary())
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['accuracy'])
    history = model.fit(
        train_data, train_labels,
        batch_size=BATCH_SIZE,
        epochs=EPOCHS,
        callbacks=callbacks_list,
        #validation_data=(test_data, test_labels)
    )
    # Evaluate on the held-out test set and persist the trained model.
    score = model.evaluate(test_data, test_labels, verbose=0)
    print('Test loss: {}'.format(score[0]))
    print('Test accuracy: {}'.format(score[1]))
    model.save(filepath + 'modelv1')
###########################################################################################
|
class animals(object):
    """Base animal with a name and a health pool.

    All mutators return ``self`` so calls can be chained fluently.
    (Python 2 source: uses print statements.)
    """
    def __init__(self, name, health):
        self.name = name
        self.health = health
    def walk(self):
        # Walking costs 1 health.
        self.health -= 1
        return self
    def run(self):
        # Running costs 5 health.
        self.health -= 5
        return self
    def display_health(self):
        # Print the current health, then keep the chain going.
        print self.health
        return self
class dog(animals):
    """Dog whose health always starts at 170.

    NOTE(review): the ``health`` constructor argument is passed to the base
    class and then immediately overwritten with 170, so the parameter is
    effectively ignored — confirm this is intended.
    """
    def __init__(self, name, health):
        super(dog, self).__init__(name, health)
        self.health = 170
    def pet(self):
        # Petting restores 5 health.
        self.health += 5
        return self
class dragon(animals):
    """Dragon whose health always starts at 170 (constructor argument ignored,
    same pattern as ``dog`` — the demo below relies on this reset)."""
    def __init__(self, name, health):
        super(dragon, self).__init__(name, health)
        self.health = 170
    def fly(self):
        # Flying costs 10 health.
        self.health -= 10
        return self
    def display_health(self):
        # Print health via the base class, then identify the species.
        super(dragon, self).display_health()
        print "I am a dragon"
# Demo: exercise the chaining API on each class.
Wonbong = animals("cat", 200)
Wonbong.walk().walk().walk().run().run().display_health()  # 200 - 3 - 10 = 187
Wooju = dog("dog", 170)
Wooju.walk().walk().walk().run().run().display_health()    # ctor fixes 170; 170 - 3 - 10 = 157
Dragoo = dragon("drangon", 300)
Dragoo.fly().display_health()                              # ctor resets to 170; 170 - 10 = 160
__author__ = 'Gabrielle Martin-Fortier'
from interface.interface_logs import Nouveau_quart
if __name__ == '__main__':
    # Main entry point. (Translated from French:) This file only launches the
    # program and does not need editing; the file to edit is
    # interface/interface_dames.py.
    f = Nouveau_quart()
    f.mainloop()
|
# Generated by Django 3.1.2 on 2020-10-10 16:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adjust ``contact.phone`` to a 15-char
    CharField labelled "Phone number"."""

    dependencies = [
        ('main', '0005_auto_20201010_1550'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contact',
            name='phone',
            field=models.CharField(max_length=15, verbose_name='Phone number'),
        ),
    ]
|
from django.db import models
class ProjectInfo(models.Model):
    """A project with a name, description, scheduled dates, and a
    soft-delete flag plus created/modified audit timestamps."""

    name = models.CharField(max_length=30, null=False, blank=False)
    description = models.TextField(null=False, blank=False)
    start_date = models.DateTimeField(null=False, blank=False)
    end_date = models.DateTimeField(null=False, blank=False)
    # Soft delete: rows are flagged rather than removed.
    is_deleted = models.BooleanField(default=False)
    createdon = models.DateTimeField(auto_now_add=True)  # set once on insert
    # createdby = models.ForeignKey(User)
    lastmodon = models.DateTimeField(auto_now=True)  # refreshed on every save
|
from graphene_django import DjangoObjectType
from graphene_crud_maker.utils import CustomNode, CustomConnection
from Api.models import Users
class UsersType(DjangoObjectType):
    """GraphQL object type exposing the ``Users`` model through a Relay-style
    node/connection (project-provided CustomNode / CustomConnection)."""
    class Meta:
        model = Users
        # Only exact-match filtering on the primary key is exposed.
        filter_fields = {
            'id': ['exact',],
        }
        interfaces = (CustomNode,)
        connection_class = CustomConnection
|
#! /usr/bin/env python
# -*- coding UTF-8 -*-
# @ author: Yang Wu
# Email: wuyang.bnu@139.com
# import os
# import sys
# import time
# import gzip
# import tarfile
# import numpy as np
# import pandas as pd
# from functools import reduce
from os import listdir
class MyListDir():
    """Directory listing with optional substring include/exclude filtering.

    ``file_list`` holds bare names, ``fullfile_list`` the '/'-joined paths,
    and ``search_list`` the path component (index ``search``) that filtering
    matches against. The three lists are kept parallel at all times.
    """

    def __init__(self, path, retain='', remove='', search=-1):
        self.path = path
        self.retain = retain
        self.remove = remove
        self.search = search
        self.file_list = listdir(self.path)
        self.fullfile_list = ["/".join([self.path, name]) for name in self.file_list]
        self.search_list = [full.split('/')[self.search] for full in self.fullfile_list]
        # Apply the include filter first, then the exclude filter.
        if self.retain:
            self.retain_file_list()
        if self.remove:
            self.remove_file_list()

    def retain_file_list(self):
        """Keep only entries whose search token contains ``self.retain``."""
        if self.retain:
            doomed = [idx for idx, token in enumerate(self.search_list)
                      if self.retain not in token]
            self.remove_horse(doomed)

    def remove_file_list(self):
        """Drop entries whose search token contains ``self.remove``."""
        if self.remove:
            doomed = [idx for idx, token in enumerate(self.search_list)
                      if self.remove in token]
            self.remove_horse(doomed)

    def remove_horse(self, remove_id):
        """Delete the given indices from all three parallel lists, in place."""
        if remove_id:
            dropped = set(remove_id)
            keep = [i for i in range(len(self.file_list)) if i not in dropped]
            self.file_list[:] = [self.file_list[i] for i in keep]
            self.fullfile_list[:] = [self.fullfile_list[i] for i in keep]
            self.search_list[:] = [self.search_list[i] for i in keep]
if __name__ == '__main__':
    # Collect all chromosomes and split the workload via first/step.
    first, step = 0, 2
    suffix = '.CGmap.gz'
    # segment = 10000000
    # NOTE(review): 'global' at module scope is a no-op — confirm intent.
    global segment
    segment = 50000
    sample = MyListDir(path='../data', retain='-M', search=-1)  # sample file source (customize)
    chrfiles = MyListDir(path='../data/E-01-M', retain=suffix, remove='all', search=-1)  # chromosome file source (customize)
    all_list = list(map(lambda x : x.split(suffix)[0], chrfiles.search_list))  # chromosome name prefixes
    run_list = all_list[first*step : first*step+step]  # run only a slice of the work
    print(">>> Select chrs: \nall_list: {} ({})\nrun_list:{} ({})\n".
          format(all_list, len(all_list), run_list, len(run_list)))  # log information
|
"""Remove unused models
Revision ID: 3f289637f530
Revises: 4ba1dd8c3080
Create Date: 2014-04-17 11:08:50.963964
"""
# revision identifiers, used by Alembic.
revision = '3f289637f530'
down_revision = '4ba1dd8c3080'
from alembic import op
def upgrade():
    """Drop the obsolete test-group aggregation tables (same order as before)."""
    for table_name in ('aggtestgroup', 'testgroup_test', 'testgroup', 'aggtestsuite'):
        op.drop_table(table_name)
def downgrade():
    """Downgrade is intentionally unsupported: the dropped tables are not restored."""
    raise NotImplementedError
|
# -*- coding: utf-8 -*-
'''
Created on 6 de dez de 2018
@author: vgama
'''
from time import sleep
from lxml import etree as ET
class TelaUnica(object):
    """GUI/image-driven automation of the "Tela Unica" SOAP test screens.

    Every method drives the UI through ``self.context.app`` (image clicks,
    keystrokes, typed text) and verifies expected screens through
    ``self.context.asserts``. The ``sleep`` calls pace the UI; their exact
    placement is load-bearing for the automation — do not reorder.
    """
    def __init__(self, context):
        # Shared test context: provides ``app`` (UI driver), ``asserts``
        # (screen verification) and ``path`` (resource root).
        self.context = context

    def abrir_soap(self):
        """Verify the SOAP icon is on screen and click it."""
        self.context.asserts.verifica_tela(self.context.path+"data/images/soap.png",80)
        self.context.app.clica_imagem(self.context.path+"data/images/soap.png", 1, "left", similar=60)

    def abrir_metodo_getOutageById(self):
        """Navigate the SOAP tree and open the getOutageById method."""
        sleep(5)
        self.context.app.clica_imagem(self.context.path+"data/images/soapTelaUnica.png", 1, "left", similar=70)
        self.context.app.digitos(("right",2))
        self.context.app.clica_imagem(self.context.path+"data/images/getOutageById.png", 1, "left", similar=60)
        sleep(2)
        self.context.app.digitos(("right",2),"enter")
        sleep(3)

    def preenche_id_telaUnica(self):
        """Type the test outage id into the request and execute it."""
        self.context.app.clica_imagem(self.context.path+"data/images/campoIdSoap.png",2,"left",similar=40)
        self.context.app.escrever_direto("36368408")
        self.context.app.clica_imagem(self.context.path+"data/images/executarSoap.png", 1, "left", similar=80)
        sleep(3)
        #self.context.app.digitos(("tab",24),"up")
        #self.context.app.clica_imagem(self.context.path+"data/images/opcaoXMLSoap.png", 1, "left", similar=80)
        sleep(3)
        #self.context.app.combo_digitos("ctrl","s")
        #self.context.app.escrever_direto("teste.xml")
        #self.context.app.clica_imagem(self.context.path+"data/images/saveXML.png", 1, "left", similar=80)

    def verificacao_xml(self):
        '''
        tree = ET.parse('teste.xml')
        root = tree.getroot()
        for child in root.iter('result'):
            valor = child.text
        resultado = int(valor)
        assert resultado == 0
        '''
        # Open the response tab in XML view and verify the expected result
        # screen. (The docstring above preserves an earlier lxml-based check.)
        self.context.app.clica_imagem(self.context.path+"data/images/ajustarTelaSOAP.png", 1, "left")
        self.context.asserts.verifica_tela(self.context.path+"data/images/responseSOAP.png", 70)
        self.context.app.clica_imagem(self.context.path+"data/images/responseSOAP.png", 1, "left")
        self.context.app.clica_imagem(self.context.path+"data/images/opcaoXMLSoap.png", 1, "left", similar=80)
        self.context.asserts.verifica_tela(self.context.path+"data/images/resultadoXMLTelaUnica.png", 80, similaridade=80)

    def preenche_uc_telaUnica(self):
        """Type a consumer-unit id into the request and execute it."""
        self.context.asserts.verifica_tela(self.context.path+"data/images/requestSOAP.png", 80)
        self.context.app.clica_imagem(self.context.path+"data/images/campoIdSoap.png",2,"left",similar=40)
        self.context.app.escrever_direto("1143980")
        self.context.asserts.verifica_tela(self.context.path+"data/images/requestSOAP.png", 80)
        self.context.app.clica_imagem(self.context.path+"data/images/executarSoap.png", 1, "left", similar=80)
        sleep(3)

    def abrir_metodo_getCallsByCustomer(self):
        """Navigate the SOAP tree and open the getCallsByCustomer method."""
        self.context.app.clica_imagem(self.context.path+"data/images/soapTelaUnica.png", 1, "left", similar=80)
        self.context.app.digitos(("right",2))
        self.context.app.clica_imagem(self.context.path+"data/images/opcaoCallsByCustomer.png", 1, "left", similar=80)
        sleep(2)
        self.context.app.digitos(("right",2),"enter")
        sleep(3)

    def abrir_metodo_getOutagesByCustomer(self):
        """Navigate the SOAP tree and open the getOutagesByCustomer method."""
        self.context.app.clica_imagem(self.context.path+"data/images/soapTelaUnica.png", 1, "left", similar=80)
        self.context.app.digitos(("right",2))
        self.context.app.clica_imagem(self.context.path+"data/images/getOutagesByCustomer.png", 1, "left", similar=80)
        sleep(2)
        self.context.app.digitos(("right",2),"enter")
        sleep(3)

    def preenche_campo_uc_telaUnica(self):
        """Type a customer id into the request field and execute it."""
        self.context.asserts.verifica_tela(self.context.path+"data/images/requestSOAP.png", 80)
        self.context.app.clica_imagem(self.context.path+"data/images/campoIdSoap.png", 2,"left",similar=40)
        self.context.app.escrever_direto("9102434")
        self.context.asserts.verifica_tela(self.context.path+"data/images/requestSOAP.png", 80)
        self.context.app.clica_imagem(self.context.path+"data/images/executarSoap.png", 1, "left", similar=80)
        sleep(3)

    def abrir_metodo_getCallByLabel(self):
        """Open the getCallByLabel method via keyboard navigation only."""
        self.context.app.clica_imagem(self.context.path+"data/images/soapTelaUnica.png", 1, "left", similar=80)
        self.context.app.digitos(("right",5),"enter")
        sleep(3)

    def preenche_campo_rotulo_telaUnica(self):
        """Type a call label into the request field and execute it."""
        self.context.asserts.verifica_tela(self.context.path+"data/images/requestSOAP.png", 80)
        self.context.app.clica_imagem(self.context.path+"data/images/campoIdSoap.png", 3,"left",similar=40)
        self.context.app.escrever_direto("2015-146221")
        self.context.app.clica_imagem(self.context.path+"data/images/executarSoap.png", 1, "left", similar=80)
        sleep(3)

    def abrir_metodo_insertTroubleCall(self):
        """Navigate the SOAP tree and open the insertTroubleCall method."""
        self.context.app.clica_imagem(self.context.path+"data/images/soapTelaUnica.png", 1, "left", similar=80)
        self.context.app.digitos(("right",2))
        self.context.app.clica_imagem(self.context.path+"data/images/insertTroubleCall.png", 1, "left", similar=80)
        sleep(2)
        self.context.app.digitos(("right",2),"enter")
        sleep(3)

    def preencher_campos_insertTroubleCall(self):
        """Fill every field of the insertTroubleCall request and execute it."""
        self.context.app.clica_imagem(self.context.path+"data/images/campoUC.png", 2, "left", similar=40)
        self.context.app.escrever_direto("39654605") # consumer unit (UC)
        self.context.app.clica_imagem(self.context.path+"data/images/callerName.png", 2, "left", similar=40)
        self.context.app.escrever_direto("ADRIELI VITOR") # caller name
        self.context.app.clica_imagem(self.context.path+"data/images/phone.png", 2, "left", similar=40)
        self.context.app.escrever_direto("81758213") # phone
        self.context.app.clica_imagem(self.context.path+"data/images/vizinhanca.png", 2, "left", similar=40)
        self.context.app.escrever_direto("397") # neighborhood
        self.context.app.clica_imagem(self.context.path+"data/images/endereco.png", 3, "left", similar=40)
        self.context.app.escrever_direto("RUA ABILIO ZANCA, 571") # address
        self.context.app.clica_imagem(self.context.path+"data/images/cidade.png", 2, "left", similar=40)
        self.context.app.escrever_direto("LEME") # city
        self.context.app.clica_imagem(self.context.path+"data/images/estadoSoap.png", 2, "left", similar=40)
        self.context.app.escrever_direto("1") # state
        self.context.app.clica_imagem(self.context.path+"data/images/location.png", 2, "left", similar=40)
        self.context.app.escrever_direto("1") # location
        self.context.app.clica_imagem(self.context.path+"data/images/condicaoClima.png", 2, "left", similar=40)
        self.context.app.escrever_direto("A") # weather condition
        self.context.app.clica_imagem(self.context.path+"data/images/callOrigin.png", 2, "left", similar=40)
        self.context.app.escrever_direto("1") # call origin
        self.context.app.clica_imagem(self.context.path+"data/images/callType.png", 2, "left", similar=40)
        self.context.app.escrever_direto("FORN") # type
        self.context.app.clica_imagem(self.context.path+"data/images/callSubType.png", 2, "left", similar=40)
        self.context.app.escrever_direto("FE01") # subtype
        self.context.app.digitos(("down",13))
        sleep(4)
        self.context.app.clica_imagem(self.context.path+"data/images/informacaoImportante.png", 2, "left", similar=40)
        self.context.app.escrever_direto("N") # important information
        self.context.app.clica_imagem(self.context.path+"data/images/receiveDate.png", 3, "left", similar=40)
        self.context.app.escrever_direto("11/02/2019 09:45:28") # receive date
        self.context.app.clica_imagem(self.context.path+"data/images/scheduleDate.png", 3, "left", similar=40)
        self.context.app.escrever_direto("15/03/2010 12:21:10") # schedule (end) date
        self.context.app.clica_imagem(self.context.path+"data/images/deathRisk.png", 2, "left", similar=40)
        self.context.app.escrever_direto("N") # death risk
        self.context.app.clica_imagem(self.context.path+"data/images/confirmTroubleCall.png", 2, "left", similar=40)
        self.context.app.escrever_direto("N") # confirm trouble call
        self.context.app.clica_imagem(self.context.path+"data/images/confirmProcess.png", 2, "left", similar=40)
        self.context.app.escrever_direto("N") # confirm process
        self.context.app.clica_imagem(self.context.path+"data/images/executarSoap.png", 1, "left", similar=40)
        sleep(10)

    def abrir_metodo_getCustomerSituation(self):
        """Navigate the SOAP tree and open the getCustomerSituation method."""
        self.context.app.clica_imagem(self.context.path+"data/images/soapTelaUnica.png", 1, "left", similar=80)
        self.context.app.digitos(("right",2))
        self.context.app.clica_imagem(self.context.path+"data/images/getCustomerSituation.png", 1, "left", similar=80)
        sleep(2)
        self.context.app.digitos(("right",2),"enter")
        sleep(3)

    def verifica_customer_situation(self):
        """Open the response in XML view and verify the customer-situation result."""
        self.context.app.clica_imagem(self.context.path+"data/images/ajustarTelaSOAP.png", 1, "left")
        self.context.asserts.verifica_tela(self.context.path+"data/images/responseSOAP.png", 70)
        self.context.app.clica_imagem(self.context.path+"data/images/responseSOAP.png", 1, "left")
        self.context.app.clica_imagem(self.context.path+"data/images/opcaoXMLSoap.png", 1, "left", similar=80)
        self.context.asserts.verifica_tela(self.context.path+"data/images/resultadoCustomerSituation.png",80)

    ''' Historical Process GMT '''
    def abrir_metodo_historicalProcessGMT(self):
        """Navigate the SOAP tree and open the historicalProcessGMT method."""
        self.context.app.clica_imagem(self.context.path+"data/images/soapTelaUnica.png", 1, "left", similar=80)
        self.context.app.digitos(("right",2))
        self.context.app.clica_imagem(self.context.path+"data/images/historicalProcessGMT.png", 1, "left", similar=80)
        sleep(2)
        self.context.app.digitos(("right",2),"enter")
        sleep(3)

    def preenche_campo_uc_gmt(self):
        """Type the GMT consumer-unit id into the request and execute it."""
        self.context.asserts.verifica_tela(self.context.path+"data/images/requestSOAP.png", 80)
        self.context.app.clica_imagem(self.context.path+"data/images/campoIdSoap.png", 2,"left",similar=40)
        self.context.app.escrever_direto("35032049")
        self.context.app.clica_imagem(self.context.path+"data/images/executarSoap.png", 1, "left", similar=80)
        sleep(3)

    ''' Inclusao de Pedido '''
    def abrir_metodo_insertClientRequest(self):
        """Navigate the SOAP tree and open the insertClientRequest method."""
        self.context.app.clica_imagem(self.context.path+"data/images/soapTelaUnica.png", 1, "left", similar=80)
        self.context.app.digitos(("right",2))
        self.context.app.clica_imagem(self.context.path+"data/images/insertClientRequest.png", 1, "left", similar=80)
        sleep(2)
        self.context.app.digitos(("right",2),"enter")
        sleep(3)

    def preencher_dados_pedido(self):
        """Fill the client-request text field and wait for processing."""
        self.context.app.clica_imagem(self.context.path+"data/images/orderRequest.png", 2, "left", similar=40 )
        # NOTE(review): informal placeholder text below — confirm intended.
        self.context.app.escrever_direto("Penha seu gostoso")
        sleep(10)
from traffic.models.PathEstimationModel import PathEstimationModel
from traffic.models.PathLengthModel import PathLengthModel
from traffic.queries.GoogleMapsRouteQuery import GoogleMapsRouteQuery
from traffic.shared.responses.GoogleResponseObject import GoogleResponseObject
from django.db.models import Avg
from traffic import GOOGLE_API_KEY
class PathEstimationRepo:
    """Data-access helpers for path travel-time estimates."""

    def GetEstimationForPath(self, pathInstance):
        """Return the first stored estimation for ``pathInstance``."""
        manager = PathEstimationModel.get_manager()
        matches = manager.all().filter(pathInstance=pathInstance)
        return matches[0]

    def RecordNewEstimationForPath(self, path):
        """Query Google Maps for the route duration and persist it."""
        query = GoogleMapsRouteQuery(Starting_Location=path.StartingLocation,
                                     Ending_Location=path.EndingLocation,
                                     Api_Key=GOOGLE_API_KEY)
        duration_seconds = query.getResult().getDuration()["value"]
        PathLengthModel.get_manager().create(
            pathInstance=path,
            secondsPerTrip=duration_seconds
        )

    def RecalculateEstimateForPath(self, pathInstance):
        """Return the average of all recorded trip durations for ``pathInstance``."""
        manager = PathLengthModel.get_manager()
        return manager.all().filter(
            pathInstance=pathInstance
        ).aggregate(value=Avg('secondsPerTrip'))
|
import maya.cmds as cmds
import vectors ; from vectors import *
import math
class Obstacle:
    """A cylindrical obstacle in the Maya scene, axis-aligned with +Y.

    If a scene object named ``name`` exists, position/radius/height are read
    from it; otherwise the constructor defaults are used.
    (Python 2 source: uses print statements.)
    """
    def __init__(self, name, r = 1.0, h = 6.0, x = 0.0, y = 0.0, z = 0.0):
        self._name = name
        self.force = 4.0                   # not referenced inside this class
        self.direction = V(0.0, 1.0, 0.0)  # cylinder axis (world up)
        if cmds.objExists(name):
            self.position = V(cmds.getAttr("{0}.translate".format(name))[0])
            obstacleScale = cmds.getAttr("{0}.scale".format(name))[0]
            # NOTE(review): polyCylinder(query=True, ...) typically returns a
            # list; multiplying it by a float would fail — confirm in Maya.
            self.radius = cmds.polyCylinder(name, query=True, radius=True) * obstacleScale[0] + 1.0
            self.height = cmds.polyCylinder(name, query=True, height=True) * obstacleScale[1]
        else:
            print "box with name \"{0}\" does not exist".format(name)
            self.radius = r
            self.height = h
            self.position = V(x, y, z)
        print "reload"
        print self.position
        print self.height

    def distanceFrom(self, v):
        # Radial distance from the cylinder axis to point v.
        ObstacleToV = v - self.position
        parallel = (ObstacleToV.dot(self.direction)) * self.direction
        orthogonal = v - (self.position + parallel)
        return orthogonal.magnitude()

    def intersects(self, v):
        # True when v is inside both the height extent and the radius.
        ObstacleToV = v - self.position
        parallel = (ObstacleToV.dot(self.direction)) * self.direction
        orthogonal = v - (self.position + parallel)
        #if intersects
        if ((parallel.magnitude() < (self.height / 2)) and (orthogonal.magnitude() < self.radius)):
            return True
        else:
            return False

    def orthoProject(self, v):
        '''Project v on ostacle and return the result.'''
        ObstacleToV = v - self.position
        parallel = (ObstacleToV.dot(self.direction)) * self.direction
        orthogonal = v - (self.position + parallel)
        # NOTE(review): magnitude() is called with an argument here but with
        # none everywhere else — likely a bug; check the vectors module.
        return orthogonal.magnitude(self.radius - orthogonal.magnitude())

    def delete(self):
        # NOTE(review): 'at' is not a usual cmds.delete keyword — confirm.
        cmds.delete(at = self._name)
# -*- coding: utf-8 -*-
import collections
import functools
import pymysql
from com.caeit.jn2xx.config.configParam import ConfigParameters
from com.caeit.jn2xx.utils.datatypeUtil import strWrappedQuota
"""
0. 提供数据库增、删、改、查基本功能
1. 安装pymysql模块 pip3 install pymysql
"""
__author__ = "Eugene Gao"
__date__ = "2018.08.10"
class MysqlOperator(object):
    """Basic MySQL create/insert/select/update/delete helper.

    SQL-building methods return the SQL string; the ``dbOpenClose``
    decorator then executes it with cursor/commit/rollback handling.

    NOTE(review): all values are interpolated directly into the SQL text —
    this is vulnerable to SQL injection; consider parameterized queries
    (``cursor.execute(sql, params)``) for untrusted input.
    """

    # Decorator controlling connection behavior for CRUD operations:
    # opens a cursor, executes the SQL built by the wrapped method,
    # commits (or rolls back on error) and closes the cursor.
    def dbOpenClose(func):
        @functools.wraps(func)
        def run(self, db, *args, **kwargs):
            print("call %s():" % func.__name__)
            # Open a cursor.
            cursor = db.cursor()
            try:
                # Run the SQL statement produced by the wrapped method.
                cursor.execute(func(self, db, *args, **kwargs))
                # Collect the result rows.
                results = cursor.fetchall()
                print("execute results: ", type(results), len(results), results)
                # Commit the transaction.
                db.commit()
            except Exception as e:
                # On error, roll back the transaction.
                db.rollback()
                # Log the failure.
                print("运行", str(func), "方法时出现错误,错误代码:", e)
            finally:
                # Close the cursor.
                cursor.close()
                try:
                    # Return the execution result.
                    # NOTE(review): 'results' is unbound when execute failed;
                    # the inner try/except relies on the resulting NameError,
                    # and returning from 'finally' suppresses exceptions.
                    return results
                except Exception as e:
                    print("没有得到返回值,请检查代码,该信息出现在 MysqlOperator 类中的装饰器方法")
                finally:
                    pass
        return run

    @dbOpenClose
    def exec_staticsql(self, dbConnect, sql):
        """Execute a caller-provided SQL string as-is."""
        print("exec_staticsql sql:", sql)
        return sql

    @dbOpenClose
    def create_table(self, dbConnect, table_name, fields_orderdict):
        """Build CREATE TABLE IF NOT EXISTS from {column: definition}."""
        sql = "create table if not exists " + table_name + " ( "
        for key, value in fields_orderdict.items():
            sql = sql + key + " " + value + ", "
        # Drop the trailing comma, add the closing parenthesis.
        sql = sql[:-2] + " )"
        print("create_table sql:", sql)
        return sql

    @dbOpenClose
    def insert(self, dbConnect, table_name, fields_orderdict):
        """Build INSERT INTO from {column: value}."""
        fieldsql = ''
        valuesql = ''
        for key, value in fields_orderdict.items():
            fieldsql += key + ", "
            valuesql += strWrappedQuota(value) + ", "
        # Drop the trailing ", ".
        fieldsql = fieldsql[:-2]
        valuesql = valuesql[:-2]
        sql = "insert into " + table_name + " ( " + fieldsql + " ) values ( " + valuesql + " )"
        print("insert sql:", sql)
        return sql

    @dbOpenClose
    def __select(self, dbConnect, table_name, select_list, where_orderdict):
        """Build SELECT ... WHERE ... (AND-joined equality conditions)."""
        selectsql = ''
        for key in select_list:
            selectsql += key + ", "
        # Drop the trailing ", ".
        selectsql = selectsql[:-2]
        wheresql = ''
        for key,value in where_orderdict.items():
            wheresql += key + " = " + strWrappedQuota(value) + " and "
        # Drop the trailing "and ".
        wheresql = wheresql[:-4]
        sql = "select " + selectsql + " from " + table_name + " where " + wheresql
        print("select sql:", sql)
        return sql

    @dbOpenClose
    def update(self, dbConnect, table_name, update_orderdict, where_orderdict):
        """Build UPDATE ... SET ... WHERE ... from the two dicts."""
        updatesql = ''
        for key, value in update_orderdict.items():
            updatesql += key + " = " + strWrappedQuota(value) + ", "
        # Drop the trailing ", ".
        updatesql = updatesql[:-2]
        wheresql = ''
        for key, value in where_orderdict.items():
            wheresql += key + " = " + strWrappedQuota(value) + " and "
        # Drop the trailing "and ".
        wheresql = wheresql[:-4]
        sql = "update " + table_name + " set " + updatesql + " where " + wheresql
        print("update sql:", sql)
        return sql

    @dbOpenClose
    def delete(self, dbConnect, table_name, where_orderdict):
        """Build DELETE FROM ... WHERE ... from the dict."""
        wheresql = ''
        for key, value in where_orderdict.items():
            wheresql += key + " = " + strWrappedQuota(value) + " and "
        # Drop the trailing "and ".
        wheresql = wheresql[:-4]
        sql = "delete from " + table_name + " where " + wheresql
        print("delete sql:", sql)
        return sql

    def selectAll(self, dbConnect, table_name, select_list, where_orderdict):
        """
        Select supporting multiple result records.
        :param dbConnect: database connection
        :param table_name: table to operate on
        :param select_list: list of SELECT column names
        :param where_orderdict: dict of WHERE column -> value
        :return: fetchRecord_list: empty when there are no results; otherwise
                 a list with one OrderedDict per result row
        """
        fetchRecord_list = list()
        record_list = self.__select(dbConnect, table_name, select_list, where_orderdict)
        if len(record_list) == 0:
            return fetchRecord_list
        else:
            for recordIdx in range(0, len(record_list)):
                record = record_list[recordIdx]
                fetch_orderdict = collections.OrderedDict()
                # Re-key each row by the requested column names.
                for index in range(0, len(record)):
                    fetch_orderdict[select_list[index]] = record[index]
                print("MysqlOperator:selectOne:fetch_orderdict: ", fetch_orderdict)
                fetchRecord_list.append(fetch_orderdict)
            return fetchRecord_list

    def selectOne(self, dbConnect, table_name, select_list, where_orderdict):
        # Return a single record (the first of selectAll's results).
        fetchRecord_list = self.selectAll(dbConnect, table_name, select_list, where_orderdict)
        if len(fetchRecord_list) == 0:
            return fetchRecord_list
        else:
            # If there are results, return the first record.
            return fetchRecord_list[0]
def main(dbConnect):
    """Demo driver exercising every MysqlOperator operation on an
    ``employee`` table using the given open database connection."""
    table_name = "employee"
    # Drop table -------------------------------------------------------------
    staticsql = "drop table if exists " + table_name
    MysqlOperator().exec_staticsql(dbConnect, staticsql)
    # Create table -----------------------------------------------------------
    fields_orderdict = collections.OrderedDict()
    fields_orderdict["first_name"] = "char(20) not null"
    fields_orderdict["last_name"] = "char(20)"
    fields_orderdict["bir"] = "datetime"
    fields_orderdict["age"] = "int"
    fields_orderdict["sex"] = "char(1)"
    fields_orderdict["income"] = "double"
    MysqlOperator().create_table(dbConnect, table_name, fields_orderdict)
    # Insert a row (the dict is reused: definitions replaced by values) ------
    # insertsql = '''insert into employee (first_name, last_name, bir, age, sex, income)
    #                values ("shumeng", "zhu", "1982-07-31 19:30:00", 32, "f", 7500)'''
    fields_orderdict["first_name"] = "minhe"
    fields_orderdict["last_name"] = "gao"
    fields_orderdict["bir"] = "2015-02-01 19:30:00"
    fields_orderdict["age"] = 3
    fields_orderdict["sex"] = "M"
    fields_orderdict["income"] = 40000.75
    MysqlOperator().insert(dbConnect, table_name, fields_orderdict)
    # Query rows -------------------------------------------------------------
    # selectsql = '''selectOne * from employee where first_name = 'qian' and last_name = 'gao' '''
    select_list = list(fields_orderdict.keys())
    where_orderdict = collections.OrderedDict()
    # where_orderdict = {column_name : value}
    where_orderdict["first_name"] = "minhe"
    where_orderdict["last_name"] = "gao"
    fetchRecord_list = MysqlOperator().selectOne(dbConnect, table_name, select_list, where_orderdict)
    # Update rows ------------------------------------------------------------
    # updatesql = '''update employee set income = 50000.55 where first_name = 'minhe' and last_name = 'gao' '''
    update_orderdict = collections.OrderedDict()
    # update_orderdict = {column_name : value}
    update_orderdict["income"] = 6000.66
    where_orderdict = collections.OrderedDict()
    # where_orderdict = {column_name : value}
    where_orderdict["first_name"] = "minhe"
    where_orderdict["last_name"] = "gao"
    MysqlOperator().update(dbConnect, table_name, update_orderdict, where_orderdict)
    # Delete rows ------------------------------------------------------------
    # deletesql = '''delete from employee where first_name = 'minhe' and last_name = 'gao' '''
    MysqlOperator().delete(dbConnect, table_name, where_orderdict)
if __name__ == "__main__":
    # Load the database configuration (local test profile).
    dbconfig_dict = ConfigParameters().dbconfig_local_test
    # Open the database connection.
    dbConnect = pymysql.connect(host=dbconfig_dict["host"], port=dbconfig_dict["port"],
                                user=dbconfig_dict["user"], password=dbconfig_dict["password"],
                                database=dbconfig_dict["dataBase"], charset=dbconfig_dict["charset"])
    print(dbconfig_dict)
    main(dbConnect)
    dbConnect.close()
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.envs.tic_tac_toe_env_problem."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.envs import env_problem_utils
from tensor2tensor.envs import tic_tac_toe_env # pylint: disable=unused-import
from tensor2tensor.envs import tic_tac_toe_env_problem # pylint: disable=unused-import
from tensor2tensor.utils import registry
import tensorflow as tf
class TicTacToeEnvProblemTest(tf.test.TestCase):
  """Smoke test for the registered tic-tac-toe EnvProblem."""

  def test_registration_and_interaction_with_env_problem(self):
    """Steps a batch of random agents; checks batch shapes and reward bookkeeping."""
    batch_size = 5
    # This ensures that registration has occurred.
    ep = registry.env_problem("tic_tac_toe_env_problem", batch_size=batch_size)
    ep.reset()
    num_done, num_lost, num_won, num_draw = 0, 0, 0, 0
    nsteps = 100
    for _ in range(nsteps):
      # One random action per environment in the batch.
      actions = np.stack([ep.action_space.sample() for _ in range(batch_size)])
      obs, rewards, dones, infos = ep.step(actions)
      # Assert that things are happening batchwise.
      self.assertEqual(batch_size, len(obs))
      self.assertEqual(batch_size, len(rewards))
      self.assertEqual(batch_size, len(dones))
      self.assertEqual(batch_size, len(infos))
      # Reset only the environments that finished this step.
      done_indices = env_problem_utils.done_indices(dones)
      ep.reset(done_indices)
      num_done += sum(dones)
      # Terminal rewards: -1 loss, 0 draw, +1 win.
      for r, d in zip(rewards, dones):
        if not d:
          continue
        if r == -1:
          num_lost += 1
        elif r == 0:
          num_draw += 1
        elif r == 1:
          num_won += 1
        else:
          raise ValueError("reward should be -1, 0, 1 but is {}".format(r))
    # Assert that something got done at least, without that the next assert is
    # meaningless.
    self.assertGreater(num_done, 0)
    # Assert that things are consistent.
    self.assertEqual(num_done, num_won + num_lost + num_draw)
if __name__ == "__main__":
  # Run the test suite.
  tf.test.main()
|
# -*- coding: utf-8 -*-
"""Pub/Sub pull example on Google Kubernetes Engine.
This program pulls messages from a Cloud Pub/Sub topic and
prints to standard output.
"""
import datetime
import time
# [START gke_pubsub_pull]
# [START container_pubsub_pull]
from google import auth
from google.cloud import pubsub_v1
def main():
    """Pull and process messages from the 'echo-read' subscription until the
    stream fails or is interrupted."""
    # The project ID comes from the ambient default credentials.
    _, project_id = auth.default()
    subscription_id = 'echo-read'
    client = pubsub_v1.SubscriberClient()
    subscription_path = client.subscription_path(project_id, subscription_id)

    def callback(message: pubsub_v1.subscriber.message.Message) -> None:
        """Log, simulate work on, and acknowledge a single message."""
        print(f"Received message: ID={message.message_id} Data={message.data}")
        print(f"[{datetime.datetime.now()}] Processing: {message.message_id}")
        time.sleep(3)
        print(f"[{datetime.datetime.now()}] Processed: {message.message_id}")
        message.ack()

    pull_future = client.subscribe(subscription_path, callback=callback)
    print(f"Pulling messages from {subscription_path}...")
    with client:
        try:
            # Block forever; surfaces any error raised by the stream.
            pull_future.result()
        except Exception as e:
            print(e)
# [END container_pubsub_pull]
# [END gke_pubsub_pull]
if __name__ == '__main__':
    # Script entry point: start the blocking pull loop.
    main()
|
from django.contrib import admin
from .models import Cat
from .models import Breed
# Expose Cat and Breed in the Django admin with the default ModelAdmin.
admin.site.register(Cat)
admin.site.register(Breed)
'''
Train a CRF sequence tagging and global label prediction model on top of the eukarya AWD-LSTM model.
Reports a bunch of metrics to w&b. For hyperparameter search, we optimize the AUC of the global label and F1 of the cleavage site (no tolerance window)
Hyperparameters to be optimized:
- learning rate
- classifier hidden size
- batch size
Special for eukarya - less complex as full SignalP:
position labels {'I', 'M', 'O', 'S'}
global labels {0,1}
TODO decide on correct checkpoint selection: Right now, saves lowest validation loss checkpoint.
If I want to trade off the different metrics, cannot just use the best loss.
'''
#Felix August 2020
import argparse
import time
import math
import numpy as np
import torch
import logging
import torch.nn as nn
import sys
sys.path.append("..")
from typing import Tuple
sys.path.append("/zhome/1d/8/153438/experiments/master-thesis/") #to make it work on hpc, don't want to install in venv yet
from models.awd_lstm import ProteinAWDLSTMConfig
from models.sp_tagging_awd_lstm import ProteinAWDLSTMSequenceTaggingCRF
from tape import visualization #import utils.visualization as visualization
from train_scripts.utils.signalp_dataset import ThreeLineFastaDataset
from torch.utils.data import DataLoader
from apex import amp
import data
import os
import wandb
import random
import hashlib
from sklearn.metrics import matthews_corrcoef, average_precision_score, roc_auc_score
# Module-level logger with a timestamped console handler.
# Fix: the original called logger.setLevel(logging.INFO) twice; the
# duplicate (no-op) call is removed.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Run on GPU when available; tensors are moved to this device explicitly.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

c_handler = logging.StreamHandler()
formatter = logging.Formatter(
    "%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%y/%m/%d %H:%M:%S")
c_handler.setFormatter(formatter)
logger.addHandler(c_handler)
def training_step(model: torch.nn.Module, data: torch.Tensor, targets: torch.Tensor, optimizer: torch.optim.Optimizer,
                  args: argparse.Namespace, i: int) -> float:
    '''Predict one minibatch and perform a single optimizer update step.

    Args:
        model: CRF sequence-tagging model; returns (loss, ...) when targets are given.
        data: input minibatch tensor (moved to the module-level `device` here).
        targets: per-position label tensor; label 0 marks signal-peptide positions.
        optimizer: optimizer over all model parameters.
        args: parsed CLI arguments; only `args.clip` is used here.
        i: minibatch index (currently unused; kept for interface stability).

    Returns:
        loss: scalar loss value of the minibatch.
    '''
    data = data.to(device)
    targets = targets.to(device)
    # Sequence-level label: 1 iff any position is tagged 0 (signal peptide present).
    global_targets = (targets == 0).any(axis =1) *1 #binary signal peptide existence indicator
    model.train()
    optimizer.zero_grad()

    loss, _, _, _ = model(data,
                          global_targets = global_targets,
                          targets= targets )

    if torch.cuda.is_available():
        # apex amp loss scaling; model/optimizer were wrapped by amp.initialize()
        # in main_training_loop, which only happens on CUDA.
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
    if args.clip:
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
    optimizer.step()

    return loss.item()
def sp_metrics_global(probs: np.ndarray, targets: np.ndarray, threshold = 0.5):
    '''Global (per-sequence) signal-peptide detection metrics.

    probs: predicted probability of the positive class per sequence.
    targets: binary ground-truth labels.
    threshold: decision threshold used only for the MCC hard calls.
    '''
    hard_calls = (probs >= threshold) * 1
    return {
        'AUPRC detection': average_precision_score(targets, probs),
        'AUC detection': roc_auc_score(targets, probs),
        'MCC detection': matthews_corrcoef(targets, hard_calls),
    }
def sp_metrics_sequence(preds: np.ndarray, tags: np.ndarray):
    '''Sequence tagging metrics (not implemented yet).

    preds: (batch_size, seq_len, num_classes) array of probabilities
    tags: (batch_size, seq_len) array of true tags

    Raises:
        NotImplementedError: always, until position-wise metrics are defined.
    '''
    #TODO what metrics make sense? multiclass-classification technically, but correct labels are not really of interest
    # Bug fix: the original *returned* the NotImplementedError class instead of
    # raising it, so a caller would silently receive a class object as "metrics".
    raise NotImplementedError
def cs_detection_from_sequence(predictions: np.ndarray, tags: np.ndarray, window = 1):
    '''Compute cleavage site (CS) detection metrics from predicted and true tag sequences.

    predictions, tags: (batch_size, seq_len) integer tag arrays; tag 0 marks
    signal-peptide positions, so the CS is the last 0 in a sequence.
    window: tolerance (in positions) around the true cleavage site.
    Returns a dict with CS precision, recall and F1.
    '''
    has_sp = (tags == 0).any(axis=1)  # per-sequence SP presence flag

    # Sequences WITH a signal peptide: a hit is a predicted CS inside the window.
    sp_preds = predictions[has_sp]
    sp_tags = tags[has_sp]
    # Index of the last 0-tag; assumes the 0-run starts at position 0
    # (if the 0s are not contiguous the tags are wrong anyway).
    true_cs = (sp_tags == 0).sum(axis=1) - 1
    pred_cs = (sp_preds == 0).sum(axis=1) - 1
    hit = ((pred_cs >= true_cs - window) & (pred_cs <= true_cs + window + 1)) * 1
    true_positives = hit.sum()
    false_negatives = (hit == 0).sum()

    # Sequences WITHOUT a signal peptide: predicting any SP tag implies a
    # (false) CS prediction, since SP prediction implies CS prediction.
    no_sp_preds = predictions[~has_sp]
    called_sp = (no_sp_preds == 0).any(axis=1)
    false_positives = called_sp.sum()

    recall = true_positives / (true_positives + false_negatives)
    precision = true_positives / (true_positives + false_positives)
    f1_score = 2 * (precision * recall) / (precision + recall)
    return {'CS Precision': precision, 'CS Recall': recall, 'CS F1': f1_score}
def validate(model: torch.nn.Module, valid_data: DataLoader) -> Tuple[float, dict, object]:
    '''Run over the validation data and aggregate loss and metrics.

    Returns:
        (mean loss over the set, metrics dict, wandb ROC curve object or NaN)
    '''
    model.eval()
    # NOTE(review): no torch.no_grad() here — gradients are tracked
    # needlessly during validation; confirm before changing.
    all_targets = []
    all_global_targets = []
    all_global_probs = []
    all_pos_preds = []

    total_loss = 0
    for i, batch in enumerate(valid_data):
        data, targets = batch
        data = data.to(device)
        targets = targets.to(device)
        # Sequence-level label: 1 iff any position is tagged 0 (signal peptide present).
        global_targets = (targets == 0).any(axis =1) *1 #binary signal peptide existence indicator

        loss, global_probs, pos_probs, pos_preds = model(data, global_targets = global_targets, targets= targets )

        total_loss += loss.item()
        all_targets.append(targets.detach().cpu().numpy())
        all_global_targets.append(global_targets.detach().cpu().numpy())
        all_global_probs.append(global_probs.detach().cpu().numpy())
        all_pos_preds.append(pos_preds.detach().cpu().numpy())

    all_targets = np.concatenate(all_targets)
    all_global_targets = np.concatenate(all_global_targets)
    all_global_probs = np.concatenate(all_global_probs)
    all_pos_preds = np.concatenate(all_pos_preds)

    #TODO currently global_probs come as (n_batch, 2). When moving to binary crossentropy changes to n_batch. then need to change wandb.plots.ROC
    try:
        global_roc_curve = wandb.plots.ROC(all_global_targets, all_global_probs, [0,1])
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrow to Exception if this code is touched again.
        global_roc_curve = np.nan #sometimes wandb roc fails for numeric reasons

    # Keep only the positive-class column for the scalar metrics.
    all_global_probs = all_global_probs[:,1]

    global_metrics = sp_metrics_global(all_global_probs, all_global_targets)
    cs_metrics = cs_detection_from_sequence(all_pos_preds, all_targets)

    val_metrics = {'loss': total_loss / len(valid_data), **cs_metrics, **global_metrics }
    return (total_loss / len(valid_data)), val_metrics, global_roc_curve
def main_training_loop(args: argparse.Namespace):
    '''Full fine-tuning run: load pretrained AWD-LSTM, train the CRF tagging
    head, validate each epoch, checkpoint on best validation loss.

    Returns:
        (last val_loss, best global-label AUC, best cleavage-site F1)
    '''
    if args.enforce_walltime == True:
        loop_start_time = time.time()
        logger.info('Started timing loop')

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    #Setup Model
    #TODO how to inject new num_labels here?
    logger.info(f'Loading pretrained model in {args.resume}')
    config = ProteinAWDLSTMConfig.from_pretrained(args.resume)
    #patch LM model config for new downstream task
    setattr(config, 'num_labels', args.num_labels)
    setattr(config, 'num_global_labels', 2)
    setattr(config, 'classifier_hidden_size', args.classifier_hidden_size)
    setattr(config, 'use_crf', True)
    setattr(config, 'global_label_loss_multiplier', args.global_label_loss_multiplier)
    setattr(config, 'use_rnn', args.use_rnn)
    if args.use_rnn == True: #rnn training way more expensive than MLP
        setattr(args, 'epochs', 200)

    model = ProteinAWDLSTMSequenceTaggingCRF.from_pretrained(args.resume, config = config)
    #training logger
    time_stamp = time.strftime("%y-%m-%d-%H-%M-%S", time.gmtime())
    experiment_name = f"{args.experiment_name}_{time_stamp}"
    viz = visualization.get(args.output_dir, experiment_name, local_rank = -1) #debug=args.debug) #this -1 means traning is not distributed, debug makes experiment dry run for wandb

    train_data = ThreeLineFastaDataset(os.path.join(args.data, 'train.fasta'))
    val_data = ThreeLineFastaDataset(os.path.join(args.data, 'valid.fasta'))

    train_loader = DataLoader(train_data, batch_size =args.batch_size, collate_fn= train_data.collate_fn, shuffle = True)
    val_loader = DataLoader(val_data, batch_size =args.batch_size, collate_fn= train_data.collate_fn)
    logger.info(f'Data loaded. One epoch = {len(train_loader)} batches.')

    # NOTE(review): no fallback if args.optimizer is neither 'sgd' nor 'adam';
    # `optimizer` would then be unbound and crash below.
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wdecay)

    model.to(device)
    logger.info('Model set up!')
    num_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f'Model has {num_parameters} trainable parameters')

    if torch.cuda.is_available():
        # apex mixed precision wrapping (O0 = fp32 passthrough).
        model, optimizer = amp.initialize(model, optimizer, opt_level='O0')#'O1')
    else :
        logger.info(f'Running model on {device}, not using nvidia apex')

    #set up wandb logging, tape visualizer class takes care of everything. just login to wandb in the env as usual
    viz.log_config(args)
    viz.log_config(model.config.to_dict())
    viz.watch(model)
    logger.info(f'Logging experiment as {experiment_name} to wandb/tensorboard')

    #keep track of best loss
    stored_loss = 100000000
    learning_rate_steps = 0  # NOTE(review): never used below
    num_epochs_no_improvement = 0
    global_step = 0
    best_AUC_globallabel = 0
    best_F1_cleavagesite = 0
    for epoch in range(1, args.epochs+1):
        logger.info(f'Starting epoch {epoch}')
        viz.log_metrics({'Learning Rate': optimizer.param_groups[0]['lr'] }, "train", global_step)
        epoch_start_time = time.time()
        start_time = time.time() #for lr update interval

        for i, batch in enumerate(train_loader):
            data, targets = batch
            loss = training_step(model, data, targets, optimizer, args, i)
            viz.log_metrics({'loss': loss}, "train", global_step)
            global_step += 1

        logger.info(f'Step {global_step}, Epoch {epoch}: validating for {len(val_loader)} Validation steps')
        val_loss, val_metrics, roc_curve = validate(model, val_loader)
        viz.log_metrics(val_metrics, "val", global_step)
        if epoch == args.epochs:
            viz.log_metrics({'Detection roc curve': roc_curve}, 'val', global_step)

        #keep track of optimization targets
        # "No improvement" means neither AUC nor CS F1 beat their best so far.
        if (val_metrics['AUC detection'] <= best_AUC_globallabel) and (val_metrics['CS F1'] <= best_F1_cleavagesite):
            num_epochs_no_improvement += 1
        else:
            num_epochs_no_improvement = 0
            best_AUC_globallabel = max(val_metrics['AUC detection'], best_AUC_globallabel)
            best_F1_cleavagesite = max(val_metrics['CS F1'], best_F1_cleavagesite)

        # Checkpoint on best validation loss (see module TODO on checkpoint selection).
        if val_loss < stored_loss:
            model.save_pretrained(args.output_dir)
            #also save with apex
            if torch.cuda.is_available():
                checkpoint = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'amp': amp.state_dict()
                }
                torch.save(checkpoint, os.path.join(args.output_dir, 'amp_checkpoint.pt'))
            logger.info(f'New best model with loss {val_loss}, AUC {best_AUC_globallabel}, F1 {best_F1_cleavagesite}, Saving model, training step {global_step}')
            stored_loss = val_loss

        # NOTE(review): early stop also requires epoch > 100, which the log
        # message does not mention — confirm this is intended.
        if (epoch>100) and (num_epochs_no_improvement > 10):
            logger.info('No improvement for 10 epochs, ending training early.')
            logger.info(f'Best: AUC {best_AUC_globallabel}, F1 {best_F1_cleavagesite}')
            return (val_loss, best_AUC_globallabel, best_F1_cleavagesite)

        if args.enforce_walltime == True and (time.time() - loop_start_time) > 84600: #23.5 hours
            logger.info('Wall time limit reached, ending training early')
            logger.info(f'Best: AUC {best_AUC_globallabel}, F1 {best_F1_cleavagesite}')
            return (val_loss, best_AUC_globallabel, best_F1_cleavagesite)

        logger.info(f'Epoch {epoch} training complete')
        logger.info(f'Epoch {epoch}, took {time.time() - epoch_start_time:.2f}.\t Train loss: {loss:.2f}')

    return (val_loss, best_AUC_globallabel, best_F1_cleavagesite)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='AWD-LSTM language modeling')
    parser.add_argument('--data', type=str, default='../data/data/signalp_5_data/',
                        help='location of the data corpus. Expects test, train and valid .txt')

    #args relating to training strategy.
    parser.add_argument('--lr', type=float, default=10,
                        help='initial learning rate')
    parser.add_argument('--lr_step', type = float, default = 0.9,
                        help = 'factor by which to multiply learning rate at each reduction step')
    parser.add_argument('--update_lr_steps', type = int, default = 6000,
                        help = 'After how many update steps to check for learning rate update')
    parser.add_argument('--clip', type=float, default=0.25,
                        help='gradient clipping')
    parser.add_argument('--epochs', type=int, default=8000,
                        help='upper epoch limit')
    parser.add_argument('--wait_epochs', type = int, default = 3,
                        help='Reduce learning rates after wait_epochs epochs without improvement')
    parser.add_argument('--batch_size', type=int, default=80, metavar='N',
                        help='batch size')
    parser.add_argument('--bptt', type=int, default=70,
                        help='sequence length')
    parser.add_argument('--wdecay', type=float, default=1.2e-6,
                        help='weight decay applied to all weights')
    parser.add_argument('--optimizer', type=str, default='sgd',
                        help='optimizer to use (sgd, adam)')
    # NOTE(review): argparse `type=bool` is a footgun — bool('False') is True,
    # so any non-empty string on the CLI enables these flags. Applies to
    # --reset_hidden, --wandb_sweep and --enforce_walltime.
    parser.add_argument('--reset_hidden', type=bool, default=False,
                        help = 'Reset the hidden state after encounter of the tokenizer stop token')
    parser.add_argument('--log_interval', type=int, default=10000, metavar='N',
                        help='report interval')
    parser.add_argument('--output_dir', type=str, default=f'{time.strftime("%Y-%m-%d",time.gmtime())}_awd_lstm_lm_pretraining',
                        help='path to save logs and trained model')
    parser.add_argument('--wandb_sweep', type=bool, default=False,
                        help='wandb hyperparameter sweep: Override hyperparams with params from wandb')
    parser.add_argument('--resume', type=str, default='',
                        help='path of model to resume (directory containing .bin and config.json')
    parser.add_argument('--experiment_name', type=str, default='AWD_LSTM_LM',
                        help='experiment name for logging')
    parser.add_argument('--enforce_walltime', type=bool, default =True,
                        help='Report back current result before 24h wall time is over')
    parser.add_argument('--override_checkpoint_saving', action='store_true',
                        help= 'keep model weights after --epochs, not at the best loss during the run. Useful when training metric target does not correspond to best loss.')
    parser.add_argument('--global_label_loss_multiplier', type=float, default = 1.0,
                        help='multiplier for the crossentropy loss of the global label prediction. Use for sequence tagging/ global label performance tradeoff')

    #args for model architecture
    parser.add_argument('--use_rnn', action='store_true',
                        help='use biLSTM instead of MLP for emissions')
    parser.add_argument('--classifier_hidden_size', type=int, default=128, metavar='N',
                        help='Hidden size of the classifier head MLP')
    parser.add_argument('--num_labels', type=int, default=4, metavar='N',
                        help='Number of labels for the classifier head')

    args = parser.parse_args()

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    #make unique output dir
    time_stamp = time.strftime("%y-%m-%d-%H-%M-%S", time.gmtime())
    args.output_dir = os.path.join(args.output_dir, args.experiment_name+time_stamp)
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    # NOTE(review): this re-configures the same module logger that was already
    # set up at import time, adding a second StreamHandler — console messages
    # will be emitted twice.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    c_handler = logging.StreamHandler()
    f_handler = logging.FileHandler(os.path.join(args.output_dir, 'log.txt'))
    formatter = logging.Formatter(
        "%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%y/%m/%d %H:%M:%S")
    c_handler.setFormatter(formatter)
    f_handler.setFormatter(formatter)
    logger.addHandler(c_handler)
    logger.addHandler(f_handler)

    #choose device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f'Running on: {device}')
    logger.info(f'Saving to {args.output_dir}')
    main_training_loop(args)
from framework.base_page import BasePage
class SportNewsHomePage(BasePage):
    """Page object for the sport-news home page."""

    # NBA entry link (the "xpath=>" locator prefix is presumably parsed by
    # BasePage.a_click_element — confirm against the framework).
    nba_link = "xpath=>//div[@class='schedule clearfix']/ul/li/a"

    def click_nba_link(self):
        """Click the NBA entry link, then wait 2 seconds for the page to load."""
        self.a_click_element(self.nba_link)
        self.time_sleep(2)
# Use conditionals to report which of two user-supplied numbers is larger.
print()

num_1 = int(input('Escriba un primer numero: '))
num_2 = int(input('Escriba un segundo numero: '))
print('==\n')

if num_1 > num_2:
    print('El numero {} es mayor a {}.'.format(num_1, num_2))
elif num_1 < num_2:
    print('El numero {} es mayor a {}.'.format(num_2, num_1))
else:
    # Neither is greater, so they are equal.
    print('El numero {} es igual a {}.'.format(num_2, num_1))
print()
# What is the first term in the Fibonacci sequence to contain 1000 digits?
# (Project Euler problem 25.)


def first_fib_term_with_digits(num_digits: int = 1000) -> int:
    """Return the 1-based index of the first Fibonacci term with num_digits digits.

    Fixes two issues in the original script: Python 2 ``print`` statements
    (a syntax error under Python 3, which the rest of this codebase uses) and
    the strict ``>`` comparison against 10**999 — 10**999 itself already has
    1000 digits, so the threshold test must be ``>=``.
    """
    if num_digits <= 1:
        return 1  # F(1) == 1 already has one digit
    limit = 10 ** (num_digits - 1)  # smallest integer with num_digits digits
    a, b = 1, 1  # F(1), F(2)
    index = 2
    while b < limit:
        a, b = b, a + b
        index += 1
    return index


print(first_fib_term_with_digits(1000))
|
# Dana jest posortowana rosnąco tablica A wielkości n zawierająca
# parami rozne liczby naturalne. Podaj algorytm, ktory sprawdzi, czy
# jest taki indeks i, ze A[i] == i.
# Co zmieni sie, jezeli liczby beda calkowite, niekoniecznie naturalne?
# 1. Gdy liczby sa naturalne wystarczy sprawdzic czy A[0] == 0 bo gdy A[0] > 0 to A[i] > i
# dla kazdego i indeksu w A.
# 2. Gdy liczby calkowite trzeba szukac binary searchem.
def findwhennat(A):
    """Fixed-point test for sorted, pairwise-distinct naturals.

    Strictly increasing naturals satisfy A[i] >= A[0] + i, so A[i] == i is
    only possible at i == 0 — checking the first element suffices.
    """
    return A[0] == 0
def bin_search(A, left, right):
    """Binary search for a fixed point (A[i] == i) within indices [left, right]
    of a sorted array of pairwise-distinct integers.

    Bug fix: the original discarded the results of both recursive calls
    (missing ``return``), so it fell through to ``return False`` whenever the
    fixed point was not the very first midpoint probed.
    """
    if left <= right:
        mid = (left + right) // 2
        if A[mid] == mid:
            return True
        if A[mid] > mid:
            # A[i] - i is non-decreasing, so a fixed point can only lie left.
            return bin_search(A, left, mid - 1)
        return bin_search(A, mid + 1, right)
    return False
def findwhenint(A):
    """Return True iff some index i satisfies A[i] == i, for sorted,
    pairwise-distinct integers (negative values allowed)."""
    return bin_search(A, 0, len(A) - 1)
if __name__ == '__main__':
    # Smoke test: A[2] == 2, so this prints True.
    T = [-7,-6,2,4,5,6]
    print(findwhenint(T))
# ______________________________________________________________________
#
# This module is part of the PyMINLP solver framework.
# ______________________________________________________________________
from pyminlp.solver import PyMINLP
from pyminlp.plugins.quad import *
def foo(filename):
    """Build a model instance from *filename*, configure the PyMINLP solver
    with three constraint handlers, solve, and print the result.
    """
    # Create model instance first.
    model = createInstance(filename)

    # Set up solver: register handlers for linear, convex-quadratic and
    # nonconvex-quadratic constraints, each with its own priorities
    # (priority semantics are defined by PyMINLP — confirm ordering there).
    solver = PyMINLP()
    solver.use_constraint_handler(name='LinearHandler',
                                  constypes=['Quadcons1', 'Quadcons2', 'Cut'],
                                  identify_prio=1,
                                  enforce_prio=1,
                                  relaxation=True)

    solver.use_constraint_handler(name='QuadConvHandler',
                                  constypes=['Quadcons1', 'Quadcons2'],
                                  identify_prio=2,
                                  enforce_prio=2,
                                  relaxation=False)

    solver.use_constraint_handler(name='QuadNoncHandler',
                                  constypes=['Quadcons1', 'Quadcons2'],
                                  identify_prio=3,
                                  enforce_prio=3,
                                  relaxation=False)

    # SolverFactory comes from the pyomo namespace pulled in via
    # `from pyminlp.plugins.quad import *`.
    relax_solver = SolverFactory('cbc')
    solver.set_relaxation_solver(relax_solver)
    solver.set_epsilon(0.0001, 0.0001)
    solver.set_verbosity(2)

    res = solver.solve(model)
    print(res)
def plugin_simulation(model, solver):
    """Exercise the quadratic plugins directly (separation and branching),
    bypassing the regular solve loop. Intended for manual testing.
    """
    # Plugin simulation: pick the handlers out of the solver's internal list.
    hdlrs = solver._used_hdlrs
    for (_, hdlr) in hdlrs:
        if hdlr.name() == 'quadconv':
            conv_hdlr = hdlr
        elif hdlr.name() == 'quadnonc':
            nonc_hdlr = hdlr

    # Cutting plane generation (called twice to simulate repeated rounds).
    violated_conss = {'Quadcons1':['e1', 'e2']}
    conv_hdlr.separate(violated_conss, model, model.clone())
    conv_hdlr.separate(violated_conss, model, model.clone())

    # Branching on the nonconvex handler.
    nonc_hdlr.branch(violated_conss, model)

    print('Done')
def createInstance( filename ):
    """Find the model specification in this function. It creates an
    instance of this model using the provided data file and returns this
    instance.

    The model is a general QCQP: minimize a linear objective subject to
    two-sided quadratic constraints, split into <= upper bound (Quadcons1)
    and >= lower bound (Quadcons2, negated into <= form).
    """
    # Abstract model
    model = AbstractModel()

    # Sets: C = constraint indices, V = variable indices.
    model.C = Set()
    model.V = Set()

    # Constraint parameters: quadratic matrix A, linear term b, constant c.
    model.A = Param(model.C, model.V, model.V, default=0)
    model.b = Param(model.C, model.V, default=0)
    model.c = Param(model.C, default=0)

    # Constraint bounds (cl <= quad expr <= cu).
    model.cl = Param(model.C)
    model.cu = Param(model.C)

    # Variable bounds
    model.xl = Param(model.V)
    model.xu = Param(model.V)

    # Objective function parameters
    model.z = Param(model.V, default=0)

    # Bounds rule for the variables.
    def var_bounds_rule(model, k):
        return model.xl[k], model.xu[k]

    # Variables
    model.X = Var(model.V, bounds=var_bounds_rule, domain=Reals)

    # Objective: linear, z' * X.
    def obj_rule(model):
        return sum(model.z[k] * model.X[k] for k in model.V)
    model.Obj = Objective(rule=obj_rule, sense=minimize)

    # Constraints: X' A_i X + b_i' X + c_i <= cu_i ...
    def cons_rule_1(model, i):
        quad = sum(sum(model.A[i, k1, k2] * model.X[k1] * model.X[k2]
                       for k1 in model.V) for k2 in model.V)
        lin = sum(model.b[i, k] * model.X[k] for k in model.V) + model.c[i]
        return quad + lin <= model.cu[i]
    model.Quadcons1 = Constraint(model.C, rule=cons_rule_1)

    # ... and the lower bound, negated into <= form.
    def cons_rule_2(model, i):
        quad = sum(sum(model.A[i, k1, k2] * model.X[k1] * model.X[k2]
                       for k1 in model.V) for k2 in model.V)
        lin = sum(model.b[i, k] * model.X[k] for k in model.V) + model.c[i]
        return -(quad + lin) <= -model.cl[i]
    model.Quadcons2 = Constraint(model.C, rule=cons_rule_2)

    instance = model.create_instance(filename)
    return instance
|
import sys
import os
import argparse
import logging
import json
import time
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.optim import SGD
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
from tensorboardX import SummaryWriter
# Make the repository root importable when this file is run directly.
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
# Fix RNG seeds for reproducibility (CPU and all GPUs).
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
import model.utils as utils # noqa
import model.functional as functional # noqa
from data.deep_lesion_dataset import DeepLesionDataset # noqa
from model.gpn import GPN # noqa
from model.bbox_transform import bbox_overlaps # noqa
from model.ellipse_transform import ellipse_overlaps # noqa
# Command-line interface; cfg_path and save_path are positional and required.
parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,
                    help='Path to the config file in json format')
parser.add_argument('save_path', default=None, metavar='SAVE_PATH', type=str,
                    help='Path to the saved models')
parser.add_argument('--num_workers', default=1, type=int, help='Number of'
                    ' workers for each data loader, default 1')
parser.add_argument('--resume', default=0, type=int, help='If resume from'
                    ' previous run, default 0')
def train_epoch(summary, summary_writer, cfg, model, optimizer, dataloader):
    """Train the GPN for one epoch, logging averaged metrics and a FROC score
    every cfg['log_every'] steps to both the console and TensorBoard.

    NOTE(review): this uses legacy PyTorch (<=0.3) APIs throughout —
    `.cuda(async=True)` is a SyntaxError on Python >= 3.7 (async became a
    keyword; modern PyTorch uses non_blocking=True), and `.data[0]` /
    `Variable` are deprecated in favor of `.item()` / plain tensors.

    Returns the summary dict with its 'epoch' counter advanced by one.
    """
    model.train()
    steps = len(dataloader)
    dataiter = iter(dataloader)
    time_now = time.time()
    # Running sums, averaged and reset every cfg['log_every'] steps.
    loss_sum = 0
    loss_cls_sum = 0
    loss_ellipse_sum = 0
    n_proposals_sum = 0
    acc_pos_sum = 0
    acc_neg_sum = 0
    angle_err_sum = 0
    n_imgs = 0
    n_gt_boxes = 0
    froc_idx = 0
    # Per-proposal records (score, max overlap, matched gt index) collected
    # every cfg['TRAIN.FROC_EVERY'] steps within one logging window.
    froc_data_sum = np.zeros((cfg['log_every'] // cfg['TRAIN.FROC_EVERY'],
                              dataloader.batch_size,
                              cfg['TEST.RPN_POST_NMS_TOP_N'], 3))
    im_info = (cfg['MAX_SIZE'], cfg['MAX_SIZE'])
    for step in range(steps):
        img, gt_boxes, gt_ellipses = next(dataiter)
        gt_boxes = gt_boxes.cuda(async=True)
        gt_ellipses = gt_ellipses.cuda(async=True)
        labels, bbox_targets, ellipse_targets = model.ellipse_target(
            gt_boxes, gt_ellipses)

        img = Variable(img.cuda(async=True))
        labels = Variable(labels.cuda(async=True))
        ellipse_targets = Variable(ellipse_targets.cuda(async=True))

        out_cls, out_ellipse = model(img)
        loss_cls = model.loss_cls(out_cls, labels)
        loss_ellipse = model.loss_ellipse(out_ellipse, labels, ellipse_targets)
        loss = loss_cls + loss_ellipse
        acc_pos, acc_neg = functional.acc(out_cls, labels)
        # number of raw proposals with prob > 0.5
        n_proposals = functional.n_proposals(out_cls)
        angle_err = functional.angle_err(out_ellipse, labels, ellipse_targets)

        loss_sum += loss.data[0]
        loss_cls_sum += loss_cls.data[0]
        loss_ellipse_sum += loss_ellipse.data[0]
        n_proposals_sum += n_proposals.data[0]
        acc_pos_sum += acc_pos.data[0]
        acc_neg_sum += acc_neg.data[0]
        angle_err_sum += angle_err.data[0]

        optimizer.zero_grad()
        loss.backward()
        clip_grad_norm(model.parameters(), cfg['grad_norm'])
        optimizer.step()

        summary['step'] += 1

        # Periodically collect proposal/overlap data for the FROC estimate.
        if summary['step'] % cfg['TRAIN.FROC_EVERY'] == 0:
            froc_data_batch = np.zeros((dataloader.batch_size,
                                        cfg['TEST.RPN_POST_NMS_TOP_N'], 3))
            for i in range(out_cls.size(0)):
                # final proposals and scores after nms for each image
                boxes, ellipses, scores = model.ellipse_proposal(
                    out_cls[i], out_ellipse[i])
                # keep non-padded gt_boxes/gt_ellipses
                # keep = gt_boxes[i].gt(0).sum(dim=1).nonzero().view(-1)
                # overlaps = bbox_overlaps(boxes, gt_boxes[i][keep])
                keep = gt_ellipses[i].gt(0).sum(dim=1).nonzero().view(-1)
                overlaps = ellipse_overlaps(ellipses, gt_ellipses[i][keep],
                                            im_info)
                overlaps_max, idcs_max = overlaps.max(dim=1)
                n_ = scores.size(0)
                n_imgs += 1
                n_gt_boxes += keep.size(0)
                froc_data_batch[i, :n_, 0] = scores.cpu().numpy()
                froc_data_batch[i, :n_, 1] = overlaps_max.cpu().numpy()
                froc_data_batch[i, :n_, 2] = idcs_max.cpu().numpy()
            froc_data_sum[froc_idx] = froc_data_batch
            froc_idx += 1

        # Emit averaged metrics + FROC, then reset all accumulators.
        if summary['step'] % cfg['log_every'] == 0:
            time_spent = time.time() - time_now
            time_now = time.time()
            loss_sum /= cfg['log_every']
            loss_cls_sum /= cfg['log_every']
            loss_ellipse_sum /= cfg['log_every']
            n_proposals_sum = int(n_proposals_sum / cfg['log_every'])
            acc_pos_sum /= cfg['log_every']
            acc_neg_sum /= cfg['log_every']
            angle_err_sum /= cfg['log_every']
            FROC, sens = utils.froc(
                froc_data_sum.reshape((-1, cfg['TEST.RPN_POST_NMS_TOP_N'], 3)),
                n_imgs, n_gt_boxes, iou_thred=cfg['TEST.FROC_OVERLAP'])
            sens_str = ' '.join(list(map(lambda x: '{:.3f}'.format(x), sens)))

            logging.info(
                '{}, Train, Epoch : {}, Step : {}, Total Loss : {:.4f}, '
                'Cls Loss : {:.4f}, Ellipse Loss : {:.4f}, Pos Acc : {:.3f}, '
                'Neg Acc : {:.3f}, Angle Err : {:.3f}, FROC : {:.3f}, '
                'Sens : {}, #Props/Img : {}, Run Time : {:.2f} sec'
                .format(time.strftime("%Y-%m-%d %H:%M:%S"),
                        summary['epoch'] + 1, summary['step'], loss_sum,
                        loss_cls_sum, loss_ellipse_sum, acc_pos_sum,
                        acc_neg_sum, angle_err_sum, FROC, sens_str,
                        n_proposals_sum, time_spent))

            summary_writer.add_scalar(
                'train/total_loss', loss_sum, summary['step'])
            summary_writer.add_scalar(
                'train/loss_cls', loss_cls_sum, summary['step'])
            summary_writer.add_scalar(
                'train/loss_ellipse', loss_ellipse_sum, summary['step'])
            summary_writer.add_scalar(
                'train/n_proposals', n_proposals_sum, summary['step'])
            summary_writer.add_scalar(
                'train/acc_pos', acc_pos_sum, summary['step'])
            summary_writer.add_scalar(
                'train/acc_neg', acc_neg_sum, summary['step'])
            summary_writer.add_scalar(
                'train/angle_err', angle_err_sum, summary['step'])
            summary_writer.add_scalar(
                'train/FROC', FROC, summary['step'])

            loss_sum = 0
            loss_cls_sum = 0
            loss_ellipse_sum = 0
            n_proposals_sum = 0
            acc_pos_sum = 0.0
            acc_neg_sum = 0.0
            angle_err_sum = 0
            n_imgs = 0
            n_gt_boxes = 0
            froc_idx = 0
            froc_data_sum = np.zeros((
                cfg['log_every'] // cfg['TRAIN.FROC_EVERY'],
                dataloader.batch_size, cfg['TEST.RPN_POST_NMS_TOP_N'], 3))

    summary['epoch'] += 1

    return summary
def valid_epoch(summary, cfg, model, dataloader):
    """Run one full pass over the validation set and fill `summary` with
    averaged losses/accuracies and the FROC score over all images.

    NOTE(review): same legacy PyTorch APIs as train_epoch (`async=True`,
    `.data[0]`, `Variable`); also no `volatile`/`no_grad` guard, so
    gradients are tracked during validation.
    """
    model.eval()
    steps = len(dataloader)
    dataiter = iter(dataloader)
    loss_sum = 0
    loss_cls_sum = 0
    loss_ellipse_sum = 0
    n_proposals_sum = 0
    acc_pos_sum = 0
    acc_neg_sum = 0
    angle_err_sum = 0
    n_imgs = 0
    n_gt_boxes = 0
    # One (score, max overlap, matched gt index) record slab per batch.
    froc_data_sum = np.zeros((steps, dataloader.batch_size,
                              cfg['TEST.RPN_POST_NMS_TOP_N'], 3))
    im_info = (cfg['MAX_SIZE'], cfg['MAX_SIZE'])
    for step in range(steps):
        img, gt_boxes, gt_ellipses = next(dataiter)
        gt_boxes = gt_boxes.cuda(async=True)
        gt_ellipses = gt_ellipses.cuda(async=True)
        labels, bbox_targets, ellipse_targets = model.ellipse_target(
            gt_boxes, gt_ellipses)

        img = Variable(img.cuda(async=True))
        labels = Variable(labels.cuda(async=True))
        ellipse_targets = Variable(ellipse_targets.cuda(async=True))

        out_cls, out_ellipse = model(img)
        loss_cls = model.loss_cls(out_cls, labels)
        loss_ellipse = model.loss_ellipse(out_ellipse, labels, ellipse_targets)
        loss = loss_cls + loss_ellipse
        acc_pos, acc_neg = functional.acc(out_cls, labels)
        n_proposals = functional.n_proposals(out_cls)
        angle_err = functional.angle_err(out_ellipse, labels, ellipse_targets)

        froc_data_batch = np.zeros((dataloader.batch_size,
                                    cfg['TEST.RPN_POST_NMS_TOP_N'], 3))
        for i in range(out_cls.size(0)):
            # final proposals and scores after nms for each image
            boxes, ellipses, scores = model.ellipse_proposal(out_cls[i],
                                                             out_ellipse[i])
            # keep non-padded gt_boxes/gt_ellipses
            # keep = gt_boxes[i].gt(0).sum(dim=1).nonzero().view(-1)
            # overlaps = bbox_overlaps(boxes, gt_boxes[i][keep])
            keep = gt_ellipses[i].gt(0).sum(dim=1).nonzero().view(-1)
            overlaps = ellipse_overlaps(ellipses, gt_ellipses[i][keep],
                                        im_info)
            overlaps_max, idcs_max = overlaps.max(dim=1)
            n_ = scores.size(0)
            n_imgs += 1
            n_gt_boxes += keep.size(0)
            froc_data_batch[i, :n_, 0] = scores.cpu().numpy()
            froc_data_batch[i, :n_, 1] = overlaps_max.cpu().numpy()
            froc_data_batch[i, :n_, 2] = idcs_max.cpu().numpy()

        loss_sum += loss.data[0]
        loss_cls_sum += loss_cls.data[0]
        loss_ellipse_sum += loss_ellipse.data[0]
        n_proposals_sum += n_proposals.data[0]
        acc_pos_sum += acc_pos.data[0]
        acc_neg_sum += acc_neg.data[0]
        angle_err_sum += angle_err.data[0]
        froc_data_sum[step] = froc_data_batch

    FROC, sens = utils.froc(
        froc_data_sum.reshape((-1, cfg['TEST.RPN_POST_NMS_TOP_N'], 3)),
        n_imgs, n_gt_boxes, iou_thred=cfg['TEST.FROC_OVERLAP'])
    sens_str = ' '.join(list(map(lambda x: '{:.3f}'.format(x), sens)))

    summary['loss'] = loss_sum / steps
    summary['loss_cls'] = loss_cls_sum / steps
    summary['loss_ellipse'] = loss_ellipse_sum / steps
    summary['n_proposals'] = int(n_proposals_sum / steps)
    summary['acc_pos'] = acc_pos_sum / steps
    summary['acc_neg'] = acc_neg_sum / steps
    summary['angle_err'] = angle_err_sum / steps
    summary['FROC'] = FROC
    summary['sens_str'] = sens_str

    return summary
def run(args):
with open(args.cfg_path) as f:
cfg = json.load(f)
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
if not args.resume:
with open(os.path.join(args.save_path, 'cfg.json'), 'w') as f:
json.dump(cfg, f, indent=1)
model = GPN(cfg).cuda()
optimizer = SGD(model.parameters(), lr=cfg['lr'], momentum=cfg['momentum'],
weight_decay=cfg['weight_decay'])
dataloader_train = DataLoader(DeepLesionDataset(cfg, '1'),
batch_size=cfg['TRAIN.IMS_PER_BATCH'],
num_workers=args.num_workers,
drop_last=False,
shuffle=True)
dataloader_valid = DataLoader(DeepLesionDataset(cfg, '2'),
batch_size=cfg['TEST.IMS_PER_BATCH'],
num_workers=args.num_workers,
drop_last=False)
summary_train = {'epoch': 0, 'step': 0}
summary_valid = {'loss': float('inf'), 'loss_cls': float('inf'),
'loss_ellipse': float('inf')}
summary_writer = SummaryWriter(args.save_path)
FROC_valid_best = 0
epoch_start = 0
if args.resume:
ckpt_path = os.path.join(args.save_path, 'train.ckpt')
ckpt = torch.load(ckpt_path)
model.load_state_dict(ckpt['state_dict'])
summary_train = {'epoch': ckpt['epoch'], 'step': ckpt['step']}
FROC_valid_best = ckpt['FROC_valid_best']
epoch_start = ckpt['epoch']
for epoch in range(epoch_start, cfg['epoch']):
lr = utils.lr_schedule(cfg['lr'], cfg['lr_factor'],
summary_train['epoch'], cfg['lr_epoch'])
for param_group in optimizer.param_groups:
param_group['lr'] = lr
summary_train = train_epoch(summary_train, summary_writer, cfg, model,
optimizer, dataloader_train)
time_now = time.time()
summary_valid = valid_epoch(summary_valid, cfg, model,
dataloader_valid)
time_spent = time.time() - time_now
logging.info(
'{}, Valid, Epoch : {}, Step : {}, Total Loss : {:.4f}, '
'Cls Loss : {:.4f}, Ellipse Loss : {:.4f}, Pos Acc : {:.3f}, '
'Neg Acc : {:.3f}, Angle Err : {:.3f}, FROC : {:.3f}, Sens : {}, '
'#Props/Img : {}, Run Time : {:.2f} sec'
.format(time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['epoch'], summary_train['step'],
summary_valid['loss'], summary_valid['loss_cls'],
summary_valid['loss_ellipse'], summary_valid['acc_pos'],
summary_valid['acc_neg'], summary_valid['angle_err'],
summary_valid['FROC'], summary_valid['sens_str'],
summary_valid['n_proposals'], time_spent))
summary_writer.add_scalar('valid/total_loss',
summary_valid['loss'],
summary_train['step'])
summary_writer.add_scalar('valid/loss_cls',
summary_valid['loss_cls'],
summary_train['step'])
summary_writer.add_scalar('valid/loss_ellipse',
summary_valid['loss_ellipse'],
summary_train['step'])
summary_writer.add_scalar('valid/n_proposals',
summary_valid['n_proposals'],
summary_train['step'])
summary_writer.add_scalar('valid/acc_pos',
summary_valid['acc_pos'],
summary_train['step'])
summary_writer.add_scalar('valid/acc_neg',
summary_valid['acc_neg'],
summary_train['step'])
summary_writer.add_scalar('valid/angle_err',
summary_valid['angle_err'],
summary_train['step'])
summary_writer.add_scalar('valid/FROC',
summary_valid['FROC'],
summary_train['step'])
if summary_valid['FROC'] > FROC_valid_best:
FROC_valid_best = summary_valid['FROC']
torch.save({'epoch': summary_train['epoch'],
'step': summary_train['step'],
'FROC_valid_best': FROC_valid_best,
'state_dict': model.state_dict()},
os.path.join(args.save_path, 'best.ckpt'))
logging.info(
'{}, Best, Epoch : {}, Step : {}, Total Loss : {:.4f}, '
'Cls Loss : {:.4f}, Ellipse Loss : {:.4f}, Pos Acc : {:.3f}, '
'Neg Acc : {:.3f}, Angle Err : {:.3f}, FROC : {:.3f}, '
'Sens : {}, #Props/Img : {}'
.format(time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['epoch'], summary_train['step'],
summary_valid['loss'], summary_valid['loss_cls'],
summary_valid['loss_ellipse'],
summary_valid['acc_pos'], summary_valid['acc_neg'],
summary_valid['angle_err'], summary_valid['FROC'],
summary_valid['sens_str'],
summary_valid['n_proposals']
))
torch.save({'epoch': summary_train['epoch'],
'step': summary_train['step'],
'FROC_valid_best': FROC_valid_best,
'state_dict': model.state_dict()},
os.path.join(args.save_path, 'train.ckpt'))
summary_writer.close()
def main():
    """Entry point: configure logging, parse CLI arguments and launch the run."""
    logging.basicConfig(level=logging.INFO)
    run(parser.parse_args())


if __name__ == '__main__':
    main()
|
# Decide whether an earthquake alert must be sent.
# Rule: there is an earthquake when the scale value is greater than 5;
# the alert is sent only when the population (in millions, "MM") is greater than 1.
valor_escala = float(input("Favor ingrese el valor en escala "))
num_habitantes = float(input("Favor ingrese el numero de habitantes en MM "))

# Single chain: the redundant re-test of num_habitantes <= 1 and the
# dead commented-out alternatives (options A/B) were removed.
if valor_escala > 5 and num_habitantes > 1:
    print("Si hay un sismo y debo enviar alerta")
elif valor_escala > 5:
    print("Si hay un sismo")
else:
    print("No hay un sismo")
|
# Geschreven door Mark
import tkinter as tk
from GUI import Input
from Database import Database
from Billing import Billing
from tkinter import BOTH, END, LEFT
import uuid
from Email import Document
from Email import Email
class BillingEnglish(tk.Frame):
    """English-language billing page.

    Collects the customer's name, e-mail and account number for a parked
    car, upserts them in the database and e-mails an invoice.
    """

    _emailInput = None
    _nameInput = None
    _lastNameInput = None
    _billingAccount = None
    # record guid of the license plate currently being edited
    _licenseGuid = None
    _informationLabel = None

    def __init__(self, parent, controller):
        """Initialise the frame and build the page widgets."""
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.billingViewEnglish(controller)

    def billingViewEnglish(self, controller):
        """Lay out the page widgets and hook up the entry inputs."""
        self.controller = controller
        frameLeft = tk.Frame(self, bg="white")
        frameLeft.pack(side="left", fill="both", ipady="240", ipadx="114")
        frameRight = tk.Frame(self, bg="#FF554F", bd="1")
        frameRight.pack(side="right", fill="both", ipady="240", ipadx="50")
        self._informationLabel = tk.Label(frameLeft, text="Fill in your data\n"
                                                          "for automatic facturation ",
                                          font="Helvetica, 14", bg="#313D4B", fg="white")
        self._informationLabel.pack(pady="57", ipady="10", ipadx="120")
        frameLeftInput = tk.Frame(frameLeft, bg="#313D4B")
        frameLeftInput.pack(pady="98")

        def billing_format():
            """Build the four labelled billing entry fields and return the entries."""
            billingInput = ["Name:", "Lastname", "E-mail:", "Account Number:"]
            billingOutput = []
            for field in billingInput:
                frame = tk.Frame(frameLeftInput, width="42", bg="white")
                frame.pack(padx="100", pady="10", ipadx="10")
                label = tk.Label(frame, text=field, anchor="w", width="16", bg="white", fg="#313D4B")
                label.pack(side="left")
                entry = tk.Entry(frame, width="30", bg="white", fg="#313D4B", relief="flat")
                entry.pack(side="right")
                billingOutput.append(entry)
            self._nameInput = billingOutput[0]
            self._lastNameInput = billingOutput[1]
            self._emailInput = billingOutput[2]
            self._billingAccount = billingOutput[3]
            return billingOutput

        entries = billing_format()
        sendButton = tk.Button(frameLeftInput, text="SEND", width="40", height="2", bg="#ff554f", fg="white",
                               font="Helvetica, 10", command=(lambda: self.Send(entries)))
        sendButton.pack(side="bottom", pady="10")
        sendButton.bind('<Return>', (lambda event: self.Send(entries)))
        buttonHome = tk.Button(frameRight, text="Next \n customer", width="24", height="6", bg="white",
                               command=(lambda: controller.show_frame("StartPageEnglish")))
        buttonHome.pack(padx="10", pady="20")

    def Send(self, entries):
        """Validate the entry fields, upsert the customer record and e-mail the invoice."""
        # TODO: validate the IBAN / account number
        values = Input.Input.get(Input.Input, entries)
        for item in values:
            if not item:
                self.ShowMessage("One or more field \nare incomplete")
                return
        name = self.CreateEmptyIfNone(values[0])
        lastname = self.CreateEmptyIfNone(values[1])
        emailAddress = self.CreateEmptyIfNone(values[2])
        accountnumber = self.CreateEmptyIfNone(values[3])
        recordguid = Database.ParseRecordGuid(self._licenseGuid)
        licenseplate = self.GetLicensePlate(recordguid)
        # Upsert: update the customer if one already exists for this car, else insert.
        exists = Database.Query("SELECT RecordGuid FROM Customer WHERE ref_Car = ?", recordguid)
        if any(exists):
            Database.Insert("UPDATE Customer set [Name] = ?, [Lastname] = ?, [Email] = ?, [Account_number] = ? where ref_Car = ?", name, lastname, emailAddress, accountnumber, recordguid)
        else:
            Database.Insert("INSERT INTO Customer (Name,Lastname,Email,Account_number,ref_Car) VALUES (?,?,?,?,?)",
                            name, lastname, emailAddress, accountnumber, recordguid)
        # Only send an invoice when there is a completed, not-yet-billed parking session.
        items = Database.Query("SELECT RecordGuid FROM Park_times WHERE Arrival IS NOT NULL AND Departure IS NOT NULL AND Is_send = 0")
        if not any(items):
            return
        self.ShowMessage("An e-mail will be send!")
        billing = Billing.Billing()
        payment = billing.parkedTime(recordguid)
        print(payment)
        document = Document.EmailDocument()
        documentName = document.CreateDocument(name, licenseplate, payment[1], 3.0, payment[0])
        # TODO: attaching the document does not work completely yet
        email = Email.SendEmail()
        email.SetSubject("Automatic invoice")
        email.AppendEmail(emailAddress)
        email.SetMessage("In the attachments you will find your invoice")
        email.Send(documentName)

    def CreateEmptyIfNone(self, s):
        """Return '' for None, otherwise str(s)."""
        if s is None:
            return ''
        return str(s)

    def ShowMessage(self, message):
        """Show *message* in the information label (no-op for empty input)."""
        if not self._informationLabel:
            return
        if not message:
            return
        self._informationLabel.config(text=message)

    def TryParse(self, value, type):
        """Return True when *value* can be converted by *type* (e.g. int, float)."""
        # NOTE: the parameter name shadows the builtin ``type``; kept for
        # backward compatibility with existing callers.
        try:
            type(value)
            return True
        # BUG FIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only conversion failures mean "not parseable".
        except (TypeError, ValueError):
            return False

    def Update(self, data):
        """Fill the entry fields from *data* (a customer row), or switch to the
        payment page when there is no data."""
        if data is None:
            self.controller.show_frame("PayEnglish")
            return
        self._licenseGuid = self.CreateEmptyIfNone(data["recordguid"])
        self._billingAccount.delete(0, END)
        self._billingAccount.insert(0, self.CreateEmptyIfNone(data["account_number"]))
        self._emailInput.delete(0, END)
        self._emailInput.insert(0, self.CreateEmptyIfNone(data["email"]))
        self._nameInput.delete(0, END)
        self._nameInput.insert(0, self.CreateEmptyIfNone(data["name"]))
        self._lastNameInput.delete(0, END)
        self._lastNameInput.insert(0, self.CreateEmptyIfNone(data["lastname"]))

    def GetLicensePlate(self, recordGuid):
        """Return the license plate for *recordGuid*, or None when the car is unknown."""
        query = Database.Query("SELECT License_plate FROM Car WHERE RecordGuid = ?", recordGuid)
        if not any(query):
            return None
        # TODO: use a scalar query instead of fetching a full row
        return query[0]["license_plate"]
from seeltools.utilities.log import logger
from seeltools.utilities.parse import read_from_xml_node, xml_to_objfy
class Relationship(object):
    """Belong-to-belong tolerance matrix (enemy/neutral/ally/own) loaded from
    a ``relationship`` XML node; stores pair tolerances in a flat dict keyed
    by a packed (forwhom, who) index."""

    def __init__(self):
        # Flat pair->tolerance lookup; replaced by a dict in LoadFromXML.
        self.pTolerance = 0
        # Snapshot of pTolerance taken right after loading (see LoadFromXML).
        self.pDefaultTolerance = 0
        # Numeric tolerance used for pairs not explicitly listed in the XML.
        self.defaultTolerance = 1
        # Valid belong-ID range, inclusive; maxID < minID means "not loaded yet".
        self.minID = 0
        self.maxID = -1
        self.toleranceMap = {}  # ??? can this be useful?
        self.toleranceMap["enemy"] = {"name": "enemy",
                                      "tolerance": 1}
        self.toleranceMap["neutral"] = {"name": "neutral",
                                        "tolerance": 2}
        self.toleranceMap["ally"] = {"name": "ally",
                                     "tolerance": 3}
        self.toleranceMap["own"] = {"name": "own",
                                    "tolerance": 4}
        self.toleranceList = list(self.toleranceMap.values())

    def LoadFromXML(self, xmlFile, copy_to_default: bool = True):
        """Populate the tolerance table from *xmlFile*.

        Raises ValueError on a wrong root tag, an unknown format type, or
        when MinPlayerID > MaxPlayerID.
        """
        xmlNode = xml_to_objfy(xmlFile)
        if xmlNode.tag != "relationship":
            raise ValueError("Relationship XML should contain root tag 'relationship'!")
        formatType = 0
        self.minID = int(read_from_xml_node(xmlNode, "MinPlayerID"))
        self.maxID = int(read_from_xml_node(xmlNode, "MaxPlayerID"))
        default_tolerance_name = read_from_xml_node(xmlNode, "DefaultTolerance")
        self.defaultTolerance = self.GetToleranceByName(default_tolerance_name)
        if self.minID <= self.maxID:
            # Clamp the range to at most 1001 ids.
            if self.maxID - self.minID + 1 > 1000:
                self.maxID = self.minID + 1001
                logger.warning(f"Tolerance range can't be more than 1001, for example 1000-2001 range is valid."
                               f"Reseting max value to be {self.maxID}")
            # Pre-fill the pair table with the default tolerance.
            # NOTE(review): SetTolerance/GetTolerance index with stride
            # (maxID - minID + 1), so ``tolerance_range_len**2`` (without +1)
            # may pre-fill fewer slots than are addressable; since pTolerance
            # is a dict, unlisted pairs would raise KeyError in GetTolerance —
            # confirm the intended table size.
            self.pTolerance = {}
            tolerance_range_len = self.maxID - self.minID
            tolerance_id_range = range(0, tolerance_range_len**2)
            for tolerance_id in tolerance_id_range:
                self.pTolerance[tolerance_id] = self.defaultTolerance
            format_type_read = read_from_xml_node(xmlNode, "FormatType", do_not_warn=True)
            if format_type_read is not None:
                formatType = int(format_type_read)
            if formatType != 0:
                if formatType == 1:
                    # Format 1 is the save-game format (currentmap.xml) and is unsupported.
                    raise NotImplementedError("Can't work with saves yet, do not load from currentmap.xml")
                    self.LoadFormat(xmlFile, xmlNode, 1)  # ??? do we even need this for format_type 1?
                else:
                    raise ValueError(f"Invalid format type {formatType} for relationship")
            else:
                self.LoadFormat(xmlFile, xmlNode, 0)
            if copy_to_default:
                # NOTE(review): this aliases (not copies) pTolerance, so later
                # SetTolerance calls also mutate pDefaultTolerance — confirm.
                self.pDefaultTolerance = self.pTolerance
        else:
            raise ValueError("Relationship MinPlayerID should be less than MaxPlayerID")

    def LoadFormat(self, xmlFile, xmlNode, format_type: int = 0):
        """Read every ``<set>`` child and apply its tolerance to the table."""
        for rel in xmlNode.iterchildren():
            if rel.tag != "set":
                logger.warning(f"Invalid tag {rel.tag} in Relationship map {xmlNode.base}")
            else:
                tolerance = self.GetToleranceByName(read_from_xml_node(rel, "tolerance"))
                for_whom = read_from_xml_node(rel, "forwhom")
                who = read_from_xml_node(rel, "who")
                self.SetTolerance(for_whom, who, tolerance)

    def SaveFormat(self, xmlFile, xmlNode, format_type: int = 0):
        """Not implemented yet."""
        raise NotImplementedError("Not imlemented SaveFormat method for Relationship")

    def SaveToXML(self, xmlFile, xmlNode):
        """Not implemented yet."""
        raise NotImplementedError("Not imlemented SaveToXML method for Relationship")

    def SetTolerance(self, for_whom, who, tolerance):
        """Set *tolerance* symmetrically for every (for_whom, who) belong pair.

        *for_whom* and *who* are whitespace-separated id strings.
        NOTE(review): out-of-range belongs are only warned about — they are
        still written into the table below; confirm whether they should be
        skipped instead.
        """
        invalid_belongs = []
        min_id = self.minID
        max_id = self.maxID
        for_whom = [int(belong) for belong in for_whom.split()]
        invalid_belongs.extend([bel for bel in for_whom if bel > self.maxID or bel < self.minID])
        who = [int(belong) for belong in who.split()]
        invalid_belongs.extend([bel for bel in who if bel > self.maxID or bel < self.minID])
        if invalid_belongs:
            for belong in invalid_belongs:
                logger.warning(f"Invalid belong {belong} listed in relationship map! "
                               f"Belong should be between {self.minID} and {self.maxID}")
        for who_belong in who:
            for whom_belong in for_whom:
                if whom_belong != who_belong:
                    # strange native way to store belong to belong tolerance mappings:
                    # packed index = col + row * (range width), offset by minID.
                    self.pTolerance[whom_belong + (who_belong - min_id) * (max_id - min_id + 1) - min_id] = tolerance
                    self.pTolerance[who_belong + (whom_belong - min_id) * (max_id - min_id + 1) - min_id] = tolerance
                else:
                    logger.warning(f"Belong {who_belong} is in both 'forwhom' and 'who' attribs of set!")

    def GetTolerance(self, belongId1: int, belongId2: int, from_default: bool = False):
        """Look up the tolerance between two belongs.

        NOTE(review): for identical ids this returns the ``own`` KeyVal dict
        ({"name": ..., "tolerance": 4}) while every other path returns a plain
        int — callers must handle both shapes; confirm whether
        ``self.toleranceMap["own"]["tolerance"]`` was intended.
        """
        if from_default:
            tolerance_source = self.pDefaultTolerance
        else:
            tolerance_source = self.pTolerance
        if belongId1 == belongId2:
            return self.toleranceMap["own"]
        if belongId1 < self.minID or belongId1 > self.maxID or belongId2 < self.minID or belongId2 > self.maxID:
            return self.defaultTolerance
        else:
            return tolerance_source[belongId1 + (belongId2 - self.minID) * (self.maxID - self.minID + 1) - self.minID]

    def GetToleranceByName(self, tol_name):
        """Map a tolerance name ('enemy'/'neutral'/'ally'/'own') to its numeric
        value; unknown names fall back to the default tolerance."""
        tol = self.toleranceMap.get(tol_name)
        if tol is not None:
            return tol["tolerance"]
        else:
            return self.defaultTolerance

    def CheckTolerance(self, belongId1: int, belongId2: int, from_default: bool = False):
        """Return the declared tolerance snapped to the nearest named tolerance
        level (same dict-vs-int caveat as GetTolerance for identical ids)."""
        if belongId1 == belongId2:
            return self.toleranceMap["own"]
        if belongId1 < self.minID or belongId1 > self.maxID or belongId2 < self.minID or belongId2 > self.maxID:
            return self.defaultTolerance
        tolerance_difference = self.GetTolerance(belongId1, belongId2, from_default) - self.defaultTolerance
        nearest_tolerance = self.defaultTolerance
        min_tolerance = abs(tolerance_difference)
        tolerance_declared = self.GetTolerance(belongId1, belongId2)
        for tolerance_state in self.toleranceList:
            tolerance_for_index = tolerance_state['tolerance']
            if min_tolerance > abs(tolerance_declared - tolerance_for_index):
                nearest_tolerance = tolerance_for_index
                min_tolerance = abs(tolerance_declared - tolerance_for_index)
        return nearest_tolerance
|
import pyspark
# Spark driver setup: a SparkContext and the raw input file,
# one "hours|calls" record per line.
sc=pyspark.SparkContext()
file_text=sc.textFile('finalDataWithRepeats.txt')
def pairs(line):
    """Parse one 'hours|calls' record into an (hours, calls) pair.

    hours is floored to a multiple of 0.5 (presentation only); calls falls
    back to 0 when the field is missing or not an integer.
    """
    fields = line.split('|')
    hours = float(fields[0])
    # Too many decimals, so reduce to 0.5 steps. Only to make it pretty.
    hours = int(hours * 2) / 2
    # BUG FIX: the original did ``try: data=int(data[1])`` — ``data`` was
    # undefined, so the conversion always raised NameError and the parsed
    # value was silently discarded. Convert the calls field itself instead.
    try:
        calls = int(fields[1])
    except (IndexError, ValueError):
        calls = 0
    return hours, calls
def pairsWithAvg(x):
    """Reduce (hours, iterable-of-call-counts) to (hours, average calls)."""
    hours = x[0]
    counts = [int(value) for value in x[1]]
    return hours, sum(counts) / len(counts)
# (hours, calls)
a = file_text.map(pairs)
# (hours, [#, #, ..., #]) grouped per hour bucket
groups = a.groupByKey()
# (hours, average calls)
replaced = groups.map(pairsWithAvg)
# sort numerically by the hour bucket
s = replaced.sortBy(lambda x: float(x[0]), ascending=True)
# materialise all (hour, average) tuples on the driver
dataList = s.collect()
# BUG FIX: the output file was opened without ever being closed; ``with``
# guarantees the handle is flushed and closed even on error.
with open("finalData.txt", "w") as fout:
    for k in dataList:
        print(str(k[0]) + "|" + str(k[1]), file=fout)
|
from datetime import date, datetime, timedelta
from coinapi_service import coin_api_get_exchange_rates
# Scratch experiments with date arithmetic, kept inert inside a module-level
# string literal. NOTE(review): this is dead code — consider deleting it.
'''
date1 = date.today() # date du jour
#date2 = date1 + timedelta(10) # additionner / soustraire des jours
date3 = date(2021, 1, 30)
diff = date1-date3
# print(diff.days)
date3_str = date3.strftime("%d/%m/%Y") # Y (Years) m (months) d (days)
date4 = datetime.strptime("2021-02-01", "%Y-%m-%d").date()
date4 += timedelta(1)
print(date4)
'''
# Query window: the last 10 days up to today, as ISO "YYYY-MM-DD" strings.
date_today = date.today()
date_today_str = date_today.strftime("%Y-%m-%d")
date_start = date_today - timedelta(10)
date_start_str = date_start.strftime("%Y-%m-%d")
print("Date start", date_start_str)
print("Date end", date_today_str)
# Fetch the daily BTC/EUR closing rates from the CoinAPI wrapper and print
# one "YYYY-MM-DD : rate" line per day.
assets = "BTC/EUR"
rates = coin_api_get_exchange_rates(assets, date_start_str, date_today_str)
if rates:
    print(assets + ", nombre de cours:", len(rates))
    for r in rates:
        # .time_period_start : rate_close
        # e.g. 2021-01-01 : 24032.11824302815
        print(r["time_period_start"][:10], ":", r["rate_close"])
import sys
sys.path.append('..')
from intcode_computer import IntcodeComputer
from collections import defaultdict
class Node:
    """A maze cell: its coordinates plus its three explorable branches."""

    def __init__(self, position):
        self.position = position
        # One slot per non-backwards direction: a child Node once explored,
        # False for a wall, None while still unexplored.
        self.children = [None] * 3
# For the direction ``d`` the droid is currently facing, DIRECTION_MAP[d]
# lists the movement commands to try, with the reverse command last (used by
# traverse() to backtrack). NOTE(review): the exact left/right/straight
# ordering is inferred — confirm against the droid command spec.
DIRECTION_MAP = {
    1: [4, 3, 1, 2],
    2: [3, 4, 2, 1],
    3: [1, 2, 3, 4],
    4: [2, 1, 4, 3]
}
# Grid offset (dx, dy) applied when moving in each direction.
DIRECTION_MODIFIERS = {
    1: (0, 1),
    2: (0, -1),
    3: (-1, 0),
    4: (1, 0)
}
# Filled in by traverse(): the oxygen system's coordinates and the recursion
# depth at which it was found.
OXYGEN_POS = ()
OXYGEN_DEPTH = 0
def wait_for_output(computer):
    """Step *computer* until its output list changes (a new value is emitted)."""
    before = list(computer.outputs)
    while computer.outputs == before:
        computer.perform_next_operation()
def modify_position(pos, direction):
    """Return the coordinate one step away from *pos* in *direction*."""
    dx, dy = DIRECTION_MODIFIERS[direction]
    return (pos[0] + dx, pos[1] + dy)
def traverse(root, direction, depth=0):
    """Depth-first explore the maze from *root*, which was entered while
    moving in *direction*; record walls/floor tiles and where the oxygen
    system is.

    Relies on module globals: ``computer`` (the droid), the ``floor`` and
    ``walls`` coordinate sets, and OXYGEN_POS / OXYGEN_DEPTH.
    """
    global OXYGEN_POS, OXYGEN_DEPTH
    # Try the three directions other than the one we came from.
    for i in range(3):
        new_direction = DIRECTION_MAP[direction][i]
        computer.inputs.append(new_direction)
        wait_for_output(computer)
        output = computer.outputs[-1]
        new_pos = modify_position(root.position, new_direction)
        if output == 1 or output == 2:
            # 1 = moved onto floor; 2 = moved onto the oxygen system.
            floor.add(new_pos)
            root.children[i] = Node(new_pos)
            traverse(root.children[i], new_direction, depth + 1)
            if output == 2:
                OXYGEN_POS = new_pos
                OXYGEN_DEPTH = depth + 1
        else:
            # 0 = hit a wall; mark this branch as dead.
            walls.add(new_pos)
            root.children[i] = False
    # Backtrack so the droid is at the parent cell again when we return.
    back_direction = DIRECTION_MAP[direction][3]
    computer.inputs.append(back_direction)
    wait_for_output(computer)
    # NOTE(review): ``output`` and ``next_pos`` below are computed but never
    # used — leftover debugging state.
    output = computer.outputs[-1]
    next_pos = modify_position(root.position, back_direction)
# Load the intcode program and boot the repair droid.
# NOTE(review): the file handle from open() is never closed — acceptable for
# a one-shot script, but worth a ``with`` block.
intcode = [int(x) for x in open('input.txt', 'r').read().split(',')]
computer = IntcodeComputer(intcode[:], [])
pos = (0,0)
root = Node(pos)
walls = set()
floor = set()
floor.add(pos)
# Part 1: map the whole maze; traverse() records the oxygen system position.
# NOTE(review): printing OXYGEN_DEPTH as the answer assumes the DFS discovery
# depth equals the shortest-path distance (i.e. the maze is a tree) — confirm.
traverse(root, 1)
print(OXYGEN_DEPTH)
# Part 2: breadth-first flood fill of oxygen starting at the oxygen system;
# each loop iteration is one minute.
oxygen_filled = {OXYGEN_POS}
just_filled = {OXYGEN_POS}
count = 0
while len(oxygen_filled) != len(floor):
    next_filled = set()
    for pos in just_filled:
        # The four orthogonal neighbours, minus any known walls.
        neighbours = {
            (pos[0], pos[1]+1),
            (pos[0]+1, pos[1]),
            (pos[0], pos[1]-1),
            (pos[0]-1, pos[1])
        } - walls
        # print(neighbours)
        for n in neighbours:
            oxygen_filled.add(n)
            next_filled.add(n)
    just_filled = next_filled
    count += 1
print(count)
|
# Demo: two equivalent ways of adding entries to a dict.
k = dict(EN='English', FR='French')
# add Spanish
k['ESP'] = 'Spanish'
k.update(DE='German')
print(k['ESP'])
|
from opengever.core.upgrade import SchemaMigration
from opengever.meeting.activity.watchers import add_watcher_on_proposal_created
from opengever.meeting.activity.watchers import add_watchers_on_submitted_proposal_created
class RegisterWatchersForProposals(SchemaMigration):
    """Backfill watcher registrations for existing (submitted) proposals."""

    def migrate(self):
        """Register watchers on every proposal and submitted proposal."""
        proposal_query = {'portal_type': 'opengever.meeting.proposal'}
        for proposal in self.objects(proposal_query,
                                     'Register watchers for proposals'):
            add_watcher_on_proposal_created(proposal)

        submitted_query = {'portal_type': 'opengever.meeting.submittedproposal'}
        for submitted in self.objects(
                submitted_query,
                'Register watchers for submitted proposals'):
            add_watchers_on_submitted_proposal_created(submitted)
|
#Embedded file name: talecommon\const.py
"""
A common module to provide constants for the tale system
"""
import collections
import utillib as util
from dogma.const import attributeScanGravimetricStrength
from inventorycommon.const import ownerUnknown
from eve.common.lib.appConst import securityClassZeroSec, securityClassLowSec, securityClassHighSec
# Tale template and action/condition class enumerations.
templates = util.KeyVal(incursion=2, knownSpace=3, solarSystem=4)
actionClass = util.KeyVal(spawnOneDungeonAtEachCelestial=1, spawnManyDungeonsAtLocation=2, disableDjinns=3, addDjinnCommand=4, addSystemEffectBeacon=5, addSystemInfluenceTrigger=6, initializeInfluence=7, setBountySurcharge=8, endTale=9, spawnDungeonAtDeterministicLocation=10, spawnNPCsAtLocation=11)
conditionClass = util.KeyVal(checkSolarSystemSecurity=1, checkInitiationChance=2)
# Direction a system-influence trigger fires on.
systemInfluenceAny = 0
systemInfluenceDecline = 1
systemInfluenceRising = 2
# Schema for a single tale parameter definition.
Parameter = collections.namedtuple('Parameter', 'name parameterType defaultValue prettyName description')
# All known tale parameters, keyed by their numeric ID.
# NOTE: IDs are not contiguous (36 is absent).
parameterByID = {1: Parameter('dungeonID', int, 0, 'Dungeon ID', 'The ID of the dungeon to spawn'),
                 2: Parameter('dungeonListID', int, None, 'Dungeon list ID', 'The ID of the list of dungeons to spawn'),
                 3: Parameter('dungeonRespawnTime', int, 1, 'Dungeon respawn time', 'Dungeon respawn time in minutes'),
                 4: Parameter('dungeonScanStrength', int, 100, 'Dungeon scan strength', 'Dungeon scan strength for scanning down the dungeon'),
                 5: Parameter('dungeonSignatureRadius', float, 100.0, 'Dungeon signature radius', 'Dungeon signature radius used for scanning down the dungeon'),
                 6: Parameter('dungeonScanStrengthAttrib', float, attributeScanGravimetricStrength, 'Dungeon scan attribute', 'Dungeon scan attribute'),
                 7: Parameter('dungeonSpawnLocation', float, None, 'Dungeon spawn location', 'The locations in space where the dungeon is going to respawn'),
                 8: Parameter('dungeonSpawnQuantity', int, 1, 'Number of Dungeons', 'The number of dungeons which have to be spawned'),
                 9: Parameter('triggeredScene', int, None, 'Triggered Scene', 'The scene which is added to the trigger location when activated'),
                 10: Parameter('triggeredSceneLocation', int, None, 'Trigger Location', 'The location the triggered scene is added when the trigger is activated'),
                 11: Parameter('solarSystemSecurityMin', float, 1.0, 'Security minimum', 'The security level of the solar system has to be above this before the condition is true'),
                 12: Parameter('solarSystemSecurityMax', float, 0.0, 'Security maximum', 'The security level of the solar system has to be below this before the condition is true'),
                 13: Parameter('solarSystemSecurityMinInclusive', bool, True, 'Security minimum inclusive', 'This is whether the minimum should be inclusive or exclusive'),
                 14: Parameter('solarSystemSecurityMaxInclusive', bool, False, 'Security maximum inclusive', 'This is whether the maximum should be inclusive or exclusive'),
                 15: Parameter('disableConvoyDjinn', bool, False, 'Disable convoy djinn', 'Disables the convoy djinn during the tale'),
                 16: Parameter('disableCustomsPoliceDjinn', bool, False, 'Disable custom police djinn', 'Disables the custom police during the tale'),
                 17: Parameter('disableEmpirePoliceDjinn', bool, False, 'Disable empire police djinn', 'Disables the empire police during the tale'),
                 18: Parameter('disableMilitaryFactionDjinn', bool, False, 'Disable military faction djinn', 'Disables the military faction djinn during the tale'),
                 19: Parameter('disablePirateDjinn', bool, False, 'Disable pirate djinn', 'Disables the pirate djinn during the tale'),
                 20: Parameter('disablePirateAutoDjinn', bool, False, 'Disable pirate auto djinn', 'Disables the pirate auto djinn during the tale'),
                 21: Parameter('disablePirateStargateDjinn', bool, False, 'Disable pirate stargate djinn', 'Disables the pirate Stargate djinn during the tale'),
                 22: Parameter('djinnCommandID', int, 0, 'Djinn command ID', 'The djinn command ID in this is added to solar system the scene is running in'),
                 23: Parameter('systemEffectBeaconTypeID', int, 0, 'System effect beacon type ID', 'The type ID of the systems effect beacon'),
                 24: Parameter('systemEffectBeaconBlockCynosural', bool, False, 'System effect beacon blocks cyno', 'The system effect beacon will also block cynosural jump'),
                 25: Parameter('systemInfluenceTriggerDirection', int, systemInfluenceAny, 'Trigger direction', 'What direction the influence should change before the trigger is triggered'),
                 26: Parameter('systemInfluenceTriggerValue', float, 0.0, 'Trigger value', 'The value around which the trigger should be triggered'),
                 27: Parameter('dummyParameter', float, 0.0, 'Dummy Parameter', 'This is a dummy parameter for actions that take no parameters'),
                 28: Parameter('surchargeRate', float, 0.2, 'Surcharge Rate', 'This is the surcharge rate that will be applied to this system'),
                 29: Parameter('ownerID', int, ownerUnknown, 'Owner ID', 'Specifies the owner for items deployed through the scene.'),
                 30: Parameter('entityTypeID', int, 0, 'Entity TypeID', 'The typeID for NPC to spawn.'),
                 31: Parameter('entityAmountMin', int, 1, 'Minimum Entity Spawn Amount', 'The minimum amount of NPCs that should spawn.'),
                 32: Parameter('entityAmountMax', int, 1, 'Maximum Entity Spawn Amount', 'The maximum amount of NPCs that should spawn.'),
                 33: Parameter('entityGroupRespawnTimer', int, 30, 'Group Respawn Timer', 'The time (in minutes) it will take for the whole group to respawn if killed.'),
                 34: Parameter('entityReinforcementTypeList', int, 0, 'Reinforcement Type List', 'The list of entity groups used for reinforcing the NPC spawn - see: gd/npc/classes&groups and gd/spawnlists.'),
                 35: Parameter('entityReinforcementCooldownTimer', int, 0, 'Reinforcement Cooldown Timer', 'The time (in seconds) that can pass between the NPC group asking for reinforcements.'),
                 37: Parameter('initiateActionsChance', int, 1, 'Action Initiation Chance', 'Chance of initiating any condition/action after this condition.')}
# Reverse lookup: parameter name -> parameter ID, exposed as attributes.
# NOTE: ``dict.iteritems`` — this module targets Python 2.
parameter = util.KeyVal()
for _parameterID, _parameterLine in parameterByID.iteritems():
    setattr(parameter, _parameterLine.name, _parameterID)
# Scene-type ID ranges: conditional scenes start at 1000001 and system-level
# scenes at 5000001 (regular scene types sit below both thresholds).
sceneTypeMinConditional = 1000001
sceneTypeMinSystem = 5000001
# Name -> ID attribute bags; scenesTypes is filled by the loop below.
# NOTE(review): conditionalScenesTypes is created but never populated here.
scenesTypes = util.KeyVal()
conditionalScenesTypes = util.KeyVal()
sceneTypesByID = {1: util.KeyVal(name='headquarters', display='Headquarters'),
                  2: util.KeyVal(name='assault', display='Assault'),
                  3: util.KeyVal(name='vanguard', display='Vanguard'),
                  4: util.KeyVal(name='staging', display='Staging'),
                  5: util.KeyVal(name='testscene', display='Test Scene'),
                  6: util.KeyVal(name='system', display='Solar System'),
                  1000001: util.KeyVal(name='boss', display='Boss Spawn'),
                  1000002: util.KeyVal(name='endTale', display='End Tale'),
                  2000001: util.KeyVal(name='testscene1', display='Conditional Test Scene 1'),
                  2000002: util.KeyVal(name='testscene2', display='Conditional Test Scene 2'),
                  2000003: util.KeyVal(name='testscene3', display='Conditional Test Scene 3'),
                  2000004: util.KeyVal(name='testscene4', display='Conditional Test Scene 4'),
                  2000005: util.KeyVal(name='testscene5', display='Conditional Test Scene 5'),
                  5000001: util.KeyVal(name='managerInit', display='Initialize Manager ')}
# NOTE: ``dict.iteritems`` — this module targets Python 2.
for _constID, _constNames in sceneTypesByID.iteritems():
    setattr(scenesTypes, _constNames.name, _constID)
# Outcome codes for scene distribution attempts.
distributionStatus = util.KeyVal(success=1, locationAlreadyUsed=2, failedRequirementFromTemplate=3, exception=4, hardKilled=5)
# Maps a solar-system security class to its distribution parameter name.
securityClassToParameterString = {securityClassZeroSec: 'DistributeNullSec',
                                  securityClassLowSec: 'DistributeLowSec',
                                  securityClassHighSec: 'DistributeHighSec'}
KNOWN_SPACE_RANDOM_SEED = 42
# Blacklist categories (NOTE: 2 is unused/absent).
BLACKLIST_GENERIC = 1
BLACKLIST_INCURSIONS = 3
BLACKLIST_SLEEPER_SCOUTS = 4
DETERMINISTIC_PLACEMENT_AU_DISTANCE = 0.2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ResourceUserDataVO(object):
    """Value object for resource user-profile statistics (Alipay OpenAPI)."""

    # Field names serialized to/from the Alipay wire format.
    _FIELDS = ('profile_type', 'profile_value', 'report_date', 'user_cnt', 'user_ratio')

    def __init__(self):
        self._profile_type = None
        self._profile_value = None
        self._report_date = None
        self._user_cnt = None
        self._user_ratio = None

    @property
    def profile_type(self):
        return self._profile_type

    @profile_type.setter
    def profile_type(self, value):
        self._profile_type = value

    @property
    def profile_value(self):
        return self._profile_value

    @profile_value.setter
    def profile_value(self, value):
        self._profile_value = value

    @property
    def report_date(self):
        return self._report_date

    @report_date.setter
    def report_date(self, value):
        self._report_date = value

    @property
    def user_cnt(self):
        return self._user_cnt

    @user_cnt.setter
    def user_cnt(self, value):
        self._user_cnt = value

    @property
    def user_ratio(self):
        return self._user_ratio

    @user_ratio.setter
    def user_ratio(self, value):
        self._user_ratio = value

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict, recursing into any
        value that itself offers ``to_alipay_dict``.

        NOTE: falsy values (None, 0, '') are skipped, matching the
        generated-SDK convention.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a ResourceUserDataVO from a response dict; None for empty input."""
        if not d:
            return None
        o = ResourceUserDataVO()
        for name in ResourceUserDataVO._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
|
# Web Scraper
# Imports
import os
import re
import csv
import pickle
import requests
from bs4 import BeautifulSoup
# Address of the page to scrape.
# Start a local web server first with: python -m http.server
PAGE = "http://localhost:8000/index.html"
# Função para extrair os dados
def extrai_dados(cb):
    """Extract one car record (name, year, cylinders, weight, acceleration)
    from a single ``car_block`` element."""
    str_name = cb.find('span', class_='car_name').text

    # Launch year: the text looks like "(1970, ..."; drop the '(' and keep
    # the part before the first comma.
    str_year = cb.find('span', class_='from').text.replace('(', '')
    year = int(str_year.rsplit(sep=',')[0])
    assert year > 0, f"Esperando que o ano seja positivo e não {year}"

    # Number of cylinders, as int.
    cylinders = int(cb.find('span', class_='cylinders').text)
    assert cylinders > 0, f"Esperando que o número de cilindros seja positivo e não {cylinders}"

    # Weight uses a comma as decimal separator; convert it for float().
    weight = float(cb.find('span', class_='weight').text.replace(',', '.'))
    assert weight > 0, f"Esperando que o peso seja positivo e não {weight}"

    acceleration = float(cb.find('span', class_='acceleration').text)
    assert acceleration > 0, f"Expecting acceleration to be positive"

    # One dict per extracted row.
    return dict(name=str_name, year=year, cylinders=cylinders,
                weight=weight, acceleration=acceleration)
def processa_blocos_carros(soup):
    """Scrape every ``div.car_block`` element from *soup*, print a summary
    and write the rows to ``dados_copiados_v1.csv``."""
    car_blocks = soup.find_all('div', class_='car_block')

    # One dict per car block.
    linhas = [extrai_dados(cb) for cb in car_blocks]

    print(f"\nTemos {len(linhas)} linhas de dados retornadas do scraping da página!")
    # Show the first and last extracted rows.
    print("\nPrimeira linha copiada:")
    print(linhas[0])
    print("\nÚltima linha copiada:")
    print(linhas[-1])
    print("\n")

    # BUG FIX: fieldnames previously read the leaked loop variable ``linha``,
    # which raises NameError when there are no car blocks; use the first row.
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open("dados_copiados_v1.csv", "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=linhas[0].keys())
        writer.writeheader()
        writer.writerows(linhas)
# Execução principal do programa
# Main program entry point.
if __name__ == "__main__":
    # Cache file holding the pickled requests.Response of the last fetch.
    filename = 'dados_copiados_v1.pickle'
    # If the cache already exists, load it instead of hitting the server.
    # NOTE(review): unpickling is only safe because this cache file is
    # produced locally by this very script — never unpickle untrusted data.
    if os.path.exists(filename):
        with open(filename, 'rb') as f:
            # NOTE(review): "(unknown)" looks like a redacted {filename}
            # placeholder in this and the message below — confirm.
            print(f"\nCarregando o cache a partir do arquivo (unknown)")
            result = pickle.load(f)
    # Otherwise fetch the page and write the cache.
    else:
        print(f"\nCopiando dados da página {PAGE}.")
        result = requests.get(PAGE)
        with open(filename, 'wb') as f:
            print(f"\nGravando o cache em (unknown)")
            pickle.dump(result, f)
    # Abort unless the (possibly cached) response was an HTTP 200.
    assert result.status_code == 200, f"Obteve status {result.status_code} verifique sua conexão!"
    # Parse the page HTML and process the car blocks.
    texto_web = result.text
    soup = BeautifulSoup(texto_web, 'html.parser')
    processa_blocos_carros(soup)
|
# emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
"""
Dataset loading for ITK images and their respective labels within their metadata
header
"""
# Author: Nicolas Toussaint <nicolas.toussaint@gmail.com>
# King's College London, UK
import SimpleITK as sitk
import numpy as np
import os
from torch.utils.data import Dataset
# Filename extensions accepted as ITK-readable images.
IMG_EXTENSIONS = ['.nii.gz', '.nii', '.mha', '.mhd']
def _is_image_file(filename):
"""
Is the given extension in the filename supported ?
"""
# FIXME: Need to add all available SimpleITK types!
IMG_EXTENSIONS = ['.nii.gz', '.nii', '.mha', '.mhd']
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def load_image(fname):
    """
    Load supported image and return the loaded image.

    :param fname: path to an ITK-readable image file (see IMG_EXTENSIONS)
    :return: the loaded ``SimpleITK.Image``
    """
    return sitk.ReadImage(fname)
def save_image(itk_img, fname):
    """
    Save ITK image with the given filename.

    :param itk_img: the ``SimpleITK.Image`` to write
    :param fname: destination path; the extension selects the format
    """
    sitk.WriteImage(itk_img, fname)
def load_metadata(itk_img, key):
    """
    Load the metadata of the input itk image associated with key.

    Returns None when *key* is absent from the image header.
    """
    if not itk_img.HasMetaDataKey(key):
        return None
    return itk_img.GetMetaData(key)
def extractlabelfromfile(fname):
    """Return the value of the first ``Label = <value>`` line in *fname*,
    or None when no such line exists."""
    # FIX: the original called f.close() inside the ``with`` block — the
    # context manager already closes the file on every exit path.
    with open(fname) as f:
        for line in f:
            if "Label =" in line:
                return line.split(' = ')[1].strip()
    return None
def find_classes(filenames):
    """Return the sorted list of distinct labels found in *filenames* and a
    label -> index mapping."""
    unique_labels = sorted({extractlabelfromfile(fname) for fname in filenames})
    class_to_idx = {label: index for index, label in enumerate(unique_labels)}
    return unique_labels, class_to_idx
def calculate_class_cardinality(filenames):
    """Count how many of *filenames* carry each label, ordered by label index."""
    _, class_to_idx = find_classes(filenames=filenames)
    label_indices = [class_to_idx[extractlabelfromfile(fname)] for fname in filenames]
    _, counts = np.unique(label_indices, return_counts=True)
    return counts
def calculate_sample_weights(filenames):
    """Inverse-class-frequency weight per file, normalized to sum to 1."""
    _, class_to_idx = find_classes(filenames=filenames)
    label_indices = [class_to_idx[extractlabelfromfile(fname)] for fname in filenames]
    _, counts = np.unique(label_indices, return_counts=True)
    # Probability of each class, then each sample's own class probability.
    class_prob = counts / float(np.sum(counts))
    per_sample_prob = np.array([class_prob[idx] for idx in label_indices])
    weights = 1. / per_sample_prob
    return weights / np.sum(weights)
class ITKMetaDataClassification(Dataset):
    """
    Arguments
    ---------
    root : string
        Root directory of dataset. The folder should contain all images for each
        mode of the dataset ('train', 'validate', or 'infer'). Each mode-version
        of the dataset should be in a subfolder of the root directory
        The images can be in any ITK readable format (e.g. .mha/.mhd)
        For the 'train' and 'validate' modes, each image should contain a metadata
        key 'Label' in its dictionary/header
    mode : string, (Default: 'train')
        'train', 'validate', or 'infer'
        Loads data from these folders.
        train and validate folders both must contain subfolders images and labels while
        infer folder needs just images subfolder.
    transform : callable, optional
        A function/transform that takes in input itk image or Tensor and returns a
        transformed
        version. E.g, ``transforms.RandomCrop``
    """

    def __init__(self, root, mode='train', transform=None, target_transform=None):
        # training set or test set
        assert(mode in ['train', 'validate', 'infer'])
        self.mode = mode
        if mode == 'train':
            self.root = os.path.join(root, 'train')
        elif mode == 'validate':
            self.root = os.path.join(root, 'validate')
        else:
            # 'infer' mode: fall back to the root itself when no infer/ exists
            self.root = os.path.join(root, 'infer') if os.path.exists(os.path.join(root, 'infer')) else root

        def gglob(path, regexp=None):
            """Recursive glob
            """
            import fnmatch
            import os
            matches = []
            if regexp is None:
                regexp = '*'
            # followlinks so symlinked dataset folders are traversed too
            for root, dirnames, filenames in os.walk(path, followlinks=True):
                for filename in fnmatch.filter(filenames, regexp):
                    matches.append(os.path.join(root, filename))
            return matches

        # Get filenames of all the available images
        self.filenames = [y for y in gglob(self.root, '*.*') if _is_image_file(y)]
        if len(self.filenames) == 0:
            raise(RuntimeError("Found 0 images in subfolders of: " + self.root + "\n"
                               "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
        self.filenames.sort()
        self.transform = transform
        self.target_transform = target_transform
        # Label vocabulary and class statistics are computed once, up front,
        # so __getitem__ only has to one-hot encode.
        classes, class_to_idx = find_classes(self.filenames)
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.class_cardinality = calculate_class_cardinality(self.filenames)
        self.sample_weights = calculate_sample_weights(self.filenames)

    def __getitem__(self, index):
        """
        Arguments
        ---------
        index : int
            index position to return the data
        Returns
        -------
        tuple: (image, label) where label the organ apparent in the image
            (in 'infer' mode, or when the image has no 'Label' header,
            only the image is returned)
        """
        image = load_image(self.filenames[index])
        labels = None
        label = load_metadata(image, 'Label')
        if label is not None:
            # One-hot encode the label over the known class vocabulary.
            labels = [0] * len(self.classes)
            labels[self.class_to_idx[label]] = 1
            labels = np.array(labels, dtype=np.float32)
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            labels = self.target_transform(labels)
        if (self.mode == 'infer') or (labels is None):
            return image
        else:
            return image, labels

    def __len__(self):
        # Number of images found under the selected mode's folder.
        return len(self.filenames)

    def get_filenames(self):
        # Plain accessors kept for API compatibility with existing callers.
        return self.filenames

    def get_root(self):
        return self.root

    def get_classes(self):
        return self.classes

    def get_class_cardinality(self):
        return self.class_cardinality

    def get_sample_weights(self):
        return self.sample_weights
|
import os
from statsmodels.tsa.api import Holt
import pandas as pd
from sklearn import svm
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from datetime import date, timedelta, datetime
import numpy as np
from settings import file_to_load
########## SETTINGS ##########
forecast_days = 20            # days to predict beyond the observed data
show_fit_on_actual = False    # also plot each model's fit over the observed range
show_that_one_giant_red_line = True   # toggle Holt's exponential-trend curve
ignore_shitty_ones = True     # skip the SVR kernels that fit this data poorly
##############################
# Kernel list for the SVR sweep at the bottom of the script.
if ignore_shitty_ones:
    kernels = ['linear', 'poly']
else:
    kernels = ['linear', 'poly', 'rbf', 'sigmoid']  # for SVM stuff
def getplotdata(clf):
    """Build (x, y) forecast points from a fitted sklearn regressor.

    Starts at day 0 when the module-level `show_fit_on_actual` flag is set,
    otherwise at `tomorrow`, and predicts through
    `tomorrow + forecast_days - 1`.
    """
    first_day = 0 if show_fit_on_actual else tomorrow
    days = list(range(first_day, tomorrow + forecast_days))
    predictions = [clf.predict([[day]])[0] for day in days]
    return days, predictions
def predict_forecast_days(type, clf):
    """Plot the forecast curve of fitted regressor *clf*, labelled *type*.

    NOTE: the parameter name `type` shadows the builtin; it is kept
    unchanged for call compatibility. (Dead commented-out debug printing
    was removed.)
    """
    x1, y1 = getplotdata(clf)
    plt.plot(x1, y1, label=type)
def get_time_labels():
    """Return "month/day" tick labels from the module-level `start_date`
    through today + `forecast_days`, one per day."""
    edate = date.today() + timedelta(days=forecast_days)  # end date
    span = (edate - start_date).days
    labels = []
    current = start_date
    for _ in range(span + 1):
        labels.append("{}/{}".format(current.month, current.day))
        current += timedelta(days=1)
    return labels
##############################################################
# Load the cumulative case counts; day 0 corresponds to the first row's date.
df = pd.read_csv('data' + os.path.sep + file_to_load + '.csv')
start_date = datetime.strptime(df['date'][0], '%m/%d/%y').date()
total_data = df['total_cum'].values.tolist()
time_data = list(range(0, len(total_data)))
tomorrow = len(time_data)  # first day index beyond the observed data
air = pd.Series(total_data, time_data)

# Three Holt exponential-smoothing variants, each forecasting forecast_days.
# NOTE(review): smoothing_slope/damped are legacy parameter names in newer
# statsmodels releases (smoothing_trend/damped_trend) -- confirm the pinned
# version before upgrading.
fit1 = Holt(air).fit(smoothing_level=0.8, smoothing_slope=0.2, optimized=False)
fcast1 = fit1.forecast(forecast_days)
fit2 = Holt(air, exponential=True).fit(smoothing_level=0.8, smoothing_slope=0.2, optimized=False)
fcast2 = fit2.forecast(forecast_days)
fit3 = Holt(air, damped=True).fit(smoothing_level=0.8, smoothing_slope=0.2)
fcast3 = fit3.forecast(forecast_days)

# Plot the actual series plus each Holt forecast on a single axis.
ax = air.plot(color="black", marker="o", figsize=(12,8), legend=True, label="actual")
if show_fit_on_actual:
    fit1.fittedvalues.plot(ax=ax, color='blue', marker="o", label="Holt's Linear Trend")
fcast1.plot(ax=ax, color='blue', legend=True, label="Holt's Linear trend - Pred.")
if show_that_one_giant_red_line:
    if show_fit_on_actual:
        fit2.fittedvalues.plot(ax=ax, color='red', marker="o", label="Holt's Exponential Trend")
    fcast2.plot(ax=ax, color='red', legend=True, label="Holt's Exponential Trend - Pred.")
if show_fit_on_actual:
    fit3.fittedvalues.plot(ax=ax, color='green', marker="o", label="Holt's Additive damped trend")
fcast3.plot(ax=ax, color='green', legend=True, label="Holt's Additive damped trend - Pred.")
######################
# Regression models over (day index -> cumulative cases).
y = np.asarray(total_data).reshape(-1,1)
x = np.asarray(time_data).reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.01, random_state=422)
#####################
clf = LinearRegression(n_jobs=-1)
clf.fit(x_train, y_train)
predict_forecast_days("Linear Regression", clf)
#####################
if not ignore_shitty_ones:
    clf = SVR()
    clf.fit(x_train, y_train)
    predict_forecast_days("SVR", clf)
#####################
for k in kernels:
    clf = svm.SVR(kernel=k)
    clf.fit(x_train, y_train)
    predict_forecast_days("SVR "+k, clf)
# idk why i have to do this
plt.xticks(ticks=range(0, len(get_time_labels())+1), labels=get_time_labels())
plt.xticks(rotation=45)
plt.ylabel("Total confirmed cases")
plt.title(file_to_load)
plt.legend()
plt.grid()
plt.show()
|
import httplib2
import random
def get_data():
    """Return the first line (the session token) of DATA.lst.

    Uses a context manager so the file handle is closed; the original
    leaked the handle returned by open().
    """
    with open("DATA.lst", 'r') as fh:
        return fh.readline()
def get_content_server(body):
    """POST *body* to the nim-game endpoint and return the decoded
    response body as a string."""
    http = httplib2.Http()
    response, content = http.request(
        "http://open-abbey.appspot.com/interactive/nim-game",
        "POST",
        body)
    return content.decode()
def get_body(*args):
    """Build a request body: token only (one argument) or token plus move
    (two arguments). Raises TypeError for any other arity."""
    if len(args) == 1:
        token, = args
        return "token: " + token + "\n"
    if len(args) == 2:
        token, move = args
        return "token: " + token + "\n move: " + move + "\n"
    raise TypeError
def convert_list_str_to_list_int(element_list_str):
    """Convert a single string element to int.

    Intended to be mapped over a list of numeric strings.
    """
    return int(element_list_str)
def normalize_response(element):
    """Strip every CR and LF character from *element*."""
    for char in ('\n', '\r'):
        element = element.replace(char, '')
    return element
def next_step(cant):
    """Pick the next nim move for the three pile sizes in *cant*.

    Returns a string "pile_index amount".

    NOTE(review): when every pile examined in the taken branch is empty the
    function falls off the end and implicitly returns None -- presumably the
    server never presents such a state; confirm.
    """
    # Random starting pile for the early-game branch.
    i = random.randint(0, 2)
    if cant[0] > 9 or cant[1] > 9 or cant[2] > 9:
        # Early game: at least one pile is still large. Take (count % 3)
        # sticks to push the pile toward a multiple of 3, or 3 when it
        # already is one. Prefer the random pile i, otherwise a neighbour.
        if cant[i] > 0:
            if cant[i] % 3 != 0:
                return str(i) + " " + str((cant[i] % 3))
            else:
                return str(i) + " " + str(3)
        elif i == 0 and cant[i + 1] > 0:
            if cant[i + 1] % 3 != 0:
                return str(i + 1) + " " + str((cant[i + 1] % 3))
            else:
                return str(i + 1) + " " + str(3)
        elif i == 1 and cant[i + 1] > 0:
            if cant[i + 1] % 3 != 0:
                return str(i + 1) + " " + str((cant[i + 1] % 3))
            else:
                return str(i + 1) + " " + str(3)
        elif i == 1 and cant[i - 1] > 0:
            if cant[i - 1] % 3 != 0:
                return str(i - 1) + " " + str((cant[i - 1] % 3))
            else:
                return str(i - 1) + " " + str(3)
        elif i == 2 and cant[i - 1] > 0:
            if cant[i - 1] % 3 != 0:
                return str(i - 1) + " " + str((cant[i - 1] % 3))
            else:
                return str(i - 1) + " " + str(3)
    else:
        # End game: all piles small. Take 2 from an even pile, 1 from an odd
        # one, scanning piles left to right for the first non-empty one.
        if cant[0] > 0:
            if (cant[0] % 2) == 0:
                return str(0) + " " + str(2)
            else:
                return str(0) + " " + str(1)
        elif cant[1] > 0:
            if (cant[1] % 2) == 0:
                return str(1) + " " + str(2)
            else:
                return str(1) + " " + str(1)
        elif cant[2] > 0:
            if (cant[2] % 2) == 0:
                return str(2) + " " + str(2)
            else:
                return str(2) + " " + str(1)
# Game driver: register with the server, then alternate moves until the
# server's third response line reports "end".
# Fixes: removed a stray "|" join artifact that made the last line a syntax
# error, and replaced the direct __contains__ dunder call with the `in`
# operator.
CONTENT = get_content_server(get_body(get_data()))
CONTENT = list(map(normalize_response, CONTENT.split(' ')))
NEXT_STEP = next_step(list(map(convert_list_str_to_list_int, CONTENT[1:])))
CONTENT = get_content_server(get_body(get_data(), NEXT_STEP))
CONTENT = CONTENT.split('\n')
while "end" not in CONTENT[2]:
    print("**********************************")
    CONTENT = list(map(normalize_response, CONTENT[1].split(' ')))
    print(CONTENT[1:])
    NEXT_STEP = next_step(list(map(convert_list_str_to_list_int, CONTENT[1:])))
    print(NEXT_STEP)
    CONTENT = get_content_server(get_body(get_data(), NEXT_STEP))
    CONTENT = CONTENT.split('\n')
print("**********************************")
print(CONTENT)
import json
from gps import *
session = gps()
session.stream(WATCH_ENABLE)
# Wait for the first valid position fix (a TPV report with a 2D or 3D mode),
# rewrite icarus.json from the template with its coordinates, then stop.
for report in session:
    if report['class'] != 'TPV' or report['mode'] not in [2, 3]:
        continue
    # Context managers replace the manual open()/close() pairs so the file
    # handles are closed even if json parsing/serialisation raises.
    with open('icarus_base.json', 'r') as template:
        point = json.load(template)
    point['features'][0]['geometry']['coordinates'] = [report['lon'],
                                                       report['lat']]
    with open('icarus.json', 'w') as output:
        json.dump(point, output)
    break
|
def add(a, b):
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
def subtract(a, b):
    """Return *a* minus *b*."""
    difference = a - b
    return difference
def mult(a, b):
    """Return the product of *a* and *b*."""
    product = a * b
    return product
def div(a, b):
    """Return *a* divided by *b* using true division.

    Raises ZeroDivisionError when *b* is zero.
    """
    quotient = a / b
    return quotient
# Demo: exercise each operation with the pair (5, 4).
for operation in (add, subtract, mult, div):
    print(operation(5, 4))
|
import re
from typing import Dict
# Matches a whole self-closing tag, e.g. <br/> or <img src="x"/>;
# group 1 is the tag name, group 2 the raw attribute string.
_short_tag_re = re.compile(r'^<([a-z0-9]+)(( [a-z0-9-]+="[^"]*")*)/>$')
# Extracts one name="value" pair from the attribute string.
_attribute_re = re.compile(r' ([a-z0-9-]+)="([^"]*)"')
def parse_short_tag(string):
    """Parse a self-closing tag string into a Tag with its attributes.

    Asserts that *string* is a well-formed short tag per _short_tag_re.
    """
    match = _short_tag_re.match(string)
    assert match is not None
    tag = Tag(match.group(1))
    for name, value in _attribute_re.findall(match.group(2)):
        tag.add_attribute(name, value)
    return tag
class Tag:
    """A tag name together with its (unique) attributes."""

    def __init__(self, name):
        self.name = name
        self.attributes = {}  # type: Dict[str, str]

    def add_attribute(self, name, value):
        """Record ``name="value"``, refusing duplicate attribute names."""
        already_present = name in self.attributes
        if already_present:
            raise AssertionError("duplicate attribute '" + name + "'")
        self.attributes[name] = value
|
# Ask for the contest placing and report whether it made the podium.
# Fixes: input() returns a str, so comparing it with an int raised TypeError;
# the second branch referenced an undefined name `kerdes`; a stray "|"
# artifact on the last line was removed.
num = int(input('Hányadik lettél a versenyen?'))
if num <= 3:
    print('dobogó')
else:
    print('nem baj szép volt!')
# Generated by Django 3.1.6 on 2021-02-08 17:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the CRM app: Category, Client and Comment models.

    Auto-generated by Django 3.1.6; do not hand-edit field definitions.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=200, verbose_name='Название категории')),
                ('slug', models.SlugField(max_length=200, unique=True, verbose_name='Ссылка')),
            ],
            options={
                'verbose_name': 'Категория',
                'verbose_name_plural': 'Категории',
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=150, verbose_name='Название')),
                ('city', models.CharField(max_length=20, verbose_name='Город')),
                ('address', models.CharField(blank=True, max_length=255, verbose_name='Адрес')),
                ('decision_maker', models.CharField(blank=True, max_length=255, verbose_name='ЛПР')),
                ('phone', models.CharField(max_length=12, verbose_name='Телефон')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='Электронная почта')),
                ('first_call', models.TextField(blank=True, verbose_name='Первый звонок')),
                ('description', models.TextField(blank=True, verbose_name='Описание/Комментарий')),
                ('next_call', models.TextField(blank=True, verbose_name='Следующий звонок')),
                ('slug', models.SlugField(max_length=150, unique=True, verbose_name='Ссылка на клиента')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Создан')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлён')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='clients', to='crm.category')),
                ('responsible', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='clients', to=settings.AUTH_USER_MODEL, verbose_name='Ответственный')),
            ],
            options={
                'verbose_name': 'Клиент',
                'verbose_name_plural': 'Клиенты',
                'ordering': ('-updated', '-created'),
            },
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField(verbose_name='Комментарий')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Добавлен')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлён')),
                ('active', models.BooleanField(default=True, verbose_name='Показать')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Комментатор')),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='crm.client', verbose_name='Комментарий к клиенту')),
            ],
            options={
                'verbose_name': 'Комментарий',
                'verbose_name_plural': 'Комментарии',
                'ordering': ('created',),
            },
        ),
    ]
|
class Employee:
    """An employee whose email address is derived from the name fields.

    The ``email`` property is computed from fname/lname; its setter parses
    a "first.last@..." address back into the two name fields, and its
    deleter clears both names (after which the getter returns an error
    string instead of an address).
    """

    def __init__(self, fname, lname):
        self.fname = fname  # first-name component of the address
        self.lname = lname  # last-name component of the address

    def explain(self):
        """Return a one-line description of the employee."""
        return f"The employee name is {self.fname} {self.lname}."

    @property
    def email(self):
        """Derived address, or an error string after ``del employee.email``."""
        # Fixed: use `is None` instead of `== None` (identity comparison).
        if self.fname is None or self.lname is None:
            return "Error : Email is not found , please set your email first using setter"
        return f"{self.fname}.{self.lname}@dw.net"

    @email.setter
    def email(self, string):
        print("Setting now...")
        # Take the local part ("first.last") and split it into the two names.
        names = string.split("@")[0]
        self.fname = names.split(".")[0]
        self.lname = names.split(".")[1]

    @email.deleter
    def email(self):
        self.fname = None
        self.lname = None
# Demo of the property / setter / deleter behaviour of Employee.
# Fix: removed a stray "|" join artifact fused onto the final print line.
dark = Employee("Dark", "Duniya")
web = Employee("Web", "Duniya")
print(dark.explain())
print(web.explain())
dark.fname = "US"
print(dark.email)
dark.email = "dark.1908@dw.net"
print(dark.email)
del dark.email
print(dark.email)
print("\nHere comes an error because the email has been deleted using del function\n")
web.email = "web.1908@dw.net"
print(web.email)
print("\nFor learn how to run and access setters & property decorators , open this file in your editor and learn about this file")
print("\nThank You\n")
from config.configs import *
import pandas as pd
import argparse
import random
random.seed(1234)

parser = argparse.ArgumentParser(description="Run dataset splitting.")
parser.add_argument('--dataset', nargs='?', default='tradesy', help='dataset name')
# NOTE(review): argparse `type=bool` is a known pitfall -- bool('False') is
# True, so ANY non-empty value enables validation; a store_true flag would be
# safer. Same applies to --shuffle, and `type=list` splits the argument into
# characters. Left unchanged here to preserve the current CLI behaviour.
parser.add_argument('--validation', type=bool, default=False, help='True --> use validation, False --> no validation')
parser.add_argument('--shuffle', type=bool, default=True, help='dataset shuffle')
parser.add_argument('--column_stratify', type=list, default=[0], help='list of columns to use for stratification')
args = parser.parse_args()

# read all interactions
df = pd.read_csv(all_interactions.format(args.dataset), delimiter='\t', header=None)
# Shuffle rows within each stratification group.
df = df.groupby(args.column_stratify).apply(lambda x: x.sample(frac=1)).reset_index(drop=True)
df_grouped = df.groupby(args.column_stratify)

if args.validation:
    # Leave-one-out validation split: the last interaction of each group.
    validation = df_grouped.tail(1).drop_duplicates()
    validation[2] = [1.0] * len(validation)  # implicit-feedback rating column
    validation.to_csv(validation_path.format(args.dataset), index=False, sep='\t', header=None)
    df.drop(index=validation.index, inplace=True)
    df_grouped = df.groupby(args.column_stratify)

# Leave-one-out test split; the remaining rows form the training set.
test = df_grouped.tail(1).drop_duplicates()
train = df.drop(index=test.index)
train[2] = [1.0] * len(train)
test[2] = [1.0] * len(test)
# write to file
train.to_csv(training_path.format(args.dataset), index=False, sep='\t', header=None)
test.to_csv(test_path.format(args.dataset), index=False, sep='\t', header=None)
|
# Smallest element of `a` whose last decimal digit is 5.
a = [3, 5, 6, 8, 10, 11, 45, 86, 95, 1826, 94648, 0, 3, 17]
minimum = 5000000000  # sentinel larger than any list element
for value in a:
    ends_in_five = value % 10 == 5
    if ends_in_five and value < minimum:
        minimum = value
print(minimum)
|
def latest(scores):
    """Return the most recently added score (the last element)."""
    return scores[-1]
def personal_best(scores):
    """Return the highest score.

    Uses max() instead of an in-place sort, which also fixes the original's
    side effect of reordering the caller's list.
    """
    return max(scores)
def personal_top_three(scores):
    """Return the three highest scores in descending order.

    If fewer than three scores exist, all of them are returned (descending).
    The original body consisted entirely of commented-out attempts and
    implicitly returned None; this implements the intended behaviour.
    """
    return sorted(scores, reverse=True)[:3]
|
#!/usr/bin/env python
# coding=utf-8
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import argparse
import time
import zmq
from utils import get_local_ip
context = zmq.Context()
socket = context.socket(zmq.DEALER)

parser = argparse.ArgumentParser()
parser.add_argument('-c', '--connect-address', default='tcp://127.0.0.1:5555')
parser.add_argument('-H', '--hider', default=False, action='store_true')
parser.add_argument('-C', '--city', default='Barcelona')
args = parser.parse_args()

socket.connect(args.connect_address)
hider = args.hider
city = args.city  # BUG FIX: was `args.hider`, which silently ignored --city
myip = get_local_ip()
address = args.connect_address

# Hiders register their own endpoint with the dealer; everyone else asks
# for the list of registered peers. print() keeps this runnable on both
# Python 2 and 3 (single argument).
while 1:
    if hider:
        command = "REGISTER {}:{}".format(myip, '5555')
    else:
        command = "LIST"
    print('Sending command "{}" to dealer {}'.format(command, address))
    socket.send(command)
    print(socket.recv())
    time.sleep(1)
|
from cas.rule import RuleList
from cas.operable import Operable, translate
from cas.symbol import Symbol
class Indexed(Operable):
    """An Operable symbol carrying subscript indices, e.g. ``x_{i j}``.

    NOTE(review): ``super().__new__(cls, label)`` forwards the label string
    to the base constructor, which suggests Operable is str-like and may
    intern/cache instances -- confirm in cas.operable.
    """

    @staticmethod
    def add_order():
        # Sort slightly after plain Symbols inside sums.
        return Symbol.add_order()+.5

    @staticmethod
    def mul_order():
        # Sort slightly after plain Symbols inside products.
        return Symbol.mul_order()+.5

    # Class-level rule list; each instance also gets its own in __new__.
    rules = RuleList()

    def __new__(cls, base, indices, **kwargs):
        # Accept a single index or any iterable of indices.
        if not hasattr(indices, '__iter__'):
            indices = [indices]
        label = fr'{base}_{{{" ".join(map(str,indices))}}}'
        name = fr'{base.name}_{{{" ".join(map(str,indices))}}}'
        new_obj = super().__new__( cls, label )
        # Only initialise attributes on a freshly created object; the base
        # __new__ may return an existing instance that already carries them.
        if not hasattr(new_obj, 'name'):
            new_obj.base = base
            new_obj.name = name
            new_obj.label = label
            new_obj.indices = indices
            new_obj.rules = RuleList()
        return new_obj

    def __init__(self, *args, **kwargs):
        # All state is set in __new__; nothing to do here.
        pass

    def __call__(self, *args):
        """Apply the indexed symbol as a function to *args* (translated)."""
        return AppliedIndexedFunction( self, *map(translate, args) )
from cas.appliedindexedfunction import AppliedIndexedFunction |
from flask import Flask, jsonify, request
import os
import database.db_connector as db
# Configuration
app = Flask(__name__, static_folder='./front/build', static_url_path='/')
'''
CONNECTS FOR EVERY ROUTE TO ALLOW MULTIPLE REQUESTS
'''
# Routes
@app.route('/')
def root():
    """Serve the built React app's entry point."""
    # return render_template("main.j2")
    return app.send_static_file('index.html')
# This enables use of React Router
@app.errorhandler(404)
def not_found(e):
    """Fall back to the SPA entry point so React Router handles unknown paths."""
    return app.send_static_file('index.html')
@app.route('/users', methods=['GET', 'POST', 'DELETE', 'PUT'])
def users():
    """CRUD endpoint for the Users table; dispatches on the HTTP method.

    NOTE(review): the POST/DELETE/PUT branches interpolate request data
    straight into SQL f-strings -- SQL injection risk; switch to
    parameterized queries if db.execute_query supports them.
    """
    db_connection = db.connect_to_database()
    # if we receive a get request we need to execute a get query and return
    # all users from the DB as json.
    if request.method == 'GET':
        # execute SQL query
        query = "SELECT * FROM Users;"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        print(jsonify(results), type(results), results)
        return jsonify(results)
    # POST ROUTE
    if request.method == 'POST':
        # extract data from request object
        json_data = request.get_json()
        email = json_data['email']
        username = json_data['username']
        # execute SQL query
        query = f"INSERT INTO Users (email, username) \
            VALUE ('{email}', '{username}');"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        # return the newly added row for the front end to add
        query = "SELECT * FROM Users WHERE \
            userID = (SELECT MAX(userID) FROM Users);"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
    # DELETE ROUTE
    if request.method == 'DELETE':
        # extract data from request object
        json_data = request.get_json()
        user_id = json_data['userID']
        # execute SQL query
        query = f"DELETE FROM Users WHERE userID = '{user_id}';"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
    # PUT ROUTE
    if request.method == 'PUT':
        # extract data from request object
        json_data = request.get_json()
        email = json_data['email']
        username = json_data['username']
        userID = json_data['userID']
        # execute SQL query
        query = f"UPDATE Users SET username = '{username}', email = '{email}' WHERE userID = '{userID}';"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        query2 = "SELECT * FROM Users;"
        cursor = db.execute_query(db_connection=db_connection, query=query2)
        results = cursor.fetchall()
        return jsonify(results)
@ app.route('/grocery_lists', methods=['GET', 'POST', 'DELETE'])
def grocery_lists():
    """List/create/delete grocery lists, joined with their owning user.

    NOTE(review): POST/DELETE build SQL from request data via f-strings --
    SQL injection risk; prefer parameterized queries.
    """
    db_connection = db.connect_to_database()
    # if we receive a get request we need to execute a get query and return
    # all users from the DB as json.
    if request.method == 'GET':
        query = "SELECT username, listDate, listID \
            FROM GroceryLists JOIN Users USING (userID)"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        # return jsonify(results)
        return(jsonify(results), 200)
    if request.method == 'POST':
        # extract data from request object
        json_data = request.get_json()
        username = json_data['username']
        # listDate = json_data['listDate']
        # execute SQL query; the list is stamped with the current time
        query = f"INSERT INTO GroceryLists (userID, listDate) \
            VALUE ((SELECT userID from Users \
            WHERE username = '{username}'), NOW())"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        # get and return the row that was just added (most recent date)
        query2 = "SELECT username, listDate, listID \
            FROM GroceryLists JOIN Users USING (userID) \
            ORDER BY listDate DESC LIMIT 1"
        cursor = db.execute_query(db_connection=db_connection, query=query2)
        results = cursor.fetchall()
        print(jsonify(results), type(results), results)
        return jsonify(results)
    if request.method == 'DELETE':
        # extract data from request object
        json_data = request.get_json()
        listID = json_data['listID']
        # execute SQL query
        query = f"DELETE FROM GroceryLists WHERE listID = '{listID}';"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
@ app.route('/ingredients', methods=['GET', 'POST', 'DELETE', 'PUT'])
def ingredients():
    """CRUD endpoint for Ingredients, left-joined to their FoodGroups.

    NOTE(review): POST/PUT/DELETE interpolate request data into SQL
    f-strings -- SQL injection risk; prefer parameterized queries.
    """
    db_connection = db.connect_to_database()
    # if we receive a get request we need to execute a get query and return
    # all users from the DB as json.
    if request.method == 'GET':
        query = "SELECT \
            Ingredients.name, FoodGroups.name fgname, Ingredients.ingredientID \
            FROM Ingredients LEFT JOIN FoodGroups USING (foodGroupID)"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return(jsonify(results))
    if request.method == 'POST':
        json_data = request.get_json()
        ingredientName = json_data['name']
        # food group is optional; fall back to NULL when absent
        try:
            foodGroupName = json_data['fgname']
        except KeyError:
            foodGroupName = 'NULL'
        # execute INSERT
        query = f"INSERT INTO Ingredients (name, foodGroupID) \
            VALUE \
            ('{ingredientName}', \
            (SELECT foodGroupID from FoodGroups \
            WHERE name='{foodGroupName}'));"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        # return the inserted row
        query = "SELECT \
            Ingredients.name, FoodGroups.name fgname, Ingredients.ingredientID \
            FROM Ingredients LEFT JOIN FoodGroups USING (foodGroupID) WHERE \
            Ingredients.ingredientID = (SELECT MAX(ingredientID) FROM Ingredients);"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
    if request.method == 'PUT':
        json_data = request.get_json()
        ingredientName = json_data['name']
        foodGroupName = json_data['fgname']
        ingredientID = json_data['ingredientID']
        query = f"UPDATE Ingredients \
            SET \
            foodGroupID=(SELECT foodGroupID from FoodGroups WHERE name='{foodGroupName}'),\
            name='{ingredientName}' \
            WHERE ingredientID = '{ingredientID}'"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
    if request.method == 'DELETE':
        # extract data from request object
        json_data = request.get_json()
        ingredientID = json_data['ingredientID']
        query = f"DELETE FROM Ingredients \
            WHERE ingredientID = '{ingredientID}';"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
@ app.route('/food_group', methods=['GET', 'POST', 'DELETE', 'PUT'])
def food_group():
    """CRUD endpoint for the FoodGroups lookup table.

    NOTE(review): POST/DELETE/PUT interpolate request data into SQL
    f-strings -- SQL injection risk; prefer parameterized queries.
    """
    db_connection = db.connect_to_database()
    # if we receive a get request we need to execute a get query and return
    # all users from the DB as json.
    if request.method == 'GET':
        query = "SELECT name, foodGroupID FROM FoodGroups"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return(jsonify(results))
    if request.method == 'POST':
        # extract data from request object
        json_data = request.get_json()
        name = json_data['name']
        # execute INSERT
        query = f"INSERT INTO FoodGroups (name) VALUE ('{name}');"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        # return the inserted row
        query = "SELECT * FROM FoodGroups WHERE \
            foodGroupID = (SELECT MAX(foodGroupID) FROM FoodGroups);"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        print(jsonify(results), type(results), results)
        return jsonify(results)
    if request.method == 'DELETE':
        # extract data from request object
        json_data = request.get_json()
        foodGroupID = json_data['foodGroupID']
        # execute SQL query
        query = f"DELETE FROM FoodGroups WHERE foodGroupID = '{foodGroupID}';"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
    # PUT ROUTE
    if request.method == 'PUT':
        # extract data from request object
        json_data = request.get_json()
        foodGroupID = json_data['foodGroupID']
        name = json_data['name']
        # execute SQL query
        query = f"UPDATE FoodGroups SET name = '{name}' \
            WHERE foodGroupID = '{foodGroupID}';"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
@ app.route('/user_ingredients/<int:user_id>', methods=['GET', 'POST', 'DELETE'])
def user_ingredients(user_id):
    """Manage the User_Ingredients join table for one user (by URL id).

    NOTE(review): POST/DELETE interpolate request data into SQL f-strings --
    SQL injection risk; prefer parameterized queries.
    """
    db_connection = db.connect_to_database()
    # if we receive a get request we need to execute a get query and return
    # all users from the DB as json.
    if request.method == 'GET':
        query = f"SELECT name from User_Ingredients \
            JOIN Ingredients USING (ingredientID)\
            WHERE User_Ingredients.userID = {user_id}"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        print(jsonify(results))
        return jsonify(results)
    if request.method == 'POST':
        json_data = request.get_json()
        # ingredientName = json_data['name']
        ingredientID = json_data['ingredientID']
        # ingredientID = json_data['name']
        # execute INSERT
        query = f"INSERT INTO User_Ingredients (userID, ingredientID) \
            VALUES ({user_id}, {ingredientID});"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        # return the updated data
        query = f"SELECT name from User_Ingredients \
            JOIN Ingredients USING (ingredientID)\
            WHERE User_Ingredients.userID = {user_id}"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
    if request.method == 'DELETE':
        # extract data from request object
        json_data = request.get_json()
        # ingredientID = json_data['name']
        ingredientID = json_data['ingredientID']
        query = f"DELETE FROM User_Ingredients \
            WHERE userID={user_id} AND ingredientID={ingredientID};"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
# Route to provide the ingredients from a given grocery list
@ app.route('/grocery_list_ingredients/<int:listID>', methods=['GET', 'POST', 'DELETE'])
def grocery_list_ingredients(listID):
    """Manage the GroceryList_Ingredients join table for one list (by URL id).

    NOTE(review): POST/DELETE interpolate request data into SQL f-strings --
    SQL injection risk; prefer parameterized queries.
    """
    db_connection = db.connect_to_database()
    # if we receive a get request we need to execute a get query and return
    # all users from the DB as json.
    if request.method == 'GET':
        query = f"SELECT name, ingredientID from GroceryList_Ingredients \
            JOIN Ingredients USING (ingredientID) \
            WHERE GroceryList_Ingredients.listID = {listID}"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
    if request.method == 'POST':
        json_data = request.get_json()
        name = json_data['name']
        # add the ingredient to the composite list
        query = f"INSERT INTO GroceryList_Ingredients \
            (listID, ingredientID) \
            VALUES ('{listID}', \
            (SELECT ingredientID from Ingredients \
            WHERE name='{name}'));"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        # retrieve the updated data
        query = f"SELECT name, ingredientID from GroceryList_Ingredients \
            JOIN Ingredients USING (ingredientID) \
            WHERE GroceryList_Ingredients.listID = {listID}"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
    if request.method == 'DELETE':
        json_data = request.get_json()
        name = json_data['name']
        query = f"DELETE FROM GroceryList_Ingredients \
            WHERE ingredientID=\
            (SELECT ingredientID from Ingredients WHERE name='{name}')\
            AND listID = '{listID}';"
        cursor = db.execute_query(db_connection=db_connection, query=query)
        results = cursor.fetchall()
        return jsonify(results)
# Listener
if __name__ == "__main__":
    # Default to port 80 unless PORT is set in the environment.
    port = int(os.environ.get('PORT', 80))
    app.run(port=port, debug=False)  # debug option allows live changes
|
def Rotate_Value_Generate(AccuracyField):
    """Attach a MEL expression computing each selected node's Y-rotation speed.

    AccuracyField: name of a Maya floatField control; its value is the frame
    offset used in the finite difference. Assumes `cmds` (maya.cmds) is
    imported elsewhere in the module.
    """
    AccuracyField = cmds.floatField(AccuracyField,q=1,v=1)
    selList = cmds.ls(sl=1)
    for sel in selList:
        # Ensure the RotateSpeed attribute exists on the node.
        # NOTE(review): bare except hides unrelated errors; the query is
        # expected to raise only when the attribute does not exist yet.
        try:
            cmds.addAttr("%s.RotateSpeed"%sel,q=1,ex=1)
        except:
            cmds.addAttr(sel,ln="RotateSpeed",sn="rs",at="double",dv=0,k=1)
        # MEL: RotateSpeed = d(ry)/dt over $Offset frames, scaled to per-second.
        Mel_Expression = """
float $fps = `currentTimeUnitToFPS`;
float $Offset = {1};
float $CurrentFrame = `currentTime -query`;
float $PrevFrame = $CurrentFrame - $Offset;
float $RotateSpeed = (`getAttr -time $CurrentFrame {0}.ry` - `getAttr -time $PrevFrame {0}.ry`)/(1*$Offset)*$fps;
{0}.RotateSpeed = $RotateSpeed;
""".format(sel,AccuracyField)
        # NOTE(review): uc=all passes the Python builtin `all`; cmds.expression's
        # unitConversion flag usually takes the string "all" -- confirm intent.
        cmds.expression(o=sel,s=Mel_Expression,ae=1,uc=all)
def Translate_Value_Generate(AccuracyField):
    """Attach a MEL expression computing each selected node's XZ-plane speed.

    AccuracyField: name of a Maya floatField control; its value is the frame
    offset used in the finite difference. Assumes `cmds` (maya.cmds) is
    imported elsewhere in the module.
    """
    AccuracyField = cmds.floatField(AccuracyField,q=1,v=1)
    selList = cmds.ls(sl=1)
    for sel in selList:
        # Ensure the TranslateSpeed attribute exists on the node.
        # NOTE(review): bare except hides unrelated errors (see Rotate variant).
        try:
            cmds.addAttr("%s.TranslateSpeed"%sel,q=1,ex=1)
        except:
            cmds.addAttr(sel,ln="TranslateSpeed",sn="ts",at="double",dv=0,k=1)
        # MEL: TranslateSpeed = |d(tx,tz)|/dt over $Offset frames (Y ignored).
        Mel_Expression = """
float $fps = `currentTimeUnitToFPS`;
float $Offset = {1};
float $CurrentFrame = `currentTime -query`;
float $PrevFrame = $CurrentFrame - $Offset;
float $DistantTranslateX = `getAttr -time $CurrentFrame {0}.tx` - `getAttr -time $PrevFrame {0}.tx`;
float $DistantTranslateZ = `getAttr -time $CurrentFrame {0}.tz` - `getAttr -time $PrevFrame {0}.tz`;
float $TranslateSpeed = `sqrt ($DistantTranslateX * $DistantTranslateX + $DistantTranslateZ * $DistantTranslateZ)`/(1*$Offset)*$fps;
{0}.TranslateSpeed = $TranslateSpeed;
""".format(sel,AccuracyField)
        # NOTE(review): uc=all passes the Python builtin `all` -- see Rotate variant.
        cmds.expression(o=sel,s=Mel_Expression,ae=1,uc=all)
def Rotate_Accel_Value_Generate(AccuracyField):
    """Add a keyable RotateAccelSpeed attribute to every selected object and
    drive it with a MEL expression (central difference of the Y-rotation
    speed, per second).

    AccuracyField: name of the floatField UI control holding the frame offset.
    """
    frame_offset = cmds.floatField(AccuracyField,q=1,v=1)
    for sel in cmds.ls(sl=1):
        # BUG FIX: replaced the bare try/except existence probe with the
        # documented attributeQuery call.
        if not cmds.attributeQuery("RotateAccelSpeed", node=sel, exists=True):
            cmds.addAttr(sel,ln="RotateAccelSpeed",sn="ras",at="double",dv=0,k=1)
        Mel_Expression = """
float $fps = `currentTimeUnitToFPS`;
float $Offset = {1};
float $CurrentFrame = `currentTime -query`;
float $PrevFrame = $CurrentFrame - $Offset;
float $PrevSpeed = (`getAttr -time $PrevFrame {0}.ry` - `getAttr -time ($PrevFrame-$Offset) {0}.ry`)/(1*$Offset)*$fps;
float $NextFrame = $CurrentFrame + $Offset;
float $NextSpeed = (`getAttr -time $NextFrame {0}.ry` - `getAttr -time ($NextFrame-$Offset) {0}.ry`)/(1*$Offset)*$fps;
float $RotateAccelSpeed;
if($NextSpeed == 0 || $PrevSpeed == 0){{
    $RotateAccelSpeed = 0;
}}else{{
    $RotateAccelSpeed = ($NextSpeed - $PrevSpeed)/(1*$Offset*2)*$fps;
}}
{0}.RotateAccelSpeed = $RotateAccelSpeed;
""".format(sel,frame_offset)
        # BUG FIX: uc expects the string "all", not the builtin `all`.
        cmds.expression(o=sel,s=Mel_Expression,ae=1,uc="all")
def Translate_Accel_Value_Generate(AccuracyField):
    """Add a keyable TranslateAccelSpeed attribute to every selected object
    and drive it with a MEL expression (central difference of the XZ-plane
    speed, per second).

    AccuracyField: name of the floatField UI control holding the frame offset.
    """
    frame_offset = cmds.floatField(AccuracyField,q=1,v=1)
    for sel in cmds.ls(sl=1):
        # BUG FIX: replaced the bare try/except existence probe with the
        # documented attributeQuery call.
        if not cmds.attributeQuery("TranslateAccelSpeed", node=sel, exists=True):
            cmds.addAttr(sel,ln="TranslateAccelSpeed",sn="tas",at="double",dv=0,k=1)
        Mel_Expression = """
float $fps = `currentTimeUnitToFPS`;
float $Offset = {1};
float $CurrentFrame = `currentTime -query`;
float $PrevFrame = $CurrentFrame - $Offset;
float $DistantTranslateX = `getAttr -time $PrevFrame {0}.tx` - `getAttr -time ($PrevFrame-$Offset) {0}.tx`;
float $DistantTranslateZ = `getAttr -time $PrevFrame {0}.tz` - `getAttr -time ($PrevFrame-$Offset) {0}.tz`;
float $PrevSpeed = `sqrt ($DistantTranslateX * $DistantTranslateX + $DistantTranslateZ * $DistantTranslateZ)`/(1*$Offset)*$fps;
float $NextFrame = $CurrentFrame + $Offset;
float $DistantTranslateX = `getAttr -time $NextFrame {0}.tx` - `getAttr -time ($NextFrame-$Offset) {0}.tx`;
float $DistantTranslateZ = `getAttr -time $NextFrame {0}.tz` - `getAttr -time ($NextFrame-$Offset) {0}.tz`;
float $NextSpeed = `sqrt ($DistantTranslateX * $DistantTranslateX + $DistantTranslateZ * $DistantTranslateZ)`/(1*$Offset)*$fps;
float $TranslateAccelSpeed;
if($NextSpeed == 0 || $PrevSpeed == 0){{
    $TranslateAccelSpeed = 0;
}}else{{
    $TranslateAccelSpeed = ($NextSpeed - $PrevSpeed)/(1*$Offset*2)*$fps;
}}
{0}.TranslateAccelSpeed = $TranslateAccelSpeed;
""".format(sel,frame_offset)
        # BUG FIX: uc expects the string "all", not the builtin `all`.
        cmds.expression(o=sel,s=Mel_Expression,ae=1,uc="all")
def selectObj(text):
    """Copy the name of the first selected object into the given textField,
    or clear the field when nothing is selected."""
    selection = cmds.ls(sl=1)
    value = selection[0] if selection else ""
    cmds.textField(text,e=1,tx=value)
def Cam_Match(objText,Cam):
    """Select the visualizer object, then add the camera to the selection
    (both names are read from the given textField controls)."""
    vis_name = cmds.textField(objText,q=1,tx=1)
    cam_name = cmds.textField(Cam,q=1,tx=1)
    cmds.select(vis_name)
    cmds.select(cam_name,add=1)
def Connect_Visualizer(Visualizer,Cam,Attr,SpeedAttr,MinVal,MaxVal):
    # Build a scrolling attribute-visualizer rig: read the six UI controls,
    # validate them, drive the rig's handle with an expression, bake the
    # handle's path into a curve with frame annotations, then parent-constrain
    # the rig to the chosen camera.
    # NOTE(review): this script is Python 2 (`print` statement below).
    Cam = cmds.textField(Cam,q=1,tx=1)
    Visualizer = cmds.textField(Visualizer,q=1,tx=1)
    Attr = cmds.textField(Attr,q=1,tx=1)
    SpeedAttr = cmds.floatField(SpeedAttr,q=1,v=1)
    MinVal = cmds.floatField(MinVal,q=1,v=1)
    MaxVal = cmds.floatField(MaxVal,q=1,v=1)
    # Abort on an invalid min/max range.
    if MinVal >= MaxVal:
        cmds.headsUpMessage(u"最大值最小值输入不正确")
        cmds.warning(u"最大值最小值输入不正确")
        return
    # Probe the attribute; getAttr raises if it does not exist.
    try:
        print cmds.getAttr(Attr)
    except:
        cmds.headsUpMessage(u"没有找到相关的属性!!")
        cmds.warning(u"没有找到相关的属性!!")
        return
    if not cmds.objExists(Visualizer):
        cmds.headsUpMessage(u"没有找到 Visualizer !!")
        cmds.warning(u"没有找到 Visualizer !!")
        return
    if not cmds.objExists(Cam):
        cmds.headsUpMessage(u"没有找到 Camera !!")
        cmds.warning(u"没有找到 Camera !!")
        return
    # Expected rig hierarchy: Visualizer -> [Handle, Digits], Handle -> locator.
    Handle = cmds.listRelatives(Visualizer)[0]
    Digits = cmds.listRelatives(Visualizer)[1]
    HandleLocator = cmds.listRelatives(Handle)[0]
    if not cmds.objectType( HandleLocator ) == "locator":
        cmds.headsUpMessage(u"没有找到 Handle Locator , 请检查Visualizer是否选择正确或者层级是否有问题")
        cmds.warning(u"没有找到 Handle Locator , 请检查Visualizer是否选择正确或者层级是否有问题")
        return
    # Note UndoChunk Open
    cmds.undoInfo(ock=1)
    # Note: motion expression — clamp the watched attribute onto translateY,
    # advance translateX with the frame counter divided by SpeedAttr.
    ExpressionStr = """
    {0}.translateY = min(max({1},{3}),{4});
    {0}.translateX = (frame-1)/{2};
    """.format(Handle,Attr,SpeedAttr,MinVal,MaxVal)
    ExpressionNode = cmds.expression(s=ExpressionStr,o=Handle,ae=1,uc="all")
    # Note: sample the handle over the playback range to build the curve command.
    Start = cmds.playbackOptions(q=1,min=1)
    End = cmds.playbackOptions(q=1,max=1)
    TotalCount = abs(End - Start)
    Command = ""
    posStr = ""
    cmds.progressWindow( title=u'生成曲线',
                    progress=0,
                    status=u'生成曲线中...',
                    isInterruptable=True )
    amount = 0.0
    AnnotationGrp = []
    cmds.currentTime(Start)
    transform = cmds.xform(Handle,q=1,ws=1,t=1)
    tx = transform[0]
    ty = transform[1]
    tz = transform[2]
    # Annotate the very first frame.
    StartAnnotation = cmds.annotate( Digits, tx='%s' % Start,p=(tx , ty, tz) )
    AnnotationGrp.append(StartAnnotation)
    while Start <= End:
        # Update the progress bar; allow the user to cancel.
        if cmds.progressWindow( query=True, isCancelled=True ) :
            cmds.progressWindow(endProgress=1)
            cmds.undoInfo(cck=1)
            return
        cmds.progressWindow( edit=True, progress=amount/TotalCount*100 )
        amount += 1
        cmds.currentTime(Start)
        transform = cmds.xform(Handle,q=1,ws=1,t=1)
        tx = transform[0]
        ty = transform[1]
        tz = transform[2]
        # Accumulate "(x, y, z)," triplets; the last one has no trailing comma.
        if not Start == End:
            posStr += "(%s, %s, %s)," % (tx,ty,tz)
        else:
            posStr += "(%s, %s, %s)" % (tx,ty,tz)
        # Annotate frames where the clamped value enters/leaves the min or max
        # plateau (the neighbour frame has a different Y value).
        if ty == MinVal or ty == MaxVal:
            cmds.currentTime(Start+1)
            transform = cmds.xform(Handle,q=1,ws=1,t=1)
            if not ty == transform[1]:
                Annotation = cmds.annotate( Digits, tx='%s' % Start,p=(tx , ty, tz) )
                AnnotationGrp.append(Annotation)
            cmds.currentTime(Start-1)
            transform = cmds.xform(Handle,q=1,ws=1,t=1)
            if not ty == transform[1]:
                Annotation = cmds.annotate( Digits, tx='%s' % Start,p=(tx , ty, tz) )
                AnnotationGrp.append(Annotation)
        Start += 1
    cmds.progressWindow(endProgress=1)
    Command = "cmds.curve(d=1,p=[%s])" % posStr
    # Note: build the motion curve.
    # NOTE(review): eval of a locally built command string — inputs here are
    # numeric xform results, but cmds.curve(d=1, p=[...]) could be called
    # directly with a list instead of eval.
    CrvNode = eval(Command)
    # Note: end-of-range and attribute-name annotations.
    EndAnnotation = cmds.annotate( Digits, tx='%s' % End,p=(tx , ty, tz) )
    AnnotationGrp.append(EndAnnotation)
    tx = cmds.polyEvaluate( Digits,b=True )[0][0]
    ty = cmds.polyEvaluate( Digits,b=True )[1][0]
    tz = 0
    AttrAnnotation = cmds.annotate( Digits, tx='%s' % Attr,p=(tx , ty-2, tz) )
    # Note: retarget the expression so the baked curve scrolls past the camera
    # instead of the handle moving.
    ExpressionStr = "%s.translateX = -(frame-1)/%s;" % (CrvNode,SpeedAttr)
    cmds.expression(ExpressionNode,e=1,s=ExpressionStr)
    # Note: tidy up — zero the handle and reparent curve/annotations.
    cmds.currentTime(cmds.playbackOptions(q=1,min=1))
    cmds.setAttr("%s.tx"%Handle,0)
    cmds.setAttr("%s.ty"%Handle,0)
    cmds.setAttr("%s.tz"%Handle,0)
    cmds.parent( CrvNode, Visualizer, relative=True )
    cmds.parent( AttrAnnotation, Digits, relative=True )
    cmds.setAttr("%s.displayArrow"%AttrAnnotation,0)
    for Annotation in AnnotationGrp:
        cmds.parent( Annotation, CrvNode, relative=True )
        cmds.setAttr("%s.displayArrow"%Annotation,0)
    cmds.setAttr("%s.overrideEnabled"%CrvNode,1)
    cmds.setAttr("%s.overrideColor"%CrvNode,17)
    # Note: attach the rig to the camera and offset it into view.
    pnCns = cmds.parentConstraint(Cam,Visualizer,mo=0)[0]
    cmds.move( 0, -2, -10, Visualizer, r=1 , os=1 , wd=1 )
    cmds.scale( 0.15, 0.15, 0.15, Visualizer, r=1 )
    # Note: re-apply the constraint keeping the current offset.
    cmds.parentConstraint(Cam,pnCns,e=1,mo=1)
    # Note UndoChunk Close
    cmds.undoInfo(cck=1)
def init_UI():
    """Create (or re-create) the Cam_Visualizer_Toolkit window: speed/accel
    attribute generators on top, visualizer hookup controls below."""
    # Rebuild from scratch if a previous instance of the window is open.
    if cmds.window("Cam_Visualizer_Toolkit",ex=1):
        cmds.deleteUI("Cam_Visualizer_Toolkit")
    cmds.window("Cam_Visualizer_Toolkit",t=u"速度属性生成显示工具")
    cmds.columnLayout( adj=1,columnAttach=('both', 15), rowSpacing=15, columnWidth=50 )
    cmds.separator()
    # Accuracy (frame offset) shared by the four attribute generators.
    cmds.rowLayout( numberOfColumns=2,adjustableColumn=2)
    cmds.text(l=u"计算精度",w=100)
    AccuracyField = cmds.floatField(v=0.0001,minValue=0.0001,maxValue=1)
    cmds.setParent("..")
    cmds.button(l=u"位移速度属性",c="Translate_Value_Generate('%s')" % AccuracyField)
    cmds.button(l=u"位移加速度属性",c="Translate_Accel_Value_Generate('%s')" % AccuracyField)
    cmds.button(l=u"旋转速度属性",c="Rotate_Value_Generate('%s')" % AccuracyField)
    cmds.button(l=u"旋转加速度属性",c="Rotate_Accel_Value_Generate('%s')" % AccuracyField)
    cmds.separator()
    cmds.columnLayout( adj=1, rowSpacing=5, columnWidth=50 )
    # NOTE(review): cmds.Import is not a documented maya.cmds command —
    # confirm this button actually works.
    cmds.button(l="导入物体",c="cmds.Import()")
    # Visualizer / camera pickers ("<<<" copies the current selection in).
    cmds.rowLayout( numberOfColumns=2,adjustableColumn=1)
    Visualizer = cmds.textField(pht=u"选择导入的 Visualizer 物体 点击右侧按钮选择")
    cmds.button(l="<<<",w=50,c="selectObj('%s')" % Visualizer)
    cmds.setParent("..")
    cmds.rowLayout( numberOfColumns=2,adjustableColumn=1)
    Cam = cmds.textField(pht=u"选择导入的 摄像机 物体 点击右侧按钮选择")
    cmds.button(l="<<<",w=50,c="selectObj('%s')" % Cam)
    cmds.setParent("..")
    # Attribute to visualize, scroll speed, and the clamp range.
    cmds.rowLayout( numberOfColumns=2,adjustableColumn=2)
    cmds.text(l=u"输入要显示的属性",w=100)
    Attr = cmds.textField(pht=u"例;pCube1.tx")
    cmds.setParent("..")
    cmds.rowLayout( numberOfColumns=2,adjustableColumn=2)
    cmds.text(l=u"输入曲线刷新速度",w=100)
    SpeedAttr = cmds.floatField(v=10)
    cmds.setParent("..")
    cmds.rowLayout( numberOfColumns=2,adjustableColumn=2)
    cmds.text(l=u"最大值",w=100)
    MaxVal = cmds.floatField(v=5)
    cmds.setParent("..")
    cmds.rowLayout( numberOfColumns=2,adjustableColumn=2)
    cmds.text(l=u"最小值",w=100)
    MinVal = cmds.floatField(v=-5)
    cmds.setParent("..")
    cmds.button(l="一键生成",c="Connect_Visualizer('%s','%s','%s','%s','%s','%s')" % (Visualizer,Cam,Attr,SpeedAttr,MinVal,MaxVal))
    cmds.setParent("..")
    cmds.separator()
    cmds.showWindow()
# Build the UI as soon as the script is executed.
init_UI()
|
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np

# Demo: a two-input / two-output Keras functional model trained on random data.
inputs1 = tf.keras.Input(shape=(32,), name="input_1")
inputs2 = tf.keras.Input(shape=(32,), name="input_2")
# BUG FIX: `name` is a constructor argument of Dense, not an argument of the
# layer call — passing it to __call__ raised a TypeError.
x1 = layers.Dense(64, name="dense_1")(inputs1)
x2 = layers.Dense(64, name="dense_2")(inputs2)
print(tf.shape(x1))
x = tf.concat([x1, x2], axis=-1)
print(tf.shape(x))
outputs1 = layers.Dense(10)(x)
outputs2 = layers.Dense(10)(x)
model = tf.keras.Model(inputs=[inputs1, inputs2], outputs=[outputs1, outputs2])
model.compile(optimizer=tf.keras.optimizers.Adam(0.01), loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True), metrics=["accuracy"])
# Random train / eval / test / predict splits (no real signal — demo only).
x_1_train = np.random.random((1000, 32))
x_2_train = np.random.random((1000, 32))
y_1_train = np.random.random((1000, 10))
y_2_train = np.random.random((1000, 10))
x_1_eval = np.random.random((200, 32))
x_2_eval = np.random.random((200, 32))
y_1_eval = np.random.random((200, 10))
y_2_eval = np.random.random((200, 10))
x_1_test = np.random.random((100, 32))
x_2_test = np.random.random((100, 32))
y_1_test = np.random.random((100, 10))
y_2_test = np.random.random((100, 10))
model.fit((x_1_train, x_2_train), (y_1_train, y_2_train), batch_size=64, epochs=5, validation_data=((x_1_eval, x_2_eval), (y_1_eval, y_2_eval)))
eval_results = model.evaluate((x_1_test, x_2_test), (y_1_test, y_2_test), batch_size=128)
print(eval_results)
x_1_pred = np.random.random((50, 32))
x_2_pred = np.random.random((50, 32))
predicts = model.predict((x_1_pred, x_2_pred))
print(predicts)
from copy import deepcopy
class Solution(object):
    """LeetCode 1219 'Path with Maximum Gold': DFS over every simple path of
    non-zero cells, tracking the best total collected."""

    def __init__(self):
        # BUG FIX: start at 0, not -1 — for an all-zero grid no DFS runs and
        # the correct answer is 0.
        self.maxi = 0
        self.visited = []

    def getMaximumGold(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        def in_matrix(row, col):
            # True when (row, col) lies inside the grid bounds.
            return 0 <= row < len(grid) and 0 <= col < len(grid[0])

        def dfs(row, col, total):
            # `total` is the gold collected up to and including (row, col).
            # (Renamed from `max`, which shadowed the builtin.)
            if in_matrix(row, col) and not self.visited[row][col] and grid[row][col] != 0:
                self.visited[row][col] = True
                for r, c in ((row + 1, col), (row - 1, col), (row, col + 1), (row, col - 1)):
                    if in_matrix(r, c):
                        dfs(r, c, total + grid[r][c])
                if total > self.maxi:
                    self.maxi = total
                # Un-mark on the way out so other paths may reuse this cell.
                self.visited[row][col] = False

        # BUG FIX: reset state so repeated calls on one instance are correct.
        self.maxi = 0
        self.visited = [[False] * len(grid[0]) for _ in range(len(grid))]
        for row in range(len(grid)):
            for col in range(len(grid[0])):
                if grid[row][col] != 0:
                    dfs(row, col, grid[row][col])
        print(self.maxi)
        return self.maxi
# Quick manual check against a 5x6 sample grid (result is printed).
solver = Solution()
solver.getMaximumGold(
    grid=[[1, 0, 7, 0, 0, 0], [2, 0, 6, 0, 1, 0], [3, 5, 6, 7, 4, 2],
          [4, 3, 1, 0, 2, 0], [3, 0, 5, 0, 20, 0]])
|
# Definition for singly-linked list.
class ListNode:
    """One node of a singly-linked list: a payload plus a next pointer."""

    def __init__(self, val=0, next=None):
        self.val, self.next = val, next
class Solution:
    def reorderList(self, head: 'ListNode') -> None:
        """
        Do not return anything, modify head in-place instead.

        Reorders L0→L1→…→Ln into L0→Ln→L1→Ln-1→… by splitting the list in
        half, reversing the second half and weaving it into the first.
        """
        if not head or not head.next:
            return
        # Slow/fast scan: `slow` stops at the last node of the first half.
        slow = fast = head
        while fast and fast.next and fast.next.next:
            slow, fast = slow.next, fast.next.next
        # Detach and reverse the second half.
        second = self.reserve(slow.next)
        slow.next = None
        # Weave the reversed second half into the first half.
        first = head
        while second:
            upcoming = second.next
            second.next = first.next
            first.next = second
            first = second.next
            second = upcoming

    def reserve(self, head):
        """Reverse the list in place and return the new head.

        (The name is kept as-is for callers; 'reverse' is what is meant.)
        Works by repeatedly splicing head's successor to the front.
        """
        front = head
        while head.next:
            moved = head.next
            head.next = moved.next
            moved.next = front
            front = moved
        return front
# Given a singly linked list L: L0→L1→…→Ln-1→Ln,
# reorder it to: L0→Ln→L1→Ln-1→L2→Ln-2→…
l = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
Solution().reorderList(l)
# Print the reordered values one per line.
while l:
    print(l.val)
    l = l.next
# -*- coding: utf-8 -*-
import codecs
import os
import sys
"""
Visit http://pyltp.readthedocs.io/zh_CN/latest/api.html
Download the model needed for pylty to run correctly
Remember to change the directories below to where you put the downloaded files
"""
# Paths to the pyltp model files (segmenter, POS tagger, dependency parser).
LTP_DATA_DIR = '/Users/Herman/Documents/BOE/pyltp/ltp_data'  # Modify this dir
cws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')
pos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')
par_model_path = os.path.join(LTP_DATA_DIR, 'parser.model')
# Modify this dir to where all_nlp is
# You may run createAll.py in a folder with all input nlp files to obtain
# all_nlp
LEXICON_PATH = '/Users/Herman/Documents/BOE/project.git/app/hospital_guide_robot/backends/nlp_haozhe/all_nlp'
from pyltp import Segmentor
from pyltp import Postagger
from pyltp import Parser
class Extractor:
def __init__(self):
self.segmentor = Segmentor()
self.segmentor.load_with_lexicon(cws_model_path, LEXICON_PATH)
self.postagger = Postagger()
self.postagger.load(pos_model_path)
self.parser = Parser()
self.parser.load(par_model_path)
# Load all lists with the input files
self.organ_id_dict = self.create_id_dict('organ_nlp')
self.organ_word_dict = self.create_word_dict('organ_nlp')
self.location_list = self.load_list('location_nlp')
self.tissue_id_dict = self.create_id_dict('tissue_nlp')
self.tissue_word_dict = self.create_word_dict('tissue_nlp')
self.indicator_id_dict = self.create_id_dict('indicator_nlp')
self.indicator_word_dict = self.create_word_dict('indicator_nlp')
self.nutrition_list = self.load_list('nutrition_nlp')
self.problem_id_dict = self.create_id_dict('problem_nlp')
self.problem_word_dict = self.create_word_dict('problem_nlp')
self.severity_dict = self.load_dict('severity_nlp')
self.suddenness_dict = self.load_dict('suddenness_nlp')
self.frequency_dict = self.load_dict('frequency_nlp')
self.time_dict = self.load_dict('time_nlp')
self.disease_list = self.load_list('disease_nlp')
self.negative_list = self.load_list('negative_nlp')
self.appearance_id_dict = self.create_id_dict('appearance_nlp')
self.appearance_word_dict = self.create_word_dict('appearance_nlp')
self.nutrition_list = self.load_list('nutrition_nlp')
self.function_id_dict = self.create_id_dict('function_nlp')
self.function_word_dict = self.create_word_dict('function_nlp')
# These temp variables are used in helper methods to prevent duplicated
# content from being extracted
self.prob_temp = []
self.form_temp = []
self.in_prob_list = []
# This method resets the temp variables, so as to ensure the accuracy of the
# results extracted
def reset(self):
self.prob_temp = []
self.form_temp = []
self.in_prob_list = []
    # Extract all keywords and return a tuple
    def extract_tuple(self, description):
        """Segment/tag/parse `description` and build one result tuple per
        distinct problem word found, filtered through del_extra.

        Prints the description and the parse (1-based indices, words,
        head:relation arcs) for debugging.
        """
        print(description)
        words = self.segmentor.segment(description)
        postags = self.postagger.postag(words)
        arcs = self.parser.parse(words, postags)
        print('\t'.join(str(i + 1) for i in range(0, len(list(words)))))
        print('\t'.join(word for word in list(words)))
        print('\t'.join("%d:%s" % (arc.head, arc.relation) for arc in arcs))
        num_of_tuple = self.find_num_of_tuple(words, arcs)
        tuples = []
        for num in range(num_of_tuple):
            tuples.append(self.construct_tuple(description, num))
        tuples = self.del_extra(tuples)
        return tuples
    # Only extract the major components
    def extract_simple_tuple(self, description):
        """Like extract_tuple but builds the reduced tuples (key features
        only) and applies no del_extra filtering."""
        words = self.segmentor.segment(description)
        postags = self.postagger.postag(words)
        arcs = self.parser.parse(words, postags)
        """
        print('\t'.join(str(i + 1) for i in range(0, len(list(words)))))
        print('\t'.join(word for word in list(words)))
        print('\t'.join("%d:%s" % (arc.head, arc.relation) for arc in arcs))
        """
        #words = description
        num_of_tuple = self.find_num_of_tuple(words, arcs)
        tuples = []
        for num in range(num_of_tuple):
            tuples.append(self.construct_simple_tuple(description, num))
        return tuples
# Extract all keywords and return a dict
def extract_dict(self, description):
print(description)
words = self.segmentor.segment(description)
postags = self.postagger.postag(words)
arcs = self.parser.parse(words, postags)
print('\t'.join(str(i + 1) for i in range(0, len(list(words)))))
print('\t'.join(word for word in list(words)))
print('\t'.join("%d:%s" % (arc.head, arc.relation) for arc in arcs))
num_of_tuple = self.find_num_of_tuple(words, arcs)
tuples = []
for num in range(num_of_tuple):
tuples.append(self.construct_tuple(description, num))
tuples = self.del_extra(tuples)
final = []
for each in tuples:
result = self.change_tuple_to_dict(each)
final.append(result)
return final
# Helper method to change a tuple to a dict
def change_tuple_to_dict(self, in_tuple):
result = {}
result['Organ'] = in_tuple[0]
result['Location'] = in_tuple[1]
result['Tissue'] = in_tuple[2]
result['Indicator'] = in_tuple[3]
result['Problem'] = in_tuple[4]
result['Form'] = in_tuple[5]
result['Severity'] = in_tuple[6]
result['Not included'] = in_tuple[7]
result['Suddenness'] = in_tuple[8]
result['Frequency'] = in_tuple[9]
result['Time'] = in_tuple[10]
result['Trigger'] = in_tuple[11]
result['Worsening Factor'] = in_tuple[12]
result['Relief Factor'] = in_tuple[13]
result['History'] = in_tuple[14]
result['Suspect'] = in_tuple[15]
result['Eliminate'] = in_tuple[16]
result['Appearance'] = in_tuple[17]
result['Nutrition'] = in_tuple[18]
result['Function'] = in_tuple[19]
return result
# 用词典过滤多余词条
def del_extra(self, tuples):
copy = []
for ele in tuples:
if ele == None:
continue
if len(ele[4]) == 0:
continue
if ele[4][0].find('_') >= 0:
copy.append(ele)
return copy
    # This is the most important method in this class
    # It constructs a tuple with the necessary information extracted from the
    # description of a patient
    def construct_tuple(self, description, num):
        """
        Build the result tuple for the num-th distinct problem word.

        Tuple layout:
        ( [organ A, organ B ...], [[organ A locations], [organ B locations] ...],
          [body tissues],
          [body indicators], [problems/feelings], [forms of the problem],
          int(severity), [excluded symptoms], int(suddenness),
          int(frequency), int(time of appearance), int(duration),
          str(trigger, not filled in yet), str(worsening factor, not filled in yet),
          str(relief factor, not filled in yet),
          [history], [suspected diseases], [excluded diseases],
          [visible problems (e.g. lumps, odor)], [nutrition], [body functions] )
        """
        res = ()
        words = self.segmentor.segment(description)
        postags = self.postagger.postag(words)
        arcs = self.parser.parse(words, postags)
        # Initialize all elements in the tuple
        organs = []
        location = []
        tissue = []
        indicator = []
        problem = []
        form = []
        severity = -1
        not_included = []
        suddenness = -1
        frequency = -1
        time = -1
        trigger = '起因暂无' # Not considered in this phase
        worsen = '加剧因素暂无' # Not considered in this phase
        relief = '缓解因素暂无' # Not considered in this phase
        history = []
        suspect = []
        eliminate = []
        appearance = []
        nutrition = []
        function = []
        # Iterate through every single word obtained from ltp methods.
        # Each branch canonicalizes the word via its word->id and id->word
        # dicts and tags it with a one-letter type prefix (O_, T_, I_, ...).
        for i in range(len(words)):
            # Get organs and their locations
            if words[i] in self.organ_word_dict.keys():
                id_val = self.organ_word_dict.get(words[i])
                word = self.organ_id_dict.get(id_val)
                if not ('O_' + word) in organs:
                    organs.append('O_' + word)
                    temp_location = self.find_location(words, arcs, i)
                    location.append(temp_location)
            # Get tissue
            elif words[i] in self.tissue_word_dict.keys():
                id_val = self.tissue_word_dict.get(words[i])
                word = self.tissue_id_dict.get(id_val)
                if not ('T_' + word) in tissue:
                    tissue.append('T_' + word)
            # Get indicator
            elif words[i] in self.indicator_word_dict.keys():
                id_val = self.indicator_word_dict.get(words[i])
                word = self.indicator_id_dict.get(id_val)
                if not ('I_' + word) in indicator:
                    indicator.append('I_' + word)
            # Get appearance
            elif words[i] in self.appearance_word_dict.keys():
                id_val = self.appearance_word_dict.get(words[i])
                word = self.appearance_id_dict.get(id_val)
                if not ('A_' + word) in appearance:
                    appearance.append('A_' + word)
            # Handle words that carry a negative meaning
            elif words[i] in self.negative_list:
                self.find_not_included(list(words), i, words[i], arcs, not_included)
            # Get time
            elif words[i] in self.time_dict:
                time = max(int(self.time_dict.get(words[i])), time)
            # Get nutrition
            elif words[i] in self.nutrition_list and not words[i] in nutrition:
                nutrition.append('N_' + words[i])
            # Get function
            elif words[i] in self.function_word_dict.keys():
                id_val = self.function_word_dict.get(words[i])
                word = self.function_id_dict.get(id_val)
                if not ('F_' + word) in function:
                    function.append('F_' + word)
        # Problem-centric fields all relate to the num-th problem word.
        problem.append(self.find_problem(words, arcs, num))
        form = self.find_form(words, arcs, num)
        severity = self.find_severity(words, arcs, num)
        frequency = self.find_frequency(words, arcs, num)
        suddenness = self.find_suddenness(words, arcs, num)
        # Remove the duplicates: anything negated must not also appear as a
        # positive finding. A negated problem invalidates the whole tuple.
        for ele in not_included:
            if ele in problem:
                problem.remove(ele)
                return None
            if ele in organs:
                index = organs.index(ele)
                location.remove(location[index])
                organs.remove(ele)
            if ele in tissue:
                tissue.remove(ele)
            if ele in indicator:
                indicator.remove(ele)
        history = self.find_disease(description, 'history_nlp')
        suspect = self.find_disease(description, 'suspect_nlp')
        eliminate = self.find_disease(description, 'eliminate_nlp')
        # Delete the problems that also appear in other components of the tuple
        # NOTE(review): problem[0] assumes find_problem matched — it raises
        # IndexError inside find_problem otherwise; confirm num is in range.
        delete = False
        for ele in history:
            if ele.find(problem[0]) >= 0 or problem[0].find(ele) >= 0:
                delete = True
        for ele in suspect:
            if ele.find(problem[0]) >= 0 or problem[0].find(ele) >= 0:
                delete = True
        for ele in eliminate:
            if ele.find(problem[0]) >= 0 or problem[0].find(ele) >= 0:
                delete = True
        if delete:
            problem = []
        # Formatting the output of tuple
        # If you are debugging this part, you may want to look for
        # format_tuple method to uncomment some lines, so that more contents in
        # the tuple get printed out
        res = self.format_tuple(res, organs, 'Organ')
        res = self.format_tuple(res, location, 'Location')
        res = self.format_tuple(res, tissue, 'Tissue')
        res = self.format_tuple(res, indicator, 'Indicator')
        res = self.format_tuple(res, problem, 'Problem')
        res = self.format_tuple(res, form, 'Form')
        res += (severity,)
        res = self.format_tuple(res, not_included, 'Not_included')
        res += (suddenness,) + (frequency,) + (time,)
        res += (trigger,) + (worsen,) + (relief,)
        res = self.format_tuple(res, history, 'History')
        res = self.format_tuple(res, suspect, 'Suspect')
        res = self.format_tuple(res, eliminate, 'Eliminate')
        res = self.format_tuple(res, appearance, 'Appearance')
        res = self.format_tuple(res, nutrition, 'Nutrition')
        res = self.format_tuple(res, function, 'Function')
        return res
    # A simple version of construct_tuple, only obtaining the key features
    def construct_simple_tuple(self, description, num):
        """Build the reduced tuple (organs, tissue, indicator, problem,
        appearance, nutrition, function) for the num-th problem word.

        Unlike construct_tuple, the inner lists are converted to tuples so
        the result is hashable (usable as a dict key in other tests).
        """
        res = ()
        words = self.segmentor.segment(description)
        postags = self.postagger.postag(words)
        arcs = self.parser.parse(words, postags)
        # Initialize all elements in the tuple
        organs = []
        tissue = []
        indicator = []
        problem = []
        appearance = []
        nutrition = []
        function = []
        # One Chinese char will be one unit here
        #words = description
        # Iterate through every single word obtained from ltp
        for i in range(len(words)):
            # Get organs and their locations
            if words[i] in self.organ_word_dict.keys():
                id_val = self.organ_word_dict.get(words[i])
                word = self.organ_id_dict.get(id_val)
                if not ('O_' + word) in organs:
                    organs.append('O_' + word)
            # Get tissue
            elif words[i] in self.tissue_word_dict.keys():
                id_val = self.tissue_word_dict.get(words[i])
                word = self.tissue_id_dict.get(id_val)
                if not ('T_' + word) in tissue:
                    tissue.append('T_' + word)
            # Get indicator
            elif words[i] in self.indicator_word_dict.keys():
                id_val = self.indicator_word_dict.get(words[i])
                word = self.indicator_id_dict.get(id_val)
                if not ('I_' + word) in indicator:
                    indicator.append('I_' + word)
            # Get appearance
            elif words[i] in self.appearance_word_dict.keys():
                id_val = self.appearance_word_dict.get(words[i])
                word = self.appearance_id_dict.get(id_val)
                if not ('A_' + word) in appearance:
                    appearance.append('A_' + word)
            # Get nutrition
            # NOTE(review): no 'N_' prefix here, unlike construct_tuple —
            # confirm the asymmetry is intentional.
            elif words[i] in self.nutrition_list and not words[i] in nutrition:
                nutrition.append(words[i])
            # Get function
            elif words[i] in self.function_word_dict.keys():
                id_val = self.function_word_dict.get(words[i])
                word = self.function_id_dict.get(id_val)
                if not ('F_' + word) in function:
                    function.append('F_' + word)
        problem.append(self.find_problem(words, arcs, num))
        # Formatting the output of tuple
        # All internal lists are converted to a tuple in order to use as keys in
        # another test
        res = self.format_tuple(res, tuple(organs), 'Organ')
        res = self.format_tuple(res, tuple(tissue), 'Tissue')
        res = self.format_tuple(res, tuple(indicator), 'Indicator')
        res = self.format_tuple(res, tuple(problem), 'Problem')
        res = self.format_tuple(res, tuple(appearance), 'Appearance')
        res = self.format_tuple(res, tuple(nutrition), 'Nutrition')
        res = self.format_tuple(res, tuple(function), 'Function')
        return res
# This method is created for debugging purpose
# Uncomment the if-else loop to print out 'XXXX list is empty'
def format_tuple(self, res, in_list, name):
#if len(in_list) == 0:
# res += ([ name + ' list is empty'],)
#else:
# res += (in_list,)
res += (in_list,)
return res
# Helper method to find the word index of head
def find_head_index(self, arcs):
k = 0
head_index = 0
for arc in arcs:
if arc.head == 0:
head_index = k
break
k += 1
return head_index
# Helper method to find all children of the current word
# i is the index of curr_word in [words]
def find_children(self, words, i, curr_word, arcs):
children = []
k = 0
for arc in arcs:
if arc.head == i + 1 and arc.relation != 'COO' and arc.relation != 'WP':
children.append((words[k], k))
k += 1
return children
# Method to find the suddenness component
def find_suddenness(self, words, arcs, num):
suddenness = -1
j = -1
k = 0
temp = []
head = 0
# Add all problems to temp and find the current one using the
# parameter 'num'
for word in words:
if word in self.problem_word_dict.keys() and not word in temp:
temp.append(word)
j += 1
# Find head of the current problem
if j == num:
head = k
break
k +=1
# Find all its children and find the word in suddenness_dict
children = self.find_children(words, head, words[head], arcs)
for child, index in children:
if child in self.suddenness_dict:
suddenness = int(self.suddenness_dict.get(child))
return suddenness
# Similar method as find_suddenness, with the sole difference that the
# output is the frequency.
def find_frequency(self, words, arcs, num):
frequency = -1
j = -1
k = 0
temp = []
head = 0
for word in words:
if word in self.problem_word_dict.keys() and not word in temp:
temp.append(word)
j += 1
if j == num:
head = k
break
k +=1
children = self.find_children(words, head, words[head], arcs)
for child, index in children:
if child in self.frequency_dict:
frequency = int(self.frequency_dict.get(child))
return frequency
# Similar method as find_suddenness, with the sole difference that the ouput
# is the severity.
def find_severity(self, words, arcs, num):
severity = -1
j = -1
k = 0
temp = []
head = 0
for word in words:
if word in self.problem_word_dict.keys() and not word in temp:
temp.append(word)
j += 1
if j == num:
head = k
break
k +=1
children = self.find_children(words, head, words[head], arcs)
for child, index in children:
if child in self.severity_dict:
severity = int(self.severity_dict.get(child))
return severity
# Method to find form of the problem
def find_form(self, words, arcs, num):
form = []
j = -1
k = 0
temp = []
head = 0
for word in words:
if word in self.problem_word_dict.keys() and not word in temp:
temp.append(word)
j += 1
if j == num:
head = k
break
k += 1
children = self.find_children(words, k, words[k], arcs)
for child, index in children:
if arcs[index].relation =='ADV' or arcs[index].relation == 'VOB' or arcs[index].relation =='CMP':
if not child in self.form_temp and not child in self.location_list:
form.append(child)
self.form_temp.append(child)
return form
    # Handle information with a negative meaning.
    # Recursive method that gets all children of the negative word, e.g.
    # '不', '没有', '无', etc., tagging each with its type prefix and adding
    # it to not_included.
    def find_not_included(self, words, i, curr_word, arcs, not_included):
        children = self.find_children(words, i, curr_word, arcs)
        # Drop ADV dependents of the negative word itself.
        copy = [] # copy of children
        for ele in children:
            copy.append(ele)
        for ele, index in copy:
            if arcs[index].relation == 'ADV':
                children.remove((ele, index))
        # Base case
        if arcs[i].relation != 'ADV':
            if len(children) == 0:
                return
        temp1 = []
        temp2 = []
        index_list = []
        # If curr_word has a relation of ADV to its parent node, add its parent
        # to not_included. Then add all children (including grandchildren and
        # more) of its parent to not_included.
        # NOTE(review): temp1 is created empty just above, so the
        # `not ... in temp1` test can never fail here — confirm intent.
        if arcs[i].relation == 'ADV' and not words[arcs[i].head - 1] in temp1:
            if arcs[arcs[i].head - 1].head != 0:
                curr = words[arcs[i].head - 1]
                temp1.append(curr)
                if curr in self.organ_word_dict.keys():
                    not_included.append('O_' + curr)
                elif curr in self.location_list:
                    not_included.append('L_' + curr)
                elif curr in self.tissue_word_dict.keys():
                    not_included.append('T_' + curr)
                elif curr in self.indicator_word_dict.keys():
                    not_included.append('I_' + curr)
                elif curr in self.problem_word_dict.keys():
                    not_included.append('P_' + curr)
                else:
                    not_included.append(curr)
                # NOTE(review): i is not read again after this point, so this
                # increment has no effect — confirm it is safe to drop.
                i += 1
        # If curr_word is not an adverb, add all its childre (including
        # grandchildren and more) to not_included
        for child, index in children:
            #if arcs[index].relation == 'ADV':
                #continue
            if not child in temp2 and child != '不':
                if child in self.organ_word_dict.keys():
                    not_included.append('O_' + child)
                elif child in self.location_list:
                    not_included.append('L_' + child)
                elif child in self.tissue_word_dict.keys():
                    not_included.append('T_' + child)
                elif child in self.indicator_word_dict.keys():
                    not_included.append('I_' + child)
                elif child in self.problem_word_dict.keys():
                    not_included.append('P_' + child)
                else:
                    not_included.append(child)
                temp2.append(child)
        # Recursive call on every recorded child.
        k = 0
        for each in temp2:
            self.find_not_included(words, children[k][1], each, arcs, not_included)
            k += 1
# Find location words
def find_location(self, words, arcs, organ_index):
location = []
k = 0
for arc in arcs:
if k == organ_index:
head_val = arc.head
if words[head_val - 1] in self.location_list:
location.append('L_' + words[head_val - 1])
elif arc.relation == 'ATT':
j = 0
for arc2 in arcs:
if arc2.head == head_val and arc2.relation == 'COO':
if words[j] in self.location_list:
location.append('L_' + words[j])
j += 1
k += 1
return location
# Find the words that describe problem
def find_problem(self, words, arcs, num):
problem = []
temp = []
for word in words:
if word in self.problem_word_dict.keys():
id_val = self.problem_word_dict.get(word)
word = self.problem_id_dict.get(id_val)
temp.append(word)
problem.append('P_' + word)
return problem[num]
# Find disease that match one of our recorded disease names from input_file
def find_disease(self, description, input_file):
for punc in [u'。',' ',u'!',u'?',u';',u':',',','.','?','!']:
description.replace(punc, ',')
desc_list = description.split(',')
keyword_list = self.load_list(input_file)
result = []
for each in desc_list:
for ele in keyword_list:
if each.find(ele.rstrip()) >= 0:
words = self.segmentor.segment(each)
for word in words:
if word in self.disease_list and not ('D_' + word) in result:
result.append('D_' + word)
return result
# Find subject words
def find_subject(self, words, arcs):
subject = []
head_index = self.find_head_index(arcs)
j = 0
for arc in arcs:
if arc.head == head_index + 1 and arc.relation == 'SBV':
subject.append(words[j])
j += 1
return subject
# Helper method to determine how many tuple are produced in the end
def find_num_of_tuple(self, words, arcs):
temp = []
j = 0
for word in words:
if word in self.problem_word_dict.keys():
if not word in temp:
temp.append(word)
j += 1
return j
# Helper method to load a list from a file
def load_list(self, inFile):
res_list = []
temp_list = codecs.open(inFile, 'r', 'utf-8').readlines()
for ele in temp_list:
temp = ele.split()[0].rstrip()
res_list.append(temp)
return res_list
"""
def load_list_with_syn(sef, inFile):
res_list = []
temp_list = codecs.open(inFile, 'r', 'utf-8').readlines()
i = 0
for ele in temp_list:
temp = ele.rstrip().split()
for each in temp:
res_list.append((each, i)) # each is the word, i is the id
i += 1
return res_list
"""
def create_id_dict(self, inFile):
    """Build {line_number: first_token} from inFile.

    Each line's first whitespace-delimited token becomes the value; the
    0-based line number is the key.
    """
    res_dict = {}  # key is id, val is the word
    # Context manager closes the handle (the original leaked it); enumerate
    # replaces the manual counter.
    with codecs.open(inFile, 'r', 'utf-8') as handle:
        for line_no, line in enumerate(handle):
            res_dict[line_no] = line.rstrip().split()[0]
    return res_dict
def create_word_dict(self, inFile):
    """Build {word: line_number} for every token on every line of inFile.

    All synonyms on one line share the same 0-based id (the line number).
    """
    res_dict = {}  # key is word, val is id
    # Context manager closes the handle (the original leaked it); enumerate
    # replaces indexing into a fully-read list.
    with codecs.open(inFile, 'r', 'utf-8') as handle:
        for line_no, line in enumerate(handle):
            for token in line.split():
                res_dict[token] = line_no
    return res_dict
# Helper method to load a dictionary from a file
def load_dict(self, inFile):
    """Build {first_token: last_token} from each line of inFile.

    A single-token line maps the token to itself.
    """
    res_dict = {}
    # Context manager closes the handle — the original leaked it.
    with codecs.open(inFile, 'r', 'utf-8') as handle:
        for line in handle:
            tokens = line.split()
            res_dict[tokens[0].rstrip()] = tokens[-1].rstrip()
    return res_dict
# Release the NLP models. Call this at the end of the program.
def release(self):
    """Release the segmenter, POS-tagger and parser models, in that order."""
    for model in (self.segmentor, self.postagger, self.parser):
        model.release()
# Example of how to use Extractor
if __name__ == '__main__':
    # Interactive demo driver: short mode reads up to 3 sentences from stdin,
    # long mode streams symptom_input.txt through the extractor.
    extractor = Extractor()
    print()
    print('Do you wish to conduct a long test or a short test?')
    print('Long test will use symptom_input.txt as input to test 100 haodf input, while short test will allow you to test up to 3 input from stdin.')
    ans = input('Type \'L\' or \'l\' for long, \'S\'or \'s\' for short: ')
    if ans in ('S', 's'):
        for _ in range(3):
            userInput = input(u'请输入: ')
            if len(userInput) == 0:
                continue
            res = extractor.extract_simple_tuple(userInput)
            for ele in res:
                print(ele)
            print()
            # REMEMBER to reset after extracting keywords from a sentence
            extractor.reset()
        extractor.release()
    elif ans in ('L', 'l'):
        f = codecs.open('symptom_input.txt', 'r', 'utf-8')
        for line in f.readlines():
            res = extractor.extract_dict(line)
            for ele in res:
                print(ele)
            print()
            extractor.reset()
        extractor.release()
    else:
        print('Invalid input. Please rerun the program and type l or s.')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct
import sys
import os
import subprocess
import re
objdump_cmd = "objdump"
# llvm-objdump-style flags: disassemble a Mach-O binary with source
# interleaved, keeping __const/__cstring so literal doubles can be resolved.
objdump_param = ('-color', '-macho', '-source', '-mcpu=haswell', '-section=__const', '-section=__cstring', '-triple=x86_64-apple-darwin17.5.0')
# Matches a RIP-relative displacement such as "123(%rip)"; group 1 is the
# decimal offset.  FIX: raw string — '\d' in a plain literal is an invalid
# escape sequence (DeprecationWarning on modern Python).
rip_re = re.compile(r'(\d+)[(][%]rip[)]')
def sd_to_string(value):
    """Convert 8 objdump hex-byte strings (x86-64 memory order, i.e.
    little-endian) into the decimal string of the double they encode.
    """
    hex_v = ''.join(x for x in value)
    # The listed bytes are little-endian; interpreting the joined hex as a
    # big-endian int and repacking it little-endian reverses the bytes, so
    # '>d' then reads the correct IEEE-754 layout.  BUG FIX: the original
    # packed with native 'Q' (only correct on little-endian hosts) and
    # formatted the unpack *tuple* instead of its single element.
    double_value = struct.unpack('>d', struct.pack('<Q', int(hex_v, 16)))[0]
    return '%.7f' % double_value


def fine_vmovsd(lines, addr):
    """Locate the 8 data bytes stored at addr in the disassembly dump and
    return them formatted as a double; falls back to hex(addr) when the
    address is not found.

    Data lines look like "ADDR:\\tb0 b1 ... b15" (16 bytes per line), so
    addr may match either the first or the second 8-byte group.
    """
    for line in lines:
        dt = line.split('\t')
        if len(dt) < 2:
            continue
        # BUG FIX: the original bare `except` swallowed every error; narrow
        # it to the parse failures it was actually guarding against.
        try:
            base_addr = int(dt[0].replace(':', ''), 16)
            if base_addr == addr:
                return sd_to_string(dt[1].split(' ')[:8])
            if base_addr + 8 == addr:
                return sd_to_string(dt[1].split(' ')[8:16])
        except ValueError:
            continue
    print('not find ', hex(addr))
    return hex(addr)
def disam(app_path):
    """Disassemble the binary at app_path with objdump and rewrite each
    RIP-relative operand as its resolved absolute address (or, for vmovsd
    loads, the literal double stored there).

    Returns the rewritten disassembly text; also writes the raw objdump
    output to "<app_path>.o.asm" as a side effect.
    """
    params = [objdump_cmd]
    params.extend(objdump_param)
    params.append(app_path)
    # FIX: universal_newlines makes communicate() return text on both
    # Python 2 and 3 — the original got bytes under Python 3 and then
    # failed writing them into a text-mode file.
    child = subprocess.Popen(params, stdout=subprocess.PIPE, universal_newlines=True)
    asm_source = child.communicate()[0]
    # Context manager closes the file; the original's explicit close() inside
    # the with-block was redundant.
    with open(app_path + ".o.asm", 'w') as f:
        f.write(asm_source)
    fine_source = ""
    lines = asm_source.split('\n')
    for line in lines:
        result = rip_re.search(line)
        if result:
            rel_pos = int(result.groups()[0])
            dt = line.split('\t')
            # A RIP-relative operand is relative to the *next* instruction:
            # current address + instruction byte count + displacement.
            ins_len = len(dt[1].strip().split(' '))
            base_addr = int(dt[0][:-1], 16)
            value_addr = rel_pos + base_addr + ins_len
            v_result = hex(value_addr)
            if line.count('vmovsd'):
                # Replace the address of a double load with its literal value.
                v_result = fine_vmovsd(lines, value_addr)
            fine_source += line[:result.start()] + v_result + line[result.end():] + "\n"
        else:
            fine_source += line + "\n"
    return fine_source
if __name__ == "__main__":
    # CLI entry point: disam <binary> writes <binary>.asm with RIP-relative
    # operands resolved.
    if len(sys.argv) == 1:
        print("""usage: disam <your app>\n""")
        exit(0)
    app = sys.argv[1]
    app_path = os.path.abspath(app)
    print(app)
    asm_source = disam(app_path)
    asm_file = app_path + ".asm"
    # Context manager closes the file; the explicit close() the original
    # called inside the with-block was redundant.
    with open(asm_file, 'w') as f:
        f.write(asm_source)
|
"""
Collect most recent data from SetTrade.com web site on all stocks.
Save to JSON file.
"""
import requests
from bs4 import BeautifulSoup
import os, os.path
from datetime import datetime
import json
# Timestamp used in every generated filename so successive runs don't
# overwrite each other.  NOTE(review): Python 2 script (print statements).
ts = datetime.now()
dataFolder = os.path.join(os.path.abspath("."),"""market-data""")
baseURL = """http://www.settrade.com"""
startPage = """http://www.settrade.com/C13_MarketSummary.jsp?detail=STOCK_TYPE&order=N&market=SET&type=S"""
# NOTE(review): snapshotFile is built here but never written in this script.
snapshotFile = os.path.join(dataFolder, "snapshot" + ts.strftime("%Y%m%d_%H%M") + ".dat")
# Fetch the market-summary index page.
r = requests.get(startPage)
# check content received
print r.status_code
# print r.content[:100]
# Save the index page
indexFile = os.path.join(dataFolder, "market" + ts.strftime("%Y%m%d_%H%M") + ".html")
with open(indexFile, "w") as f:
    f.write(r.content)
# load into BeautifulSoup
soup = BeautifulSoup(r.content, 'html.parser')
# print soup
def get_quotes_links(soup):
    """Yield every <a> tag on the page that links to a stock quote.

    soup: BeautifulSoup of the market-summary page.  Quote links are
    recognized by their "/C13_FastQuote_Main.jsp" href prefix.
    """
    links = soup.find_all("a")
    for link in links:
        if link.has_attr('href'):
            if link.attrs['href'].startswith("/C13_FastQuote_Main.jsp"):
                # Debug output: symbol text and the tag's attributes.
                print link.get_text()
                print link.attrs
                yield link
print datetime.now().strftime("%Y%m%d_%H%M")
# Accumulate everything scraped from this run into one JSON-serializable dict.
market_data = {}
market_data['src'] = startPage
market_data['ts'] = str(ts)
market_data['symbols'] = []   # every symbol seen
market_data['active'] = []    # normally traded issues
market_data['flagged'] = []   # issues carrying a trailing <FLAG> marker
for link in get_quotes_links(soup):
    print link
    print link.parent.parent.get_text()
    # The quote row's text, one field per line.
    raw_data_cell = link.parent.parent.get_text().split('\n')
    raw_data_cell.remove('') # realign cell. Remove empty element.
    symbol = raw_data_cell[0].encode('ascii',errors='ignore') # Remove non ascii chars
    # NOTE(review): assumes the latest price is always the 5th field — verify
    # against the live page layout.
    price_latest = raw_data_cell[4]
    ok = True
    if '<' in symbol: # for flaged stocks
        # Strip the trailing "<FLAG>" marker from the symbol and keep the flag.
        symbol = symbol[:symbol.find('<')]
        ok = False
        n = raw_data_cell[0].encode('ascii',errors='ignore')
        flag = n[n.find('<'):]
    # add to all symbols
    market_data['symbols'].append(symbol)
    if ok:
        issue = {}
        issue['symbol'] = symbol
        issue['price_latest'] = price_latest
        market_data['active'].append(issue)
    else:
        issue = {}
        issue['symbol'] = symbol
        issue['flag'] = flag
        issue['price_latest'] = price_latest
        market_data['flagged'].append(issue)
# Save the data file in json
dataFile = os.path.join(dataFolder, "symbols" + ts.strftime("%Y%m%d") + ".json")
with open(dataFile, "w") as f:
    f.write(json.dumps(market_data, sort_keys=True, indent=4, separators=(',', ': ')))
|
#!/bin/env python3
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# A copy of the GNU General Public License is available at
# http://www.gnu.org/licenses/gpl-3.0.html
"""OTU clustering"""
import argparse
import sys
import os
import gzip
from collections import Counter
import numpy
# import nwalign3 as nw
__author__ = "Aurore WILBRINK"
__copyright__ = "CY Tech"
__credits__ = ["Aurore WILBRINK"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Aurore Wilbrink"
__email__ = "wilbrinkau@eisti.eu"
__status__ = "Developpement"
def isfile(path):
    """Check if path is an existing file.

    :Parameters:
        path: Path to the file
    Raises argparse.ArgumentTypeError when path is missing or a directory.
    """
    # Guard clause: a regular file passes straight through.
    if os.path.isfile(path):
        return path
    if os.path.isdir(path):
        msg = "{0} is a directory".format(path)
    else:
        msg = "{0} does not exist.".format(path)
    raise argparse.ArgumentTypeError(msg)
def get_arguments():
    """Retrieves the arguments of the program.

    Returns: An object that contains the arguments
    """
    # Parsing arguments
    parser = argparse.ArgumentParser(description=__doc__, usage=
                                     "{0} -h"
                                     .format(sys.argv[0]))
    # NOTE(review): single-dash long options ('-amplicon_file', ...) are
    # alternative option strings, not GNU-style "--" flags — confirm intended.
    parser.add_argument('-i', '-amplicon_file', dest='amplicon_file', type=isfile, required=True,
                        help="Amplicon is a compressed fasta file (.fasta.gz)")
    parser.add_argument('-s', '-minseqlen', dest='minseqlen', type=int, default=400,
                        help="Minimum sequence length for dereplication")
    parser.add_argument('-m', '-mincount', dest='mincount', type=int, default=10,
                        help="Minimum count for dereplication")
    parser.add_argument('-c', '-chunk_size', dest='chunk_size', type=int, default=100,
                        help="Chunk size for dereplication")
    parser.add_argument('-k', '-kmer_size', dest='kmer_size', type=int, default=8,
                        help="kmer size for dereplication")
    parser.add_argument('-o', '-output_file', dest='output_file', type=str,
                        default="OTU.fasta", help="Output file")
    return parser.parse_args()
def read_fasta(amplicon_file, minseqlen):
    """ Generate Sequences from fasta file
    :param amplicon_file: file fasta.gz
    :param minseqlen: minimal length of sequences (int)
    :return: Sequence (len(Sequence) >= minseqlen)
    """
    with gzip.open(amplicon_file, 'rt') as myfile:
        sub_sequence = []
        for line in myfile:
            # A header (or any non-nucleotide) line terminates the sequence
            # accumulated so far.
            if len(line) == 0 or not line[0] in "TGAC":
                sequence = "".join(sub_sequence).replace("\n", "")
                sub_sequence = []
                if len(sequence) >= minseqlen:
                    yield sequence
            else:
                sub_sequence.append(line)
        # BUG FIX: flush the final record — the original dropped the last
        # sequence of the file because no following header triggered a flush.
        sequence = "".join(sub_sequence).replace("\n", "")
        if len(sequence) >= minseqlen:
            yield sequence
def dereplication_fulllength(amplicon_file, minseqlen, mincount):
    """ Generate unique sequences that appear at least mincount and return them (descending order)
    :param amplicon_file: fasta file
    :param minseqlen: minimal length of a sequence
    :param mincount: minimal number of occurancies
    :return: [sequence, number of occurancies]
    """
    # Count identical full-length sequences; Counter preserves first-seen
    # order, and the stable sort keeps that order among equal counts.
    counts = Counter(read_fasta(amplicon_file, minseqlen))
    for sequence, count in sorted(counts.items(), key=lambda kv: kv[1], reverse=True):
        if count >= mincount:
            yield [sequence, count]
def get_chunks(sequence, chunk_size):
    """
    Give a list of sub-sequences (at most 4 segments / sequence)
    :param sequence: sequence
    :param chunk_size: length of a segment
    :return: list of sub-sequences (len = chunk_size) non-overlapping
    """
    # FIX: removed a leftover debug print, and the boundary is now <= so a
    # sequence whose length is an exact multiple of chunk_size keeps its
    # final full chunk (the original off-by-one dropped it).
    sub_sequences = []
    for start in range(0, len(sequence) - chunk_size + 1, chunk_size):
        if len(sub_sequences) == 4:
            break
        sub_sequences.append(sequence[start:start + chunk_size])
    return sub_sequences
def get_unique(ids):
    """ Function predefined by the teacher

    Deduplicate ids while preserving first-seen order (dict keys view).
    :param ids: iterable of hashable items
    :return: view of the unique items
    """
    return dict.fromkeys(ids).keys()
def common(lst1, lst2):
    """
    Function predefined by the teacher

    Return the elements present in both lists (set intersection, so order
    and duplicates are not preserved).
    :param lst1: first list
    :param lst2: second list
    :return: list of shared elements
    """
    overlap = set(lst1).intersection(lst2)
    return list(overlap)
def cut_kmer(sequence, kmer_size):
    """ From a sequence, generate kmers (length k)
    :param sequence: sequence
    :param kmer_size: length of a kmer
    :return: kmers
    """
    # Slide a window of kmer_size over the sequence, one position at a time.
    for start in range(len(sequence) - kmer_size + 1):
        yield sequence[start:start + kmer_size]
def get_identity(alignment_list):
    """ Give the percentage of identity between 2 sequences
    :param alignment_list: alignment (sequence1, sequence2)
    :return: percentage of identity between the two sequences
    """
    seq_a = alignment_list[0]
    seq_b = alignment_list[1]
    identity = 0
    for position, char_a in enumerate(seq_a):
        # A position counts only when both residues match and are not gaps.
        if char_a == seq_b[position] and char_a != "-":
            identity = identity + 1 / len(seq_a)
    return identity * 100
def get_unique_kmer(kmer_dict, sequence, id_seq, kmer_size):
    """ Select only unique kmers in a dictionary of kmers
    :param kmer_dict: dictionary of kmers -> list of sequence ids
    :param sequence: sequence
    :param id_seq: integer
    :param kmer_size: integer
    :return: the same dictionary, with id_seq registered under each kmer
    """
    for kmer in cut_kmer(sequence, kmer_size):
        # setdefault creates the id list on first sight of the kmer.
        ids = kmer_dict.setdefault(kmer, [])
        if id_seq not in ids:
            ids.append(id_seq)
    return kmer_dict
def search_mates(kmer_dict, sequence, kmer_size):
    """ Give 8 sequences with the most similarities with our sequence
    :param kmer_dict: dictionary of kmers -> list of sequence ids
    :param sequence: sequence (NOTE(review): unused in the body — confirm)
    :param kmer_size: length of kmers
    :return: list of 8 sequences the most similar to sequence
    """
    # NOTE(review): Counter over a dict whose values are *lists* ranks by
    # comparing the lists themselves, and most_common() is capped with
    # kmer_size rather than the documented 8 — confirm intent.
    most_common = Counter(kmer_dict).most_common(kmer_size)
    counter = 0
    result = []
    while True:
        # Stop after kmer_size entries, or when the candidates run out.
        if counter >= kmer_size or counter >= len(most_common) - 1:
            return result
        indexs = most_common[counter][1]
        for index in indexs:
            if not index in result:
                result.append(index)
        counter += 1
def detect_chimera(perc_identity_matrix):
    """ Detect if it is a chimera (True) or not (False)
    :param perc_identity_matrix: rows = chunks, columns = parent sequences;
        each cell is a percentage of identity
    :return: True when the mean per-column standard deviation exceeds 5
    """
    # FIX: removed leftover debug prints; the explicit per-column loop is
    # replaced with the equivalent vectorized std over axis 0.
    matrix = numpy.array(perc_identity_matrix)
    mean_std = numpy.mean(numpy.std(matrix, axis=0))
    return mean_std > 5
def chimera_removal(amplicon_file, minseqlen, mincount, chunk_size, kmer_size):
    """
    Return non-chimera sequences with their occurancy
    :param amplicon_file: fasta file
    :param minseqlen: (int) minimal length of a sequence
    :param mincount: (int) minimal number of occurancies
    :param chunk_size: (int) size of each chunk / segment
    :param kmer_size: (int) length of each kmer
    :return: generator of non-chimera sequences with their occurancy [sequence, count]

    NOTE(review): explicitly unfinished (see the final return statement);
    the partial chunk/kmer bookkeeping below is kept as written.
    """
    sub_sequences = []  # keeps the sequences and their sub-sequences in memory
    # Step 1
    for sequence, occurancy in dereplication_fulllength(amplicon_file, minseqlen, mincount):
        segments = get_chunks(sequence, chunk_size)
        sub_sequences.append(segments)
        # Need at least two dereplicated sequences before comparing anything.
        if len(sub_sequences) < 2:
            continue
        id_seq = 0
        for seq in sub_sequences:
            # NOTE(review): seq is the list of chunks, so seq[0]/seq[1] are
            # the first two chunks, not a (sequence, index) pair — confirm;
            # range(index_sequence) below would receive a string.
            sequence = seq[0]
            index_sequence = seq[1]
            kmer_dict = {}
            for sub_sequence_index, sub_sequence in enumerate(sequence):
                id_seq += 1
                kmer_dict = get_unique_kmer(kmer_dict, sub_sequence, id_seq, kmer_size)
            mates = []
            for target_seq in range(index_sequence):
                mates.append(search_mates(kmer_dict,
                                          sequence[target_seq][sub_sequence_index],
                                          kmer_size))
    return print("Non abouti, "
                 "je n'ai pas réussi à comprendre les étapes malgré vos explications")
def abundance_greedy_clustering(amplicon_file, minseqlen, mincount, chunk_size, kmer_size):
    """ Identify OTU
    :param amplicon_file: fasta file
    :param minseqlen: (int) minimal length of a sequence
    :param mincount: (int) minimal number of occurancies
    :param chunk_size: (int) size of each chunk / segment
    :param kmer_size: (int) length of each kmer
    :return: List of OTU with occurancy

    NOTE(review): chimera_removal currently returns None (unfinished), so
    enumerate(non_chimeral_list) would raise TypeError at runtime — revisit
    once chimera_removal is completed.
    """
    OTU_list = []
    non_chimeral_list = chimera_removal(amplicon_file, minseqlen, mincount, chunk_size, kmer_size)
    for i, (sequence1, occurancy1) in enumerate(non_chimeral_list):
        for j in range(i + 1, len(non_chimeral_list)):
            sequence2, occurancy2 = non_chimeral_list[j]
            # NOTE(review): this continue only skips an inner-loop iteration;
            # sequence1 is still appended below, so the identity/abundance
            # check never filters anything — confirm the intended greedy rule.
            if (sequence1 != sequence2) and \
               (get_identity([sequence1, sequence2]) > 97) and \
               (occurancy1 < occurancy2):
                continue
        OTU_list.append(sequence1)
    return OTU_list
def fill(text, width=80):
    """Split text with a line return to respect fasta format"""
    return os.linesep.join(text[i:i + width] for i in range(0, len(text), width))


def write_otu(otu_list, output_file):
    """ Write the results on a file
    :param otu_list: list of (sequence, occurrence) OTU pairs
    :param output_file: name of output file
    :return: None; creates/overwrites output_file
    """
    # The context manager closes the file; the original also called
    # myfile.close() inside the with-block, which was redundant.
    with open(output_file, "w") as myfile:
        for counter, otu_element in enumerate(otu_list):
            sequence_otu, occurence_otu = otu_element
            myfile.write(">OTU_{0} occurence:{1}\n".format(counter + 1, occurence_otu))
            myfile.write(fill(sequence_otu) + "\n")
def main():
    """
    Main program function
    """
    # Get arguments
    args = get_arguments()
    # NOTE(review): args is parsed but the OTU pipeline (chimera_removal,
    # abundance_greedy_clustering, write_otu) is never invoked — confirm
    # whether this entry point is intentionally left unfinished.

if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.