text stringlengths 38 1.54M |
|---|
import unittest
from city_functions import get_city_country
class CityCountryTestCase(unittest.TestCase):
    """Tests for get_city_country()."""

    def test_get_city_country(self):
        """Do inputs like 'Lagos, Nigeria' work?"""
        formatted_name = get_city_country('lagos', 'nigeria')
        self.assertEqual(formatted_name, 'Lagos, Nigeria')

    def test_city_country_population(self):
        """Do inputs that also include a population work?"""
        formatted_name = get_city_country('lagos', 'nigeria', 30000000)
        # BUG FIX: the original computed the value but asserted nothing, so
        # this test could never fail.  NOTE(review): the expected string below
        # follows the usual exercise convention for this function — confirm
        # against city_functions.get_city_country.
        self.assertEqual(formatted_name, 'Lagos, Nigeria - population 30000000')
unittest.main() |
from django import template
from django.template import Library, Node
# Create your views here.
from django.core.context_processors import csrf
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template import Library
from django.http import HttpResponseRedirect
from operator import itemgetter
from django import forms
from django.contrib.auth.models import User
from book.models import Post
from users.models import UserInfo
from django.db.models import Count
from django.contrib import auth
# Module-level tag registry; the @register.tag decorators below attach each
# template tag defined in this file to it.
register = template.Library()
#class User(User):
#    location = GeoPointField()
class UserInfoNode(Node):
    """Template node that stores a UserInfo queryset in the render context."""

    def render(self, context):
        # NOTE(review): the id is hard-coded — presumably a placeholder user;
        # confirm before relying on this tag.
        context['userinfo'] = UserInfo.objects.filter(id='50c0a4dded8f5b082c235570')
        return ''
@register.tag(name='get_user_info')
def get_user_info(parser, token):
    """Template tag {% get_user_info %}: puts a UserInfo queryset in the context."""
    return UserInfoNode()
#Gett top 10 locations
class GetTopLocation(Node):
    """Template node that flattens every post's locations into
    context['top_locations']."""

    def render(self, context):
        # BUG FIX: removed the debug `print l` that ran on every render
        # (Python-2-only syntax, and it leaked to stdout in production).
        top_locations = []
        for post in Post.objects.all():
            # Each post carries a list of locations; collect them all.
            top_locations.extend(post.location)
        context['top_locations'] = top_locations
        return ''
@register.tag(name='get_top_loc')
def get_top_location(parser, token):
    """Template tag {% get_top_loc %}: stores all post locations in the context."""
    return GetTopLocation()
#Get all locations
class GetLocation(Node):
    """Template node that stores all posts ordered by location in the context."""

    def render(self, context):
        context['locations'] = Post.objects.order_by('location')
        return ''
@register.tag(name='get_location')
def get_location(parser, token):
    """Template tag {% get_location %}: stores posts ordered by location."""
    return GetLocation()
#Get Tags
class GetTopTags(Node):
    """Template node that flattens every post's tag list into context['top_tags']."""

    def render(self, context):
        collected = []
        for post in Post.objects.all():
            collected.extend(post.tags)
        context['top_tags'] = collected
        return ''
@register.tag(name='get_top_tags')
def get_top_tags(parser, token):
    """Template tag {% get_top_tags %}: stores all post tags in the context."""
    return GetTopTags()
#Get all tags
class GetTags(Node):
    """Template node that stores all posts ordered by tags in the context."""

    def render(self, context):
        context['tags'] = Post.objects.order_by('tags')
        return ''
@register.tag(name='get_tags')
def get_tags(parser, token):
    """Template tag {% get_tags %}: stores posts ordered by tags."""
    return GetTags()
class ArticelNode(Node):
    """Template node for the current user's articles ('Articel' sic).

    The body is entirely commented out, so it currently contributes nothing
    to the context.
    """

    def render(self, context):
        #currentuser = User.objects.get(id=request.user.id)
        #users_articel = Post.objects(auther=currentuser)
        #context['users_articel'] = users_articel
        return ''
@register.tag(name='get_users_articel')
def get_users_articel(parser, token):
    """Template tag {% get_users_articel %}: currently a no-op (see ArticelNode)."""
    return ArticelNode()
#!/usr/bin/python
import os
import sys
import unittest
from BeautifulSoup import BeautifulSoup
sys.path.insert(1, os.path.join(sys.path[0], '../../'))
from pixnet.spiders.blog import BlogSpider
class BlogTest(unittest.TestCase):
    """Checks BlogSpider's HTML helpers against two saved article pages."""

    def _load_soup(self, relative_path):
        """Parse an HTML fixture stored relative to this test file.

        Extracted to remove the duplicated open/read/parse code that setUp()
        repeated for each fixture.
        """
        dirname = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(dirname, relative_path), 'r') as f:
            return BeautifulSoup(f.read())

    def setUp(self):
        self.spider = BlogSpider()
        # soup1 is asserted "secret" below, soup2 public.
        self.soup1 = self._load_soup('testcase/329049123.html')
        self.soup2 = self._load_soup('testcase/43919692.html')

    def test_is_secret_aritcle(self):
        self.assertTrue(self.spider._is_secret_aritcle(self.soup1))
        self.assertFalse(self.spider._is_secret_aritcle(self.soup2))

    def test_get_next_link(self):
        # Neither fixture is expected to yield a next link.
        self.assertEqual("", self.spider._get_next_link(self.soup1))
        self.assertEqual("", self.spider._get_next_link(self.soup2))

    def test_get_prev_link(self):
        self.assertEqual("", self.spider._get_prev_link(self.soup1))
        self.assertEqual("http://bajenny.pixnet.net/blog/post/43823356-2016%e5%ae%9c%e8%98%ad%e5%9c%8b%e9%9a%9b%e7%ab%a5%e7%8e%a9%e7%af%80%7e%e7%ab%a5%e7%8e%a9%e7%af%80%e5%ae%9c%e8%98%ad%e6%b0%91%e5%ae%bf-%e5%ae%9c%e8%98%ad%e9%a3%af%e5%ba%97",
                         self.spider._get_prev_link(self.soup2))

    def tearDown(self):
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
"""Append a formatted recommendation report for MovieLens users 368, 81, 135.

Writes to data.txt: each user's top/bottom rated movies, user-user
correlations for user 368, computed recommendations, and item-item
similarities for two hand-picked movies.
"""
import recommendations

NEWLINE = '\n'
TAB = '\t'


def _ratings_best_first(prefs, user_id):
    """Return the user's (movie, rating) pairs sorted best-first (rating, then title)."""
    return sorted(prefs[user_id].items(), key=lambda kv: (kv[1], kv[0]), reverse=True)


def _write_rated(out, pairs):
    """Write up to three (movie, rating) pairs in the per-user rating format."""
    for name, rating in pairs[:3]:
        out.write(f'Name of Movie: {name} {TAB} Rating: {rating} {NEWLINE}')


def _write_correlations(out, matches):
    """Write (correlation, user_id) pairs, rounding each correlation."""
    for correlation, user_id in matches:
        out.write(f'User ID:{user_id} {TAB}Correlation Value: {round(correlation)}{NEWLINE}')


def _write_scored(out, pairs, label):
    """Write (score, movie) pairs in the 'Calculated <label>' format."""
    for score, name in pairs:
        out.write(f'Name of Movie: {name}{TAB} Calculated {label}: {score}{NEWLINE}')


pref = recommendations.loadMovieLens()
# Best-first rating lists for the three chosen users.
user_ratings = {uid: _ratings_best_first(pref, uid) for uid in ('368', '81', '135')}

# BUG FIX: the original shadowed the builtin `file` and never closed the
# handle; a with-block guarantees the report is flushed and closed.
with open("data.txt", 'a') as out:
    out.write(f'First User Chosen: {TAB} 368{NEWLINE}')
    out.write(f'Second User Chosen: {TAB} 81 {NEWLINE}')
    out.write(f'Third User Chosen: {TAB} 135 {NEWLINE}{NEWLINE}')

    out.write(f'First User Rating: {NEWLINE}')
    out.write(f'ID 368 Top 3 Rated Movies: {NEWLINE}{NEWLINE}')
    _write_rated(out, user_ratings['368'])
    out.write(f'{NEWLINE}ID 368 Bottom 3 Rated Movies: {NEWLINE}')
    _write_rated(out, user_ratings['368'][::-1])

    out.write(f'{NEWLINE}Second User Rating: {NEWLINE}')
    out.write(f'ID 81 Top 3 Rated Movies: {NEWLINE}{NEWLINE}')
    _write_rated(out, user_ratings['81'])
    out.write(f'{NEWLINE}ID 81 Bottom 3 Rated Movies: {NEWLINE}{NEWLINE}')
    _write_rated(out, user_ratings['81'][::-1])

    out.write(f'{NEWLINE}Third User Rating: {NEWLINE}')
    out.write(f'ID 135 Top 3 Movies: {NEWLINE}{NEWLINE}')
    _write_rated(out, user_ratings['135'])
    out.write(f'{NEWLINE}ID 135 Bottom 3 Rated Movies: {NEWLINE}{NEWLINE}')
    _write_rated(out, user_ratings['135'][::-1])

    out.write(f'{NEWLINE}{NEWLINE}Substitute User ID: 368 {NEWLINE}{NEWLINE}')
    # Most / least correlated users relative to user 368.
    closest_5 = recommendations.topMatches(pref, '368')
    furthest_5 = recommendations.worstMatches(pref, '368')
    out.write(f'Five other users with highest correlation: {NEWLINE}{NEWLINE}')
    _write_correlations(out, closest_5)
    out.write(f'{NEWLINE}Five other users with lowest correlation: {NEWLINE}')
    _write_correlations(out, furthest_5)

    # Recommendations come back as (rating, movie), best first.
    recommendedMovies = recommendations.getRecommendations(pref, '368')
    out.write(f'{NEWLINE}Computed Top 5 Movies to be Watched: {NEWLINE}')
    _write_scored(out, recommendedMovies[:5], 'Rating')
    out.write(f'{NEWLINE}Computed Bottom 5 Movies to be Watched: {NEWLINE}')
    _write_scored(out, recommendedMovies[::-1][:5], 'Rating')

    out.write(f'{NEWLINE}{NEWLINE}Favorite Movie: {TAB} Jurassic Park (1993){NEWLINE}')
    out.write(f'Least Favorite Movie: {TAB} Children of the Corn: The Gathering (1996){NEWLINE}{NEWLINE}')

    similarMovies = recommendations.calculateSimilarItems(pref)
    notSimilarMovies = recommendations.calculateLeastSimilarItems(pref)
    out.write(f'Top Recommended Movies to be Watched for Jurassic Park: {NEWLINE}')
    _write_scored(out, similarMovies['Jurassic Park (1993)'], 'Correlation')
    out.write(f'{NEWLINE}Bottom Recommended Movies to be Watched for Jurassic Park{NEWLINE}')
    _write_scored(out, notSimilarMovies['Jurassic Park (1993)'], 'Correlation')
    out.write(f'{NEWLINE}Top Recommended Movies to be Watched for Children of the Corn: {NEWLINE}')
    _write_scored(out, similarMovies['Children of the Corn: The Gathering (1996)'], 'Correlation')
    out.write(f'{NEWLINE}Bottom Recommended Movies to be Watched for Children of the Corn{NEWLINE}')
    _write_scored(out, notSimilarMovies['Children of the Corn: The Gathering (1996)'], 'Correlation')
#!/usr/bin/env python
import ROOT
import array
class Jet(object):
    """Minimal container for a jet four-vector (pt, eta, phi, mass)."""

    def __init__(self, pt, eta, phi, mass):
        # Store the four components exactly as given.
        self.pt, self.eta, self.phi, self.mass = pt, eta, phi, mass
class FSR(object):
    """Computes FSR/ISR candidate-jet branches and an FSR-corrected Higgs
    four-vector, buffering results per tree entry so multiple branch reads
    of the same event trigger only one computation."""

    def deltaR(self, jet1, jet2):
        """Return the eta-phi distance between two jets (phi wrapped into [-pi, pi])."""
        deltaPhi = ROOT.TVector2.Phi_mpi_pi(jet1.phi-jet2.phi)
        deltaEta = jet1.eta-jet2.eta
        return ROOT.TMath.Sqrt(deltaEta*deltaEta + deltaPhi*deltaPhi)

    def __init__(self, nano=False):
        # nano: nanoAOD-format flag; stored but not used elsewhere in this class.
        self.nano = nano
        self.debug = False
        self.lastEntry = -1      # last processed tree entry (cache key for processEvent)
        self.branches = []       # branch descriptors handed to the tree writer
        self.branchBuffers = {}  # branch name -> backing array buffer
        # FSR jet candidates
        self.branchBuffers['nfsr_Jet'] = array.array('i', [0])
        self.branches.append({'name': 'nfsr_Jet', 'formula': self.getBranch, 'arguments': 'nfsr_Jet', 'type': 'i'})
        self.fsrJetProperties = ['fsrJet_pt', 'fsrJet_eta', 'fsrJet_phi', 'fsrJet_mass', 'fsrJet_deltaR']
        for fsrJetProperty in self.fsrJetProperties:
            # Buffers hold 4 slots, although processEvent() fills at most 3.
            self.branchBuffers[fsrJetProperty] = array.array('f', [0.0, 0.0, 0.0, 0.0])
            self.branches.append({'name': fsrJetProperty, 'formula': self.getVectorBranch, 'arguments': {'branch': fsrJetProperty, 'length':4}, 'length': 4, 'leaflist': fsrJetProperty + '[nfsr_Jet]/F'})
        # corrected Higgs properties
        self.higgsProperties = ['HCMVAV2_reg_fsrCorr_pt', 'HCMVAV2_reg_fsrCorr_eta', 'HCMVAV2_reg_fsrCorr_phi', 'HCMVAV2_reg_fsrCorr_mass']
        for higgsProperty in self.higgsProperties:
            self.branchBuffers[higgsProperty] = array.array('f', [0.0])
            self.branches.append({'name': higgsProperty, 'formula': self.getBranch, 'arguments': higgsProperty})
        # ISR jet candidates (same layout as the FSR branches above)
        self.branchBuffers['nisr_Jet'] = array.array('i', [0])
        self.branches.append({'name': 'nisr_Jet', 'formula': self.getBranch, 'arguments': 'nisr_Jet', 'type': 'i'})
        self.isrJetProperties = ['isrJet_pt', 'isrJet_eta', 'isrJet_phi', 'isrJet_mass', 'isrJet_deltaR']
        for isrJetProperty in self.isrJetProperties:
            self.branchBuffers[isrJetProperty] = array.array('f', [0.0, 0.0, 0.0, 0.0])
            self.branches.append({'name': isrJetProperty, 'formula': self.getVectorBranch, 'arguments': {'branch': isrJetProperty, 'length':4}, 'length': 4, 'leaflist': isrJetProperty + '[nisr_Jet]/F'})

    def customInit(self, initVars):
        """Late-initialisation hook: remember the sample being processed."""
        self.sample = initVars['sample']

    def getBranches(self):
        """Return the branch descriptors registered in __init__."""
        return self.branches

    # read from buffers which have been filled in processEvent()
    def getBranch(self, event, arguments=None):
        self.processEvent(event)
        if arguments:
            return self.branchBuffers[arguments][0]

    # read from buffers which have been filled in processEvent()
    def getVectorBranch(self, event, arguments=None, destinationArray=None):
        self.processEvent(event)
        for i in range(arguments['length']):
            destinationArray[i] = self.branchBuffers[arguments['branch']][i]

    def processEvent(self, tree):
        """Fill all FSR/ISR buffers for the tree's current entry (at most once per entry)."""
        currentEntry = tree.GetReadEntry()
        # if current entry has not been processed yet
        if currentEntry != self.lastEntry:
            self.lastEntry = currentEntry
            # The two jets forming the Higgs candidate (regressed pt).
            higgsCandidateJets = []
            hJCMVAV2idx = [tree.hJCMVAV2idx[0], tree.hJCMVAV2idx[1]]
            for i in range(2):
                higgsCandidateJets.append(Jet(pt=tree.Jet_pt_reg[hJCMVAV2idx[i]], eta=tree.Jet_eta[hJCMVAV2idx[i]], phi=tree.Jet_phi[hJCMVAV2idx[i]], mass=tree.Jet_mass[hJCMVAV2idx[i]]))
            # find FSR/ISR candidates and sort by pT
            fsrCandidateJets = []
            isrCandidateJets = []
            for i in range(tree.nJet):
                additionalJet = Jet(pt=tree.Jet_pt[i], eta=tree.Jet_eta[i], phi=tree.Jet_phi[i], mass=tree.Jet_mass[i])
                if tree.Jet_pt[i] > 30:
                    # FSR: non-Higgs jet close (deltaR < 0.8) to either Higgs jet;
                    # ISR: non-Higgs central jet (|eta| < 2.4) that is not close.
                    # NOTE(review): puId == 7 presumably means "passes all pileup
                    # ID working points" — confirm against the ntuple definition.
                    if tree.Jet_puId[i] == 7 and i not in hJCMVAV2idx and min(self.deltaR(additionalJet, higgsCandidateJets[0]), self.deltaR(additionalJet, higgsCandidateJets[1])) < 0.8:
                        fsrCandidateJets.append(additionalJet)
                    elif tree.Jet_puId[i] == 7 and i not in hJCMVAV2idx and abs(tree.Jet_eta[i]) < 2.4:
                        isrCandidateJets.append(additionalJet)
            fsrCandidateJets.sort(key=lambda jet: jet.pt, reverse=True)
            isrCandidateJets.sort(key=lambda jet: jet.pt, reverse=True)
            # save up to 4 candidate jets
            # NOTE(review): despite the comment above, only up to 3 jets are
            # stored (min(..., 3)) although the buffers hold 4 slots — confirm
            # which limit is intended.
            self.branchBuffers['nfsr_Jet'][0] = min(len(fsrCandidateJets), 3)
            for i in range(self.branchBuffers['nfsr_Jet'][0]):
                self.branchBuffers['fsrJet_pt'][i] = fsrCandidateJets[i].pt
                self.branchBuffers['fsrJet_eta'][i] = fsrCandidateJets[i].eta
                self.branchBuffers['fsrJet_phi'][i] = fsrCandidateJets[i].phi
                self.branchBuffers['fsrJet_mass'][i] = fsrCandidateJets[i].mass
                self.branchBuffers['fsrJet_deltaR'][i] = min(self.deltaR(fsrCandidateJets[i], higgsCandidateJets[0]), self.deltaR(fsrCandidateJets[i], higgsCandidateJets[1]))
            self.branchBuffers['nisr_Jet'][0] = min(len(isrCandidateJets), 3)
            for i in range(self.branchBuffers['nisr_Jet'][0]):
                self.branchBuffers['isrJet_pt'][i] = isrCandidateJets[i].pt
                self.branchBuffers['isrJet_eta'][i] = isrCandidateJets[i].eta
                self.branchBuffers['isrJet_phi'][i] = isrCandidateJets[i].phi
                self.branchBuffers['isrJet_mass'][i] = isrCandidateJets[i].mass
                self.branchBuffers['isrJet_deltaR'][i] = min(self.deltaR(isrCandidateJets[i], higgsCandidateJets[0]), self.deltaR(isrCandidateJets[i], higgsCandidateJets[1]))
            # correct higgs by highest FSR jet
            higgs = ROOT.TLorentzVector()
            higgs.SetPtEtaPhiM(tree.HCMVAV2_reg_pt, tree.HCMVAV2_reg_eta, tree.HCMVAV2_reg_phi, tree.HCMVAV2_reg_mass)
            if len(fsrCandidateJets) > 0:
                fsr = ROOT.TLorentzVector()
                fsr.SetPtEtaPhiM(fsrCandidateJets[0].pt, fsrCandidateJets[0].eta, fsrCandidateJets[0].phi, fsrCandidateJets[0].mass)
                if self.debug:
                    print "Higgs: :", tree.HCMVAV2_reg_pt, tree.HCMVAV2_reg_eta, tree.HCMVAV2_reg_phi, tree.HCMVAV2_reg_mass
                    print " +FSR:", fsrCandidateJets[0].pt, fsrCandidateJets[0].eta, fsrCandidateJets[0].phi, fsrCandidateJets[0].mass
                    print " deltaR:", self.deltaR(fsrCandidateJets[0], higgsCandidateJets[0]), " / ", self.deltaR(fsrCandidateJets[0], higgsCandidateJets[1])
                    print " nFSR:", len(fsrCandidateJets)
                oldMass = higgs.M()
                higgs = higgs + fsr
                if self.debug:
                    # green if the correction moved the mass towards 125, else red
                    if abs(125-higgs.M()) < abs(125-oldMass):
                        print "\x1b[32m",
                    else:
                        print "\x1b[31m",
                    print " -> ", higgs.Pt(), higgs.Eta(), higgs.Phi(), higgs.M(), "\x1b[0m"
            self.branchBuffers['HCMVAV2_reg_fsrCorr_pt'][0] = higgs.Pt()
            self.branchBuffers['HCMVAV2_reg_fsrCorr_eta'][0] = higgs.Eta()
            self.branchBuffers['HCMVAV2_reg_fsrCorr_phi'][0] = higgs.Phi()
            self.branchBuffers['HCMVAV2_reg_fsrCorr_mass'][0] = higgs.M()
        return True
|
from app import db
class CustomerApplication(db.Model):
    """Credit-card application record as submitted by a customer."""
    id = db.Column(db.Integer, primary_key=True)
    firstName = db.Column(db.String(20))
    lastName = db.Column(db.String(20))
    dateOfBirth = db.Column(db.Date)
    address = db.Column(db.String(30))
    city = db.Column(db.String(30))
    zipcode = db.Column(db.String(10))
    monthly_income = db.Column(db.Integer)
    cardType = db.Column(db.String(20))
    application_type = db.Column(db.String(1))
    occupation = db.Column(db.String(20))
    contact_number = db.Column(db.String(15))
    error_details = db.Column(db.String(40))
    # BUG FIX: was db.column (lowercase), SQLAlchemy's low-level column()
    # helper, so ppsn was never mapped as a model attribute/table column.
    ppsn = db.Column(db.Integer)
    email = db.Column(db.String(30))
    # NOTE(review): __init__ accepts 'eligibility' but no matching column is
    # declared, so that value is never persisted — confirm intent.

    def __init__(self, firstName=None, lastName=None, dateOfBirth=None, address=None,
                 city=None, zipcode=None, monthly_income=None, cardType=None,
                 occupation=None, ppsn=None, email=None, contact_number=None,
                 application_type=None, eligibility=None, error_details=None):
        self.firstName = firstName
        self.lastName = lastName
        self.dateOfBirth = dateOfBirth
        self.address = address
        self.monthly_income = monthly_income
        self.application_type = application_type
        self.occupation = occupation
        self.eligibility = eligibility
        self.error_details = error_details
        self.contact_number = contact_number
        self.ppsn = ppsn
        self.city = city
        self.zipcode = zipcode
        self.cardType = cardType
        self.email = email

    def CollectApplications(self):
        """Persist this application (creates tables on first use, then commits)."""
        # NOTE(review): db.create_all() on every insert is wasteful but kept
        # for backward compatibility with existing callers.
        db.create_all()
        db.session.add(self)
        db.session.commit()
#!/usr/bin/env python
#coding=utf-8
import math
import rospy
from goap_2021.srv import *
from precondition_little import *
from setting_little_goap import *
from goap_little_server import *
def GOAP_script(req):
    """Service handler returning parallel (action, position, cup) plan lists.

    The plan is kept in module-level globals so it persists between service
    calls: a scripted mission list is generated once (while counter_scripts
    is 0) and the oldest entry is popped on each subsequent call.  position
    holds 3 floats (x, y, theta) per action; cup holds 3 ints per action.
    """
    global penalty_mission
    global counter
    global action
    global position
    global cup
    global counter_scripts
    global previous_team
    print("len action", len(action))
    if req.emergency == True:
        # Emergency: prepend an immediate stop (action 0) at the current pose.
        # NOTE(review): my_pos[0..2] are each inserted at index 0, so they end
        # up stored in reversed (z, y, x) order — confirm consumers expect this.
        action.insert( 0,0)
        position.insert( 0,req.my_pos[0])
        position.insert( 0, req.my_pos[1])
        position.insert( 0,req.my_pos[2] )
        cup.insert( 0, 0)
        cup.insert( 0,0)
        cup.insert( 0,0)
        return action, position, cup
    if len(action) == 0 + 1 or (req.team != 2 and req.team != previous_team):
        # Queue almost empty, or the team changed: seed with a placeholder
        # action at the robot's current pose.
        action.append(0)
        position.append(req.my_pos[0])
        position.append(req.my_pos[1])
        position.append(req.my_pos[2])
        cup.append(0)
        cup.append(0)
        cup.append(0)
    if req.emergency == False and counter_scripts == 0: #blue team script
        (current, robot1) = mission_precondition(req)
        if req.team == 0:
            # Team 0: sentinel mission 404 (no real script defined).
            if req.ns == False:
                scrpit_mission =[404]
            elif req.ns == True:
                scrpit_mission = [404]
            position_script = [404,404,404]
        elif req.team == 1:
            if req.ns == False:
                scrpit_mission =[31,7,26,27,11]
                position_script = [ 1078, 2804,math.pi,352.8, 2070, math.pi, 156.346, 2041.4, -2.999581,
                                    416.851, 2077.254, -2.999581,
                                    1208.911,2738.020,-0.587998]
            elif req.ns == True:
                scrpit_mission =[31,7,26,27,11,30,5]
                position_script = [1100, 2800,math.pi, 80, 2150, math.pi, 50, 2150, math.pi, 80, 2150, math.pi, 450,2500,math.pi, 950,2500,math.pi,1300.0, 2500.0, math.pi ]
        count_script = 0
        count_cup = 0  # NOTE(review): never used below
        # Append each scripted action with its (x, y, theta) pose and a
        # zeroed cup triplet.
        while count_script < len( scrpit_mission):
            action.append(scrpit_mission[count_script])
            position.append(position_script[ 3* count_script])
            position.append(position_script[ 3* count_script + 1])
            position.append(position_script[ 3* count_script + 2])
            cup.append(0)
            cup.append(0)
            cup.append(0)
            count_script += 1 #for appending next action
    #pop old action
    if counter_scripts > 0 and req.emergency == False:
        action.pop(0)
        position.pop(0)
        position.pop(0)
        position.pop(0)
        cup.pop(0)
        cup.pop(0)
        cup.pop(0)
    counter_scripts += 1
    previous_team = req.team
    for a in range(0, len(action)):
        print( a, "mission", action[a], "position", position[3*a], position[3*a + 1], position[3*a + 2], "cup", cup[ 3* a], cup[ 3* a + 1], cup[ 3* a + 2])
    return action, position, cup
import glob
import json
#open the json files
def combined_function():
    """Merge city populations with per-city library book totals.

    Reads data/cities.json and data/libraries.json and writes
    data/combined.json mapping each city name to its population and the sum
    of books across that city's libraries (0 when it has none).
    """
    with open('data/cities.json', "r") as infile:
        cities = json.load(infile)
    with open('data/libraries.json', "r") as infile:
        libraries = json.load(infile)
    # Sum books per city in one pass (O(cities + libraries)) instead of
    # rescanning the whole library list for every city.
    books_by_city = {}
    for library in libraries:
        books_by_city[library['city']] = books_by_city.get(library['city'], 0) + library['books']
    combined = {}
    for city in cities:
        combined[city['name']] = {
            "population": city["population"],
            "books": books_by_city.get(city['name'], 0),
        }
    # write to a new json file
    with open('data/combined.json', 'w') as f:
        json.dump(combined, f)
combined_function() |
#%%
import os
import sys
import random
import numpy as np
import pickle as pkl
import shelve
import itertools as it
import networkx as nx
import scipy.sparse as sp
from utils import loadWord2Vec, clean_str
from math import log
from sklearn import svm
from nltk.corpus import wordnet as wn
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.spatial.distance import cosine
# Require the dataset/corpus name as the single CLI argument.
if len(sys.argv) != 2:
    sys.exit("Use: python build_graph.py <dataset>")
datasets = ['20ng', 'R8', 'R52', 'ohsumed', 'mr']  # known corpora (check disabled below)
# build corpus
dataset = sys.argv[1]
# dataset = 'np'
# from time import time
# t0 = time()
# if dataset not in datasets:
#     sys.exit("wrong dataset name")
# Read Word Vectors
# word_vector_file = 'data/glove.6B/glove.6B.300d.txt'
# word_vector_file = 'data/corpus/' + dataset + '_word_vectors.txt'
#_, embd, word_vector_map = loadWord2Vec(word_vector_file)
# word_embeddings_dim = len(embd[0])
word_embeddings_dim = 300  # dimensionality of the (random) word vectors below
word_vector_map = {}       # empty: no pretrained vectors are loaded
# shuffling
doc_name_list = []   # all metadata lines: "<name>\t<split>\t<label>"
doc_train_list = []
doc_test_list = []
doc_pred_list = []   # "target" split: unlabeled docs to predict
for line in open('data/' + dataset + '.txt', 'r'):
    doc_name_list.append(line.strip())
    temp = line.split("\t")
    # The split name is the second tab-separated field.
    if temp[1].find('test') != -1:
        doc_test_list.append(line.strip())
    elif temp[1].find('train') != -1:
        doc_train_list.append(line.strip())
    elif temp[1].find('target') != -1:
        doc_pred_list.append(line.strip())
doc_content_list = []
for line in open('data/corpus/' + dataset + '.clean.txt', 'r'):
    doc_content_list.append(line.strip())
train_ids = []
for train_name in doc_train_list:
    train_id = doc_name_list.index(train_name)
    train_ids.append(train_id)
random.shuffle(train_ids)
# partial labeled data
#train_ids = train_ids[:int(0.2 * len(train_ids))]
with open('data/' + dataset + '.train.index', 'w') as f:
    f.write('\n'.join(map(str, train_ids)))
test_ids = []
for test_name in doc_test_list:
    test_id = doc_name_list.index(test_name)
    test_ids.append(test_id)
random.shuffle(test_ids)
pred_ids = []
for pred_name in doc_pred_list:
    pred_id = doc_name_list.index(pred_name)
    pred_ids.append(pred_id)
random.shuffle(pred_ids)
with open('data/' + dataset + '.test.index', 'w') as f:
    f.write('\n'.join(map(str, test_ids)))
with open('data/' + dataset + '.target.index', 'w') as f:
    f.write('\n'.join(map(str, pred_ids)))
# Re-order metadata and content into shuffled train/test/pred order.
shuffled_metadata = []
shuffled_doc_list = []
for ind in train_ids + test_ids + pred_ids:
    shuffled_metadata.append(doc_name_list[ind])
    shuffled_doc_list.append(doc_content_list[ind])
with open('data/' + dataset + '_shuffle.txt', 'w') as f:
    f.write('\n'.join(shuffled_metadata))
with open('data/corpus/' + dataset + '_shuffle.txt', 'w') as f:
    f.write('\n'.join(shuffled_doc_list))
# build vocab
word_freq = {}   # word -> corpus frequency
word_set = set()
for doc_words in shuffled_doc_list:
    words = doc_words.split()
    for word in words:
        word_set.add(word)
        if word in word_freq:
            word_freq[word] += 1
        else:
            word_freq[word] = 1
vocab = list(word_set)
vocab_size = len(vocab)
# x: feature vectors of training docs, no initial features
# select 90% training set
total_size = len(shuffled_doc_list)
train_size = len(train_ids)
test_size = len(test_ids)
learning_size = train_size + test_size  # labeled portion (train + test)
pred_size = len(pred_ids)
val_size = int(0.1 * train_size)
real_train_size = train_size - val_size  # - int(0.5 * train_size)
# Graph nodes: train docs, then vocab words, then test docs, then pred docs.
node_size = train_size + vocab_size + test_size + pred_size
word_doc_list = {}   # word -> list of doc indices containing it
for i in range(total_size):
    doc_words = shuffled_doc_list[i]
    words = doc_words.split()
    for word in set(words):
        if word in word_doc_list:
            doc_list = word_doc_list[word]
            doc_list.append(i)
            word_doc_list[word] = doc_list
        else:
            word_doc_list[word] = [i]
word_doc_freq = {}   # word -> document frequency (used for IDF below)
for word, doc_list in word_doc_list.items():
    word_doc_freq[word] = len(doc_list)
word_id_map = {vocab[i]: i for i in range(vocab_size)}
with open('data/corpus/' + dataset + '_vocab.txt', 'w') as f:
    f.write('\n'.join(vocab))
#%%
'''
Word definitions begin
'''
# import nltk
# nltk.download('wordnet')
# definitions = []
# for word in vocab:
# word = word.strip()
# synsets = wn.synsets(clean_str(word))
# word_defs = []
# for synset in synsets:
# syn_def = synset.definition()
# word_defs.append(syn_def)
# word_des = ' '.join(word_defs)
# if word_des == '':
# word_des = '<PAD>'
# definitions.append(word_des)
# with open('data/corpus/' + dataset + '_vocab_def.txt', 'w') as f:
# f.write('\n'.join(definitions))
# tfidf_vec = TfidfVectorizer(max_features=1000)
# tfidf_matrix = tfidf_vec.fit_transform(definitions)
# tfidf_matrix_array = tfidf_matrix.toarray()
# word_vectors = []
# for i in range(len(vocab)):
# word = vocab[i]
# vector = tfidf_matrix_array[i]
# str_vector = []
# for j in range(len(vector)):
# str_vector.append(str(vector[j]))
# temp = ' '.join(str_vector)
# word_vector = word + ' ' + temp
# word_vectors.append(word_vector)
# def loadWord2Vec(filename):
# """Read Word Vectors"""
# vocab = []
# embd = []
# word_vector_map = {}
# for line in open(filename, 'r'):
# row = line.strip().split(' ')
# if(len(row) > 2):
# vocab.append(row[0])
# vector = row[1:]
# length = len(vector)
# for i in range(length):
# vector[i] = float(vector[i])
# embd.append(vector)
# word_vector_map[row[0]] = vector
# print('Loaded Word Vectors!')
# return vocab, embd, word_vector_map
# word_vector_file = 'data/corpus/' + dataset + '_word_vectors.txt'
# with open(word_vector_file, 'w') as f:
# f.write('\n'.join(word_vectors))
# _, embd, word_vector_map = loadWord2Vec(word_vector_file)
# word_embeddings_dim = len(embd[0])
'''
Word definitions end
'''
#%%
# Random vectors for every vocabulary word; a word's row is overwritten by a
# pretrained vector when present in word_vector_map (empty in this setup).
word_vectors = np.random.uniform(-0.01, 0.01, (vocab_size, word_embeddings_dim))
for i in range(vocab_size):
    word = vocab[i]
    if word in word_vector_map:
        vector = word_vector_map[word]
        word_vectors[i] = vector
# label list (third tab-separated field of each metadata line)
label_list = list({meta.split('\t')[2] for meta in shuffled_metadata})
def create_label_matrix(doc_name_list, label_list):
    """Return a one-hot matrix of shape (len(doc_name_list), len(label_list)).

    Each metadata line is tab-separated with the label in the third field;
    row i is the one-hot encoding of document i's label.
    """
    identity = np.identity(len(label_list))
    row_indices = [label_list.index(meta.split('\t')[2]) for meta in doc_name_list]
    return identity[row_indices]
with open('data/corpus/' + dataset + '_labels.txt', 'w') as f:
    f.write('\n'.join(label_list))
# tx: feature vectors of test docs, no initial features
row_tx = []
col_tx = []
data_tx = []
for i in range(test_size):
    # Mean of the pretrained vectors of the doc's words (all zeros here,
    # since word_vector_map is empty in this configuration).
    doc_vec = np.zeros(word_embeddings_dim)
    doc_words = shuffled_doc_list[i + train_size]
    words = doc_words.split()
    for word in words:
        if word in word_vector_map:
            doc_vec = doc_vec + np.array(word_vector_map[word])
    for j in range(word_embeddings_dim):
        row_tx.append(i)
        col_tx.append(j)
        # np.random.uniform(-0.25, 0.25)
        data_tx.append(doc_vec[j] / len(words))
tx = sp.csr_matrix((data_tx, (row_tx, col_tx)), shape=(test_size, word_embeddings_dim))
ty = create_label_matrix(shuffled_metadata[train_size:train_size+test_size], label_list)
# px: feature vectors of prediction docs, no initial features
row_px = []
col_px = []
data_px = []
for i in range(pred_size):
    doc_vec = np.zeros(word_embeddings_dim)
    doc_words = shuffled_doc_list[i + learning_size]
    words = doc_words.split()
    for word in words:
        if word in word_vector_map:
            doc_vec = doc_vec + np.array(word_vector_map[word])
    for j in range(word_embeddings_dim):
        row_px.append(i)
        col_px.append(j)
        # np.random.uniform(-0.25, 0.25)
        data_px.append(doc_vec[j] / len(words))
px = sp.csr_matrix((data_px, (row_px, col_px)), shape=(pred_size, word_embeddings_dim))
py = create_label_matrix(shuffled_metadata[learning_size:learning_size+pred_size], label_list)
# allx: the feature vectors of both labeled and unlabeled training instances
# unlabeled training instances -> words
row_allx = []
col_allx = []
data_allx = []
for i in range(train_size):
    doc_vec = np.zeros(word_embeddings_dim)
    doc_words = shuffled_doc_list[i]
    words = doc_words.split()
    for word in words:
        if word in word_vector_map:
            doc_vec = doc_vec + np.array(word_vector_map[word])
    for j in range(word_embeddings_dim):
        row_allx.append(i)
        col_allx.append(j)
        # np.random.uniform(-0.25, 0.25)
        data_allx.append(doc_vec[j] / len(words))
# Word nodes take their (random) word vectors as features.
for i in range(vocab_size):
    for j in range(word_embeddings_dim):
        row_allx.append(i + train_size)
        col_allx.append(j)
        data_allx.append(word_vectors.item((i, j)))
allx = sp.csr_matrix((data_allx, (row_allx, col_allx)), shape=(train_size + vocab_size, word_embeddings_dim))
# Word nodes get all-zero label rows.
ally = np.r_[create_label_matrix(shuffled_metadata[:train_size], label_list),
             np.zeros((vocab_size,len(label_list)))]
# print(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)
'''
Doc word heterogeneous graph
'''
# word co-occurence with context windows
window_size = 20
windows = []   # every sliding window of up to window_size words over every doc
for doc_words in shuffled_doc_list:
    words = doc_words.split()
    length = len(words)
    if length <= window_size:
        windows.append(words)
    else:
        for j in range(length - window_size + 1):
            window = words[j: j + window_size]
            windows.append(window)
word_window_freq = {}   # word -> number of windows containing it
for window in windows:
    for word in set(window):
        if word in word_window_freq:
            word_window_freq[word] += 1
        else:
            word_window_freq[word] = 1
word_pair_count = {}    # (word_id, word_id) -> co-occurrence count
for window in windows:
    for i in range(1, len(window)):
        for j in range(0, i):
            word_i_id = word_id_map[window[i]]
            word_j_id = word_id_map[window[j]]
            if word_i_id == word_j_id:
                continue
            ij_pair = (word_i_id, word_j_id)
            if ij_pair in word_pair_count:
                word_pair_count[ij_pair] += 1
            else:
                word_pair_count[ij_pair] = 1
            # two orders
            ji_pair = (word_j_id, word_i_id)
            if ji_pair in word_pair_count:
                word_pair_count[ji_pair] += 1
            else:
                word_pair_count[ji_pair] = 1
row = []
col = []
weight = []
# pmi as weights
num_window = len(windows)
for key in word_pair_count:
    i, j = key
    count = word_pair_count[key]
    word_freq_i = word_window_freq[vocab[i]]
    word_freq_j = word_window_freq[vocab[j]]
    pmi = log((count / num_window) / (word_freq_i * word_freq_j/(num_window **2)))
    # Only positive-PMI pairs become word-word edges.
    if pmi <= 0:
        continue
    row.append(train_size + i)
    col.append(train_size + j)
    weight.append(pmi)
# word vector cosine similarity as weights
'''
for i in range(vocab_size):
    for j in range(vocab_size):
        if vocab[i] in word_vector_map and vocab[j] in word_vector_map:
            vector_i = np.array(word_vector_map[vocab[i]])
            vector_j = np.array(word_vector_map[vocab[j]])
            similarity = 1.0 - cosine(vector_i, vector_j)
            if similarity > 0.9:
                print(vocab[i], vocab[j], similarity)
                row.append(train_size + i)
                col.append(train_size + j)
                weight.append(similarity)
'''
# doc word frequency
doc_word_freq = {}   # (doc_id, word_id) -> term frequency
for doc_id in range(total_size):
    words = shuffled_doc_list[doc_id].split()
    for word in words:
        word_id = word_id_map[word]
        doc_word_pair = (doc_id, word_id)
        if doc_word_pair in doc_word_freq:
            doc_word_freq[doc_word_pair] += 1
        else:
            doc_word_freq[doc_word_pair] = 1
# Doc-word edges weighted by TF-IDF; test/pred doc node ids are shifted past
# the vocab block (see node_size layout above).
for doc_id in range(total_size):
    words = shuffled_doc_list[doc_id].split()
    for word in set(words):
        word_id = word_id_map[word]
        freq = doc_word_freq[(doc_id, word_id)]
        if doc_id < train_size:
            row.append(doc_id)
        else:
            row.append(doc_id + vocab_size)
        col.append(train_size + word_id)
        idf = log(total_size / word_doc_freq[vocab[word_id]])
        weight.append(freq * idf)
adj = sp.csr_matrix((weight, (row, col)), shape=(node_size, node_size))
#%% load corpus
features = sp.vstack((allx, tx, px)).tolil()
targets = np.vstack((ally, ty, py))
# Symmetrise: keep the larger of adj[i, j] and adj[j, i].
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj) #???
# Persist everything the downstream training script needs.
with shelve.open('data/' + dataset + '.shelve') as d:
    d.clear()
    d['adj'] = adj
    d['features'] = features
    d['targets'] = targets
    d['train_size'] = train_size
    d['real_train_size'] = real_train_size
    d['val_size'] = val_size
    d['test_size'] = test_size
    d['vocab_size'] = vocab_size
    d['node_size'] = node_size
    d['pred_size'] = pred_size
    d['label_list'] = label_list
# print(time()-t0)
#%%
# # dump objects
# with open("data/ind.{}.x".format(dataset), 'wb') as f:
# pkl.dump(x, f)
# with open("data/ind.{}.y".format(dataset), 'wb') as f:
# pkl.dump(y, f)
# with open("data/ind.{}.tx".format(dataset), 'wb') as f:
# pkl.dump(tx, f)
# with open("data/ind.{}.ty".format(dataset), 'wb') as f:
# pkl.dump(ty, f)
# with open("data/ind.{}.allx".format(dataset), 'wb') as f:
# pkl.dump(allx, f)
# with open("data/ind.{}.ally".format(dataset), 'wb') as f:
# pkl.dump(ally, f)
# with open("data/ind.{}.adj".format(dataset), 'wb') as f:
# pkl.dump(adj, f)
|
import os
import boto3
import datetime

if __name__ == '__main__':
    # Mirror today's simulation output from S3 into the local working directory,
    # preserving the key layout under ANFDM/<YYYY-MM-DD>/.
    sim_day = str(datetime.date.today())
    client = boto3.resource('s3')
    bucket = client.Bucket('active-matter-simulations')
    for obj in bucket.objects.filter(Prefix=f'ANFDM/{sim_day}'):
        print(obj.key)
        # Skip S3 "directory" placeholder keys — they have no body to download.
        if obj.key.endswith('/'):
            continue
        parent = os.path.dirname(obj.key)
        if parent:
            # exist_ok avoids the race between the existence check and makedirs.
            os.makedirs(parent, exist_ok=True)
        bucket.download_file(obj.key, obj.key)
|
# this file contains the class for the DenseNet neural network
# reference : pytorch - DenseNet
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
from collections import OrderedDict
from torch import Tensor
from typing import Any
class _Transition(nn.Sequential):
"""
Transition layers after a dense block.
Transition layers structure :
(norm):
BatchNorm2d(num_input_features)
(relu):
ReLU(inplace=True)
(conv):
Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1)
(pool):
AvgPool2d(kernel_size=2, stride=2)
"""
def __init__(self, num_input_features: int, num_output_features: int) -> None:
"""
:param num_input_features : int, input size features
:param num_output_features : int, output size features
"""
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class _DenseLayer(nn.Module):
"""
Define one dense layer.
Dense layer stucture :
(norm1):
BatchNorm2d(num_input_features)
(relu1):
ReLU(inplace=True)
(conv1):
Conv2d(num_input_features, bn_size*growth_rate, kernel_size=1, stride=1, bias=False)
(norm2):
BatchNorm2d(bn_size*growth_rate)
(relu2):
ReLU(inplace=True)
(conv2):
Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)
"""
def __init__(self, num_input_features:int, growth_rate:int, bn_size:int, drop_rate:float) -> None:
"""
:param num_input_features : int, number of input channels
:param growth_rate : int, number of output channels of dense layers
:param bn_size : int, intermediate layer size factor such that interm_size = bn_size*growth_rate
:param drop_rate : float, dropout rate
"""
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)),
self.drop_rate = float(drop_rate)
def bn_function(self, inputs: torch.Tensor):
"""
:param inputs : torch.Tensor, input list of tensor
:return : torch.Tensor, concatenate input tensors
"""
# type: (List[Tensor]) -> Tensor
concated_features = torch.cat(inputs, 1)
bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484
return bottleneck_output
def forward(self, input):
"""
:param inputs : torch.Tensor, input list of tensor
:return : torch.Tensor, concatenate input tensors
"""
if isinstance(input, Tensor):
prev_features = [input]
else:
prev_features = input
bottleneck_output = self.bn_function(prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate,
training=self.training)
return new_features
class _DenseBlock(nn.ModuleDict):
    """Dense block: *num_layers* dense layers with concat connectivity.

    Layer i receives num_input_features + i * growth_rate channels; the block
    output is the input concatenated with every layer's new features.
    """

    _version = 2

    def __init__(self, num_layers: int, num_input_features: int, bn_size: int, growth_rate: int, drop_rate: float) -> None:
        """
        :param num_layers: int, number of dense layers in the block
        :param num_input_features: int, number of input channels
        :param bn_size: int, bottleneck factor (interm_size = bn_size*growth_rate)
        :param growth_rate: int, channels added per dense layer
        :param drop_rate: float, dropout rate between layers
        """
        super(_DenseBlock, self).__init__()
        for idx in range(num_layers):
            self.add_module(
                'denselayer%d' % (idx + 1),
                _DenseLayer(
                    num_input_features + idx * growth_rate,
                    growth_rate=growth_rate,
                    bn_size=bn_size,
                    drop_rate=drop_rate,
                ),
            )

    def forward(self, init_features):
        """
        :param init_features: torch.Tensor, block input
        :return: torch.Tensor, input and all new features concatenated on dim 1
        """
        features = [init_features]
        for layer in self.values():
            features.append(layer(features))
        return torch.cat(features, 1)
class DenseNet(nn.Module):
    """Densely-connected CNN (DenseNet, Huang et al. 2017).

    Stem conv/pool -> dense blocks with transition layers between them ->
    final BatchNorm -> global average pool -> linear head with a sigmoid,
    so the forward pass yields per-class values in [0, 1].
    """

    def __init__(self, growth_rate: int = 32, block_config: tuple = (6, 12, 24, 16),
                 num_init_features: int = 64, bn_size: int = 4, drop_rate: float = 0,
                 in_channels: int = 1, out_channels: int = 2) -> None:
        """
        :param growth_rate: int, channels added by each dense layer - default: 32
        :param block_config: tuple, dense layers per dense block - default: (6, 12, 24, 16)
        :param num_init_features: int, channels produced by the stem conv - default: 64
        :param bn_size: int, bottleneck factor (interm_size = bn_size*growth_rate) - default: 4
        :param drop_rate: float, dropout rate between layers - default: 0
        :param in_channels: int, number of channels in the image - default: 1
        :param out_channels: int, number of labels for classification - default: 2
        """
        super(DenseNet, self).__init__()
        # Stem: convolution and pooling part from table 1 of the paper.
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(in_channels, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        # Add multiple dense blocks based on config
        # (for densenet-121 the config is [6, 12, 24, 16]).
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
            )
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                # Transition layer between dense blocks: halves the channels
                # and downsamples the spatial resolution.
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        # Linear classification head
        self.classifier = nn.Linear(num_features, out_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        :param x: torch.Tensor, image batch of shape (N, in_channels, H, W)
        :return: torch.Tensor, per-class sigmoid outputs of shape (N, out_channels)
        """
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        # torch.sigmoid replaces the deprecated F.sigmoid (removed upstream).
        out = torch.sigmoid(self.classifier(out))
        return out
def _densenet(arch: str, growth_rate: int, block_config: tuple, num_init_features: int, in_channels: int, out_channels: int, **kwargs) -> DenseNet:
    """Build a DenseNet with the given configuration.

    :param arch: string, name of the architecture (label only — the numeric
        configuration fully determines the model)
    :param growth_rate: int, channels added by each dense layer
    :param block_config: tuple, dense layers per dense block
    :param num_init_features: int, channels produced by the stem conv
    :param in_channels: int, number of input channels
    :param out_channels: int, number of labels for classification
    :return: DenseNet, the constructed model
    """
    return DenseNet(growth_rate, block_config, num_init_features,
                    in_channels=in_channels, out_channels=out_channels, **kwargs)
def densenet121(in_channels: int = 1, out_channels: int = 2, **kwargs) -> DenseNet:
    """Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    :param in_channels: int, number of input channels - default: 1
    :param out_channels: int, number of labels for classification - default: 2
    :return: DenseNet, DenseNet-121 model
    """
    growth, blocks, init_feats = 32, (6, 12, 24, 16), 64
    return _densenet('densenet121', growth, blocks, init_feats, in_channels, out_channels, **kwargs)
def densenet161(in_channels: int = 1, out_channels: int = 2, **kwargs: Any) -> DenseNet:
    """Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    :param in_channels: int, number of input channels - default: 1
    :param out_channels: int, number of labels for classification - default: 2
    :return: DenseNet, DenseNet-161 model
    """
    growth, blocks, init_feats = 48, (6, 12, 36, 24), 96
    return _densenet('densenet161', growth, blocks, init_feats, in_channels, out_channels, **kwargs)
|
# from mpl_toolkits import mplot3d
#
# import numpy as np
# import matplotlib.pyplot as plt
#
# fig = plt.figure()
# ax = plt.axes(projection="3d")
#
# z_line = np.linspace(0, 15, 1000)
# x_line = np.cos(z_line)
# y_line = np.sin(z_line)
# ax.plot3D(x_line, y_line, z_line, 'gray')
#
# z_points = 15 * np.random.random(100)
# x_points = np.cos(z_points) + 0.1 * np.random.randn(100)
# y_points = np.sin(z_points) + 0.1 * np.random.randn(100)
# ax.scatter3D(x_points, y_points, z_points, c=z_points, cmap='hsv')
#
# plt.show()
# %matplotlib notebook
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from benchmarks import functions as fun
fig = plt.figure()
# Create the 3D axes via keyword: Figure.gca(projection=...) was removed in
# Matplotlib 3.6; add_subplot(projection='3d') is the supported replacement.
ax = fig.add_subplot(projection='3d')

# Make data: sample z = sqrt(x^2 + y^2) (a cone) on a regular grid.
X = np.arange(0, 40, 0.25)
Y = np.arange(0, 40, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X ** 2 + Y ** 2)
Z = R

# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, alpha=0.8,
                       linewidth=0, antialiased=False)

# Customize the z axis: 10 evenly spaced ticks with 2-decimal labels.
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))

# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)

ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_zlabel('z label')
plt.show()
# import numpy as np
# import matplotlib.pyplot as plt
# import mpl_toolkits.mplot3d.axes3d as p3
# import matplotlib.animation as animation
#
# # Fixing random state for reproducibility
# np.random.seed(19680801)
#
#
# def Gen_RandLine(length, dims=2):
# """
# Create a line using a random walk algorithm
#
# length is the number of points for the line.
# dims is the number of dimensions the line has.
# """
# lineData = np.empty((dims, length))
# lineData[:, 0] = np.random.rand(dims)
# for index in range(1, length):
# # scaling the random numbers by 0.1 so
# # movement is small compared to position.
# # subtraction by 0.5 is to change the range to [-0.5, 0.5]
# # to allow a line to move backwards.
# step = ((np.random.rand(dims) - 0.5) * 0.1)
# lineData[:, index] = lineData[:, index - 1] + step
#
# return lineData
#
#
# def update_lines(num, dataLines, lines):
# for line, data in zip(lines, dataLines):
# # NOTE: there is no .set_data() for 3 dim data...
# line.set_data(data[0:2, :num])
# line.set_3d_properties(data[2, :num])
# return lines
#
# # Attaching 3D axis to the figure
# fig = plt.figure()
# ax = p3.Axes3D(fig)
#
# # Fifty lines of random 3-D lines
# data = [Gen_RandLine(25, 3) for index in range(50)]
#
# # Creating fifty line objects.
# # NOTE: Can't pass empty arrays into 3d version of plot()
# lines = [ax.plot(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1])[0] for dat in data]
#
# # Setting the axes properties
# ax.set_xlim3d([0.0, 1.0])
# ax.set_xlabel('X')
#
# ax.set_ylim3d([0.0, 1.0])
# ax.set_ylabel('Y')
#
# ax.set_zlim3d([0.0, 1.0])
# ax.set_zlabel('Z')
#
# ax.set_title('3D Test')
#
# # Creating the Animation object
# line_ani = animation.FuncAnimation(fig, update_lines, 25, fargs=(data, lines),
# interval=50, blit=False)
#
# plt.show()
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.utils.module_loading import import_string
# Dotted path of the class that renders and sends templated email;
# overridable through settings.SKY_TEMPLATE_EMAIL_SENDER.
TEMPLATE_EMAIL_SENDER = getattr(settings, 'SKY_TEMPLATE_EMAIL_SENDER', 'sky_visitor.template_email_senders.DjangoTemplateSender')
# Resolve the dotted path to the actual class once, at import time.
TEMPLATE_EMAIL_SENDER_CLASS = import_string(TEMPLATE_EMAIL_SENDER)
# Off by default. NOTE(review): presumably controls whether outgoing mail may
# include the user's password — confirm against the sender implementation.
SEND_USER_PASSWORD = getattr(settings, 'SKY_SEND_USER_PASSWORD', False)
|
from src.api import API as API
class Example:
    """Thin facade over :class:`src.api.API`.

    Both methods take no instance state, so they are declared static: the
    original bare functions worked when called on the class (Python 3 has no
    unbound-method check) but raised TypeError when called on an instance.
    """

    @staticmethod
    def static_init() -> None:
        """Initialise the underlying API client."""
        return API.static_init()

    @staticmethod
    def download_wp_media() -> None:
        """Download every media item exposed by the API."""
        return API.download_all_media()


Example.static_init()
Example.download_wp_media()
# coding=utf-8
import tensorflow as tf
from bert import tokenization, modeling
import os
from bert.run_classifier import convert_single_example_simple
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
def get_inputdata(query):
    """Tokenise *query* with the BERT char tokenizer and build model inputs.

    Returns (token ids, attention mask, segment ids); the three lists share
    one length and every token is assigned to segment 0.
    """
    tokenizer = tokenization.CharTokenizer(vocab_file=bert_vocab_file)
    tokens = tokenizer.tokenize(query)
    ids = tokenizer.convert_tokens_to_ids(tokens)
    mask = [1] * len(ids)
    segments = [0] * len(ids)
    return ids, mask, segments
# Configuration: pretrained Chinese BERT-base checkpoint, config and vocab files.
data_root = './bert/weight/chinese_L-12_H-768_A-12/'
bert_config_file = data_root + 'bert_config.json'
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
init_checkpoint = data_root + 'bert_model.ckpt'
bert_vocab_file = data_root + 'vocab.txt'
bert_vocab_En_file = './bert/weight/uncased_L-12_H-768_A-12/vocab.txt'

# Graph inputs: (batch, seq_len) int32 placeholders for ids / mask / segments.
input_ids = tf.placeholder(tf.int32, shape=[None, None], name='input_ids')
input_mask = tf.placeholder(tf.int32, shape=[None, None], name='input_masks')
segment_ids = tf.placeholder(tf.int32, shape=[None, None], name='segment_ids')

# Build BERT (inference only — is_training=False disables dropout).
model = modeling.BertModel(
    config=bert_config,
    is_training=False,
    input_ids=input_ids,
    input_mask=input_mask,
    token_type_ids=segment_ids,
    use_one_hot_embeddings=False)

# Load the pretrained BERT weights into the graph variables.
tvars = tf.trainable_variables()
(assignment, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment)

# Outputs of the last layer.
output_layer = model.get_sequence_output()  # per-token output [batch_size, seq_length, embedding_size]; use this for seq2seq or NER
output_layer_pooled = model.get_pooled_output()  # sentence-level (pooled) output

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    query = u'今天去哪里吃'
    # word_ids, word_mask, word_segment_ids=get_inputdata(query)
    token = tokenization.CharTokenizer(vocab_file=bert_vocab_file)
    # Sentence-pair example padded/truncated to 32 tokens.
    word_ids, word_mask, word_segment_ids = convert_single_example_simple(max_seq_length=32, tokenizer=token, text_a=query, text_b='这里吃')
    print(len(word_ids))
    print(word_mask)
    print(word_segment_ids)
    fd = {input_ids: [word_ids], input_mask: [word_mask], segment_ids: [word_segment_ids]}
    last, last2 = sess.run([output_layer, output_layer_pooled], feed_dict=fd)
    print('last shape:{}, last2 shape: {}'.format(last.shape, last2.shape))
    pass
|
# -*- coding: utf-8 -*-
'''
用于调整一张图片的透明度的,如果不是png格式会先转化成png的RGBA格式然后调整透明度
run: python transparentHelper.py pictureName transparency
'''
import sys
import os
from PIL import Image
import shutil
def main(filename, transparency):
    """Write a copy of *filename* whose every pixel has alpha = *transparency*%.

    Non-PNG inputs are first converted to an RGBA PNG (the original file is
    deleted in that case); the result is saved as '<name>_<T>%<ext>'.
    """
    if not os.path.isfile(filename):
        print 'No such picture file...'
        return
    elif int(transparency) > 100 or int(transparency) < 0:
        print 'Wrong input, the rate of transparency should be between 0 and 100'
        return
    else:
        if filename[-3:] != 'png':
            # Convert to RGBA PNG. NOTE(review): the source file is removed here.
            try:
                im = Image.open(filename)
                targetName = filename[:-4] + '.' + 'png'
                im.convert('RGBA').save(targetName)
                os.remove(filename)
            except:
                # NOTE(review): bare except hides the actual failure (I/O, bad image, ...)
                print 'Error occured when changing the mode RGBA for the file " {} "'.format(filename)
                return
        else:
            im = Image.open(filename)
            if im.mode != 'RGBA':
                try:
                    im.convert('RGBA').save(filename)
                except:
                    print 'Error occured when changing the mode RGBA for the file " {} "'.format(filename)
                    return
            im.close()
            targetName = filename
        # Map 0..100 percent onto the 0..255 alpha range.
        transparency = int(transparency)
        rate = int(255 * transparency / 100)
        # Work on a copy named '<name>_<T>%<ext>' so the converted source stays intact.
        newName = targetName[:-4] + '_' + str(transparency) + '%' + targetName[-4:]
        shutil.copy(targetName, newName)
        target = Image.open(newName)
        data = target.getdata()
        newData = []
        for i in data:
            # Keep RGB, replace the alpha channel with the requested rate.
            newData.append((i[0], i[1], i[2], rate))
        target.putdata(newData)
        target.save(newName)
        print 'All done!'
if __name__ == '__main__':
    # Usage: python transparentHelper.py <pictureName> <transparency 0-100>
    if len(sys.argv) < 3:
        print 'Not enough inputs, standard input should be: python transparentHelper.py pictureName transparency'
        exit()
    main(sys.argv[1], sys.argv[2])
|
# Print every prime in [2, n], one per line.
n = int(input())
for candidate in range(2, n + 1):
    # Trial division only up to sqrt(candidate): a composite number always has
    # a factor in that range, and we can stop at the first one found.
    for divisor in range(2, int(candidate ** 0.5) + 1):
        if candidate % divisor == 0:
            break
    else:
        print(candidate)
|
from enum import Enum
# Enumeration of the available parser back-ends, built with the functional
# Enum API; values are the wire/config strings each parser is selected by.
ParserTypes = Enum(
    'ParserTypes',
    [
        ('group', 'group'),
        ('jisho', 'jisho'),
        ('mine', 'mine'),
        ('readings_after', 'after'),
        ('ruby', 'ruby'),
    ],
    module=__name__,
)
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Card search
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# Third-party modules
import tornado.web
import ujson
# NOC modules
from noc.sa.models.useraccess import UserAccess
from .card import CardRequestHandler
class SearchRequestHandler(CardRequestHandler):
    """Card search endpoint: dispatches a query to a scope's search() hook."""

    def get(self, *args, **kwargs):
        """Handle GET ?scope=...&query=... and reply with the JSON result list."""
        scope = self.get_argument("scope")
        query = self.get_argument("query")
        card = self.CARDS.get(scope)
        # 404 for unknown scopes and for cards that do not support searching.
        if card is None or not hasattr(card, "search"):
            raise tornado.web.HTTPError(404)
        payload = ujson.dumps(card.search(self, query))
        self.set_header("Content-Type", "application/json")
        self.write(payload)

    def get_user_domains(self):
        """Administrative domains the current user is allowed to access."""
        return UserAccess.get_domains(self.current_user)
|
import numpy as np
import matplotlib.pylab as plt
# sigmoid function used broadcast for numpy array.
def sigmoid(x):
    """Logistic function 1 / (1 + e^(-x)); broadcasts over numpy arrays."""
    return np.reciprocal(1 + np.exp(-x))
# Quick broadcast check on a small array.
a = np.array([-1.0, 1.0, 2.0])
print(sigmoid(a))

# Plot the sigmoid over [-5, 5) to see how it varies.
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)  # set the y-axis display range
plt.show()
|
import pygame,sys
import random
from pygame.locals import *
import math
pygame.init()
# Window geometry and the base colour used across all screens.
DIMENSIONS = [485, 375]
screen = pygame.display.set_mode(DIMENSIONS, RESIZABLE)
pygame.display.set_caption('Guess The Word')
BG_COLOR = [50, 50, 50]
#BG_COLOR=[233,249,242]
screen.fill(BG_COLOR)
pygame.display.flip()
dictionary={"QUALIFICATION":"The act of qualifying","RECYCLE":"Convert waste into reusable form","SALVATION":"The saving of someone from harm","MALFUNCTION":"Fail to function normally",
"INTERCOM":"An electrical device allowing one-way or two-way communication","GYROSCOPE":"A rotating device used for maintaining stability or a fixed direction","CLAUSTROPHOBIA":"An extreme fear of being in an enclosed place",
"BARBECUE":"An outdoor meal at which food is grilled over an open fire ","ANNIHILATION":"To destroy compeletely","DELEGATE":"A person sent to represent","FONDUE":"A dish of melted cheese","JURISDICTION":"The official power to make legal decisions",
"KINDLE":"Light a flame","QUENCH":"To satisy thirst","LIQUORICE":"A black substance used as a sweet","PENTHOUSE":"A flat on the top floor of a tall building","RATATOUILLE":"A movie was made on this dish featuring a chef mouse","TODDLER":"A yound child who is just beginning to walk",
"UNDERTAKER":"A person whose job is to prepare dead bodies for burial","VICEROY":"A person sent by a monarch to govern a colony","WARRIOR":"An experienced or brave fighter","XEROX":"A photocopy","YOGA":"An exercise based on Hindu philosopy",
"ZENITH":"A point of gretest power or success","HOLOCAUST":"Destruction or killing on a mass scale","INFERNO":"A large uncontrollable fire","KAMIKAZE":"A sucide bomber",
"MARVEL":"A wonder","COMPUTER":"A programmable device used for big calculations","ORIGAMI":"Japanese art of paper folding","ISOSCELES":'Having 2 sides equal',"HAMBURGER":"An iconic American fast food",
"YESTERDAY":"The day before today","NOSTALGIC":"Remembering a memory of past","ORB":"A precious spherical ornament","PENNY":"Coin","SECLUSION":"The state of being private and apart from others","UNIVERSITY":"An educational institute/college",
"DISTICTION":"Outstanding excellence","DIGNITY":"State of being in respect","ALTIMETER":"A device to show altitude",
"FLOPPY":"A portable storage device used in 1990s","WISDOM":"The quality of being wise","TORNADO":"A voilent rotating windstorm","MASTERPIECE":"A work of outstanding skill","GUARANTEE":"A formal promise to do something","LAUGH":"An act of laughing"
,"FRIGHT":"A sudden strong felling of fear","MORTGAGE":"Transfer a property to a creditor as a security for a loan",'BLACKOUT':'A short period when all lights are gone','BOARDGAME':'A game played on a board',
'BREAKTHROUGH':'An game-changing scientific discovery','BUSNINESSMAN':'A person working in business','CALAMITY':'A natural disaster','CALCULATE':'Finding the result of an operation on 2 nos.','CARBONATED':'CO2 or fizz added drink','CARPENTER':'A person who makes wooden furniture',
'CONTRAST':'Opposite of each other','DARTBOARD':'A board used in darts game','DEFICIENCY':'Lack of something necessary','UNDISPUTED':'Cannot be doubted or questioned','UNRAVEL':'To come apert or collapse','UPSHOT':'The final result or outcome','VODKA':'A Russian strong alcoholic drink',
'SPINNER':'A spinning toy','PRESTIGE':'The power to impress others','PEN':'What is mightier than sword','OUST':'To remove someone from his job for oneself','OZONE LAYER':'Protective blanket which stops UV light','OUIJA BOARD':'A board to talk with the dead',
'NOSTALGIA':'The feeling of remembering the memories of past','INFIRMARY':'A small hospital or room for ill people','HERBICIDE':'A chemical to destroy unwanted plants','HACKSAW':'A tool to cut metal','GYMNASIUM':'A place for physical exercise','GRATITUDE':'To show thankfullness','FOURSOME':'F**R people playing a game',
'HALO':'The circle of light above an angel head','TRIUMPH':'A feeling of great satisfaction after victory','SWASTIKA':'A Nazi or Hindu mythology symbol','RUGBY':'American football','QUARTER':'A period of 4 months','QUARANTINE':'Harmfull','WAYFARER':'A traveller on foot',
'ZODIAC':'Astrological symbol','YESTERYEAR':'An year before','YAMMER':'To talk loudly continously','TRAPEZIUM':'A quadrilateral with two sides parallel','TELLTALE':'A secret going on from one generation to another','LASSITUDE':'A state of feeling very tired physically and mentally'}
# Parallel views over the dictionary: candidate words and their hint texts.
word_list = list(dictionary.keys())
help_list = list(dictionary.values())
used_words = []  # words already played this session, to avoid repeats
#########------------------------sounds-----------
def play_keyboard_sound():
    """Play the key-press click and silence any background music."""
    click = pygame.mixer.Sound("keyboard_sound.wav")
    click.play()
    pygame.mixer.music.stop()
def play_mouse_sound():
    """Play the mouse-click sound and silence any background music."""
    click = pygame.mixer.Sound("mouse_sound.wav")
    click.play()
    pygame.mixer.music.stop()
def getNewWord():
    """Return a random candidate word in upper case."""
    chosen = random.choice(word_list)
    return chosen.upper()
def getHelp(current_word):
    """Return the hint sentence for *current_word*, first letter capitalised."""
    hint = dictionary[current_word]
    return hint.capitalize()
def set_word(word, unlock_letters=''):
    """Mask *word* for display and return it in upper case.

    Vowels and any letter present in *unlock_letters* stay visible;
    every other character becomes '-'.
    """
    unlocked = unlock_letters.upper()
    masked = []
    for ch in word:
        up = ch.upper()
        if up in unlocked or up in 'AEIOU':
            masked.append(ch)
        else:
            masked.append('-')
    return ''.join(masked).upper()
def display_word(word, color, cord, size):
    """Render *word* at position *cord* in the 'erasmediumitc' font at *size* pt."""
    font_path = pygame.font.match_font('erasmediumitc')
    rendered = pygame.font.Font(font_path, int(size)).render(word, True, color)
    screen.blit(rendered, cord)
def draw_gfx(round_no=0, gussed_word=' '):
    """Redraw the main game screen; return the clickable HINT box rect."""
    screen.fill(BG_COLOR)
    hint_rect = pygame.draw.rect(screen, [0, 255, 0], [302, 315, 182, 60])
    display_word('HINT', BG_COLOR, [300, 300], 80)
    # Scale the font down for longer words so they still fit the window width.
    display_word(gussed_word, [255, 201, 14], [10, 100], 560 / len(gussed_word))
    display_word('POINTS: ' + str(points), [255, 0, 0], [20, 20], 30)
    display_round(round_no + 1)
    return hint_rect
def display_round(round_no):
    """Draw the 'ROUND: n' banner in the top-right corner."""
    label_font = pygame.font.Font(pygame.font.match_font('AGENCYFB'), 60)
    digit_font = pygame.font.Font(pygame.font.match_font('lcd'), 60)
    screen.blit(label_font.render('ROUND:', True, [255, 0, 255]), [280, 10])
    screen.blit(digit_font.render(str(round_no), True, [255, 255, 0]), [432, 37])
def display_name(name='Guest Player'):
    """Show the WON screen plus the name-entry prompt and the typed *name*."""
    display_won()
    font = pygame.font.Font(pygame.font.match_font('ARIAL'), 30)
    screen.blit(font.render('Please Enter your name: ', True, [255, 0, 255]), [120, 235])
    screen.blit(font.render(name, True, [255, 255, 0]), [120, 270])
    pygame.display.update()
def display_won():
    """Clear the screen and draw the big WON banner."""
    screen.fill([50, 50, 50])
    banner_font = pygame.font.Font(pygame.font.match_font('AGENCYFB'), 80)
    screen.blit(banner_font.render('WON', True, [255, 255, 255]), [190, 150])
def display_continue():
    """Show the round-finished screen and block until any user activity."""
    screen.fill(BG_COLOR)
    font = pygame.font.Font(pygame.font.match_font('AGENCYFB'), 55)
    screen.blit(font.render('ROUND FINISHED', True, [0, 0, 0]), [100, 170])
    screen.blit(font.render('Shall we continue...', True, [255, 255, 255]), [70, 225])
    pygame.display.update()
    waiting = True
    while waiting:
        # Any mouse movement, click or key press resumes the game.
        for event in pygame.event.get():
            if event.type in (MOUSEMOTION, MOUSEBUTTONDOWN, KEYDOWN):
                waiting = False
def show_fps(time):
    """Overwrite the bottom-left corner with the clock's current FPS reading."""
    pygame.draw.rect(screen, BG_COLOR, [0, 360, 120, 15])
    font = pygame.font.Font(pygame.font.match_font('ARIAL'), 20)
    fps_text = 'FPS: ' + str(math.ceil(int(time.get_fps())))
    screen.blit(font.render(fps_text, True, [120, 120, 140]), [0, 360])
    #pygame.display.update()
def show_promo():
    """Draw the title splash (game name + credits) and flip the display."""
    arial = pygame.font.match_font('ARIAL')
    title = pygame.font.Font(arial, 55).render('Guess The Word', True, [125, 125, 125])
    screen.blit(title, [70, 60])
    credit = pygame.font.Font(arial, 25).render('-TJ Productions 2017', True, [200, 200, 200])
    screen.blit(credit, [120, 110])
    pygame.display.update()
show_promo()
# NOTE(review): re-initialises used_words, shadowing the list created above.
used_words = []
def main():
    """Play one round: pick an unused word and run the round's event loop.

    Returns [True, 0] when the round is won, or [False, 1, clock] after the
    final (20th) round; mutates the global points and used_words.
    """
    global round_no, points, used_words
    flag = True
    # Draw a word that has not been used yet this session.
    while True:
        current_word = getNewWord()
        if current_word not in used_words:
            used_words += [current_word]
            break
    gussed_word = set_word(current_word)
    hint = getHelp(current_word)
    if round_no != 0:
        display_continue()
    pygame.display.update()
    clock = pygame.time.Clock()
    hint_shown = False
    change = True
    won = False
    letters = ''  # letters the player has revealed so far
    i = 0         # frame counter; the board is force-redrawn for the first 60 frames
    while flag:
        if i < 60:
            help_box = draw_gfx()
            change = False
            i += 1
        elif i == 60:
            change = True
            i += 1
        else:
            change = False
        for e in pygame.event.get():
            if e.type == QUIT:
                pygame.quit()
                points = 0
                sys.exit()
            elif e.type == KEYDOWN:
                play_keyboard_sound()
                letter = e.unicode.upper()
                if letter not in current_word:
                    points += -10          # wrong guess penalty
                elif letter in current_word:
                    if letter in gussed_word and letter != '-':
                        points += -5       # repeated-guess penalty
                        change = True
                        continue
                    letters += letter
                    points += 20           # reward for a newly revealed letter
                    gussed_word = set_word(current_word, letters)
                    change = True
            elif e.type == MOUSEBUTTONDOWN:
                mouse_pos = pygame.mouse.get_pos()
                play_mouse_sound()
                change = True
                if help_box.collidepoint(mouse_pos):
                    # Showing the hint costs 60 points, charged once per round.
                    if not hint_shown:
                        hint_shown = True
                        points += -60
                        change = True
                # NOTE(review): as reconstructed, a completed word only scores
                # on a click outside the hint box — confirm this is intended.
                elif gussed_word == current_word:
                    points += 40
                    won = True
                else:
                    pass
                    #change=False
        if change:
            draw_gfx(round_no, gussed_word)
            if hint_shown == True:
                display_word(hint, [250, 250, 250], [0, 200], 16)
        if won and round_no == 19:
            # Last round: show the WON screen and signal game over to the caller.
            display_won()
            return [False, 1, clock]
        if won:
            return [True, 0]
        pygame.display.update()
        clock.tick(30)
    return False
points = 0
# Twenty rounds; after the final round, collect the player's name and log the score.
for round_no in range(20):
    a = main()
    if not a[0]:
        if a[1]:
            # Game finished: prompt for the player's name, then append
            # "name: score" to 'Point List.txt'.
            display_name()
            clock = a[2]
            name = ''
            qq = 0  # flips to 1 once Enter (key code 13) is pressed
            while True:
                if qq == 1:
                    f = open('Point List.txt', 'a')
                    f.write(name + ': ' + str(points) + '\n')
                    f.close()
                    break
                for e in pygame.event.get():
                    if e.type == QUIT:
                        pygame.quit()
                        sys.exit()
                    if e.type == KEYDOWN:
                        if e.key == 13:
                            qq = 1
                        # NOTE(review): the Enter keystroke's unicode is also
                        # appended to the name — confirm this is intended.
                        name += e.unicode
                        display_name(name)
                clock.tick(30)
pygame.quit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 12 11:07:01 2021
@author: nate_mac
"""
"""
Write a script to plot the period of a plane pendulum as a function of
amplitude from 0 to π. Include a line showing the approximate solution for
comparison. At what amplitude does the exact solution differ from the
approximate solution by 1%?
"""
from pylab import *
from scipy.integrate import quad
def gaus(x):
    """Gaussian integrand e^(-x^2); its integral over the real line is sqrt(pi)."""
    return exp(-x ** 2)
# Sanity check: quad returns (value, abs_error); the value should be sqrt(pi) ~ 1.77245.
print(quad(gaus,-inf,inf))
|
# Default Imports
import pandas as pd
import numpy as np
# Two variants of the house-price data; correlation() below compares their
# SalePrice columns.
dataframe_1 = pd.read_csv('data/house_prices_multivariate.csv')
dataframe_2 = pd.read_csv('data/house_prices_copy.csv')

# Return the correlation value between the SalePrice column for the two loaded datasets
# Your code here
def correlation():
    """Pearson correlation between the SalePrice columns of the two datasets."""
    prices_a = dataframe_1.loc[:, "SalePrice"]
    prices_b = dataframe_2.loc[:, "SalePrice"]
    return float(np.corrcoef(prices_a, prices_b)[0, 1])
|
'''
Created on 21/03/2014
@author: Beto
'''
'''
yourAge = int(raw_input('How old are you: '))
if (yourAge > 0) and (yourAge < 120):
if (yourAge == 35):
print "Same as me"
elif (yourAge > 35):
print "Older than me"
else:
print "Younger than me"
else:
print "Don't lie about your age"
print
'''
#Conditional Expression
x, y = 1, 0 #default values
g = 'varDefinida'
g = None
# del g #desdefine g
try:
x = int(raw_input("dame un numero: "))
except:
# x se queda con el default
pass
# {valor = asignado si TRUE} if condicion else {valor asignado si FALSE}
a = 'y is less than x' if ( y < x) else 'x is less or equal than y'
print x, y, a
print "valor de x:%d valor de y:%d por lo tanto %s " % (x,y,a)
if ( y < x ):
a = 'condicion verdadera'
else:
a='condicion false'
2print x, y, a
|
#dart_result = input()
dart_result = "1S2D*3T"

# score[i] holds dart i's points; start at 1 so the multipliers compose.
score = [1, 1, 1]
d_index = 0
for i in range(0, 3):
    # A throw's number is 1..10: try the two-character parse first ('10'),
    # fall back to a single digit.
    try:
        num = int(dart_result[d_index:d_index + 2])
        # BUGFIX: skip both digits so d_index lands on the area letter
        # (was += 1, which left it on the second digit and crashed or
        # mis-scored every '10' throw).
        d_index += 2
    except ValueError:
        num = int(dart_result[d_index])
        d_index += 1
    # Area: Single / Double / Triple raise the number to power 1 / 2 / 3.
    if dart_result[d_index] == 'S':
        score[i] *= num
    elif dart_result[d_index] == 'D':
        score[i] *= pow(num, 2)
    elif dart_result[d_index] == 'T':
        score[i] *= pow(num, 3)
    # Options: '*' doubles this and the previous dart; '#' negates this one.
    if (d_index + 1) < len(dart_result):
        if dart_result[d_index + 1] == '*':
            d_index += 1
            score[i] *= 2
            if i != 0:
                score[i - 1] *= 2
        elif dart_result[d_index + 1] == '#':
            d_index += 1
            score[i] *= -1
    d_index += 1
print(sum(score))
# Generated by Django 2.1.7 on 2019-03-18 06:22
from django.db import migrations
from django.core.management import call_command
def load_initial_palika_codes(apps, schema_editor):
    """Load the seed palika fixture into the geo app via loaddata.

    Kept for reference; the RunPython step that called it is commented out.
    """
    call_command('loaddata', 'initial_palika.json', app_label='geo')
class Migration(migrations.Migration):
    """Follow-up geo migration; the fixture-loading operation is disabled."""

    dependencies = [
        ('geo', '0001_initial'),
    ]

    operations = [
        # NOTE: Don't Need this now
        # migrations.RunPython(load_initial_palika_codes),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 11 11:06:46 2013
@author: pathos
"""
from __future__ import division
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D, proj3d
import pylab
from scipy.interpolate import griddata
import numpy as np
import matplotlib.pyplot as plt
def gen_sinusoidal(N):
    """Sample N noisy sine points: x even on [0, 2pi], t = sin(x) + N(0, 0.2)."""
    x = np.linspace(0, 2 * np.pi, N, endpoint=True)
    noise_sd = 0.2
    t = np.random.normal(np.sin(x), noise_sd, N)
    return x, t
def fit_polynomial(x, t, M):
    """Least-squares degree-M polynomial fit via the Moore-Penrose pseudo-inverse.

    Returns (w, phi): the weight vector and the design matrix used.
    """
    phi = getPhi(x, M)
    w = np.dot(np.linalg.pinv(phi), t)
    return w, phi
def plot_unregularized(x, t, M_list):
    """Draw a 2x2 grid of unregularised fits, one subplot per order in M_list."""
    plt.figure("unregularized linear regression")
    for i, M in enumerate(M_list):
        plt.subplot(220 + i + 1)
        w, phi = fit_polynomial(x, t, M)
        smooth, xpol = smoothing(M, 100)
        plt.plot(x, t, 'co')
        # True generating curve vs the fitted polynomial.
        plt.plot(xpol, np.sin(xpol), 'green', label="original")
        plt.plot(xpol, np.dot(smooth, w.transpose()), 'blue', label="M:" + str(M))
        plt.legend()
def fit_polynomial_reg(x, t, M, lamd):
    """Ridge-regularised polynomial fit: w = (lamd*I + Phi^T Phi)^-1 Phi^T t.

    Returns (w, phi): the weight vector and the design matrix used.
    """
    phi = getPhi(x, M)
    gram = np.dot(lamd, np.identity(M + 1)) + np.dot(phi.transpose(), phi)
    w = np.dot(np.dot(np.linalg.inv(gram), phi.transpose()), t)
    return w, phi
def getPhi(x, M):
    """Polynomial design matrix of powers x**0 .. x**M.

    For an array x of length n returns an (n, M+1) np.matrix; for a scalar x
    returns a 1-D vector of length M+1 (matching the original behaviour).
    """
    try:  # array input: x has a .shape attribute
        phi = np.mat(np.zeros((x.shape[0], M + 1)))
        for i in range(phi.shape[0]):
            for j in range(phi.shape[1]):
                # x**0 == 1, so no special case for j == 0 is needed
                # (the original's 'if j == 0: phi[i,j] = 1' was a dead store).
                phi[i, j] = np.power(x[i], j)
    except AttributeError:  # scalar input — narrower than the original bare except
        phi = np.zeros(M + 1)
        for i in range(phi.shape[0]):
            phi[i] = np.power(x, i)
    return phi
def smoothing(M, count):
    '''
    Build a dense design matrix for smooth plotting.

    Samples `count` evenly spaced points on [0, 2*pi] and returns (phi, x)
    where phi[i, j] = x[i] ** j is an np.matrix of shape (count, M+1).
    '''
    x = np.linspace(0, 2*np.pi, count, endpoint=True)
    # Vectorized Vandermonde matrix; the original double loop's `if j == 0`
    # branch was dead code (x**0 is already 1).
    phi = np.mat(np.power(x[:, None], np.arange(M + 1)))
    return phi, x
def calculateError(w, valid_phi, valid_data_t, lamd):
    """Regularized sum-of-squares error on the validation fold.

    Computes (phi w^T - t)^T (phi w^T - t) + (lamd/2) * w w^T and returns
    the top-left scalar of the resulting matrix.

    Args:
        w: fitted weights (np.matrix row vector).
        valid_phi: validation design matrix.
        valid_data_t: validation targets.
        lamd: regularization strength.
    """
    # BUG FIX: the residual was computed twice (`temp` and `temp2` were
    # identical expressions); compute it once.
    residual = np.dot(valid_phi, w.transpose()) - valid_data_t
    Ew = np.dot(np.transpose(residual), residual)
    # (lamd/2) * w w^T regularization term (scalar multiply, not np.dot).
    Ew = Ew + (lamd / 2.0) * np.dot(w, w.transpose())
    return Ew.item(0)
def kfold_indices(N, k):
    """Split indices 0..N-1 into k shuffled train/validation folds.

    Args:
        N: number of data points.
        k: number of folds.

    Returns:
        (train_folds, valid_folds): two lists of k integer index arrays.
        Fold boundaries follow an even linspace partition of the shuffled
        indices, so folds may differ in size by one.
    """
    all_indices = np.arange(N, dtype=int)
    np.random.shuffle(all_indices)
    # BUG FIX: cast boundaries to int -- float slice indices raise TypeError
    # on modern NumPy.
    idx = np.floor(np.linspace(0, N, k + 1)).astype(int)
    train_folds = []
    valid_folds = []
    for fold in range(k):
        valid_indices = all_indices[idx[fold]:idx[fold + 1]]
        valid_folds.append(valid_indices)
        # Training fold = everything not in the validation fold.
        train_folds.append(np.setdiff1d(all_indices, valid_indices))
    return train_folds, valid_folds
def cross_validation(x, t, train_folds, valid_folds):
    """Grid-search degree M in 0..10 and lambda = exp(-l) for l in 10..0 via k-fold CV.

    For every (M, l) pair, fits a regularized polynomial on each training
    fold, evaluates the regularized error on the matching validation fold,
    and stores the log-mean error in all_MSE_errors keyed by (M, l).

    Returns:
        (bestM, bestL, all_MSE_errors). Note bestL is the exponent l, not
        exp(-l) itself.
    """
    all_MSE_errors = {}
    #iterate over possible M and lamda
    for M in range(11):
        for l in range(10, -1, -1):
            lamd = np.exp(-l)
            errorS_fold = []
            #get folds
            for (train_fold, valid_fold) in (zip(train_folds, valid_folds)):
                #initialize training and testing data
                train_data = np.zeros(train_fold.size)
                train_data_t = np.zeros(train_fold.size)
                valid_data = np.zeros(valid_fold.size)
                valid_data_t = np.zeros(valid_fold.size)
                #make training set (gather the points whose indices are in this fold)
                for (i, index) in (zip(range(train_data.size), train_fold)):
                    train_data[i] = x[index]
                    train_data_t[i] = t[index]
                #make test set
                for (i, index) in (zip(range(valid_data.size), valid_fold)):
                    valid_data[i] = x[index]
                    valid_data_t[i] = t[index]
                #make the model based on training data
                w, phi = fit_polynomial_reg(train_data, train_data_t, M, lamd)
                valid_phi = getPhi(valid_data, M)
                error = calculateError(w, valid_phi, valid_data_t, lamd)
                #make the list with the errors for each fold
                errorS_fold.append(error)
            #append to the dictionary of errors the error for M,lamda
            all_MSE_errors[M,l] = calcMSE(errorS_fold)
    #find the best M and lamda
    bestM, bestL = findBestParameters(all_MSE_errors)
    return bestM, bestL, all_MSE_errors
def calcMSE(errorS_fold):
    """Log of the mean fold error (the log keeps the error surface readable)."""
    mean_error = np.mean(errorS_fold, dtype=np.float64)
    return np.log(mean_error)
def findBestParameters(all_MSE_errors):
    """Return the (M, lambda-exponent) key with the smallest recorded error."""
    bestM, bestL = min(all_MSE_errors, key=all_MSE_errors.get)
    return bestM, bestL
def plot_M_lamda_error(all_MSE_errors, bestM, bestL):
    """Draw a 3-D surface of the CV error over (M, lambda exponent).

    Interpolates the scattered (M, l, error) samples onto a 100x100 grid,
    plots the surface, and annotates the minimum-error point.

    Args:
        all_MSE_errors: dict mapping (M, l) -> log-mean error.
        bestM, bestL: key of the minimum to highlight.
    """
    datapoints = []
    bestError = all_MSE_errors[bestM, bestL]
    # BUG FIX: dict.iteritems() is Python-2-only; items() works everywhere.
    for item in all_MSE_errors.items():
        #create tuples of data points where x = M, y = lambda, z = error
        datapoints.append((item[0][0],item[0][1],item[1]))
    #create the vector of x,y,z
    x,y,z = zip(*datapoints)
    #create a flat surface
    xi = np.linspace(min(x), max(x), 100)
    yi = np.linspace(min(y), max(y), 100)
    # interpolate for missing values
    #and use zi as the height of the surface in specific points
    zi = griddata((x, y), z, (xi[None,:], yi[:,None]), method='cubic')
    xim, yim = np.meshgrid(xi, yi)
    bestxi = np.linspace(bestM, bestM, 1)
    bestyi = np.linspace(bestL, bestL, 1)
    fig = plt.figure("Error for different M and lamda")
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(xim, yim, zi, cmap = cm.coolwarm)
    ax.plot(bestxi, bestyi, bestError, 'ro', label = 'minimum error')
    text='[M:'+str(int(bestM))+', lamda:'+str(int(bestL))+', error:'+str("%.2f" % round(bestError,2))+']'
    # Project the 3-D minimum into 2-D display coordinates for the annotation.
    x2, y2, _ = proj3d.proj_transform(bestM,bestL,bestError, ax.get_proj())
    pylab.annotate(text,
                   xycoords='data',
                   xy = (x2, y2), xytext = (0, 0),
                   textcoords = 'offset points', ha = 'right', va = 'bottom',
                   bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5))
    ax.set_xlabel('M')
    ax.set_ylabel('lamda')
    ax.set_zlabel('Error')
    ax.view_init(5,-110)
    ax.legend()
#generate data
x,t = gen_sinusoidal(9)
M_list = (0,1,3,8)
plot_unregularized(x, t, M_list)
#perform the cross validation and plot the best result and the error plot
train_folds,valid_folds = kfold_indices(len(x), 9)
bestM, bestL, all_MSE_errors = cross_validation(x, t, train_folds, valid_folds)
plot_M_lamda_error(all_MSE_errors, bestM, bestL)
'''
plotregularized = 0
if plotregularized == 0:
#define lamda
lamd = bestL
#print lamd
#regularized M = 0
plt.figure("regularized linear regression with lamda exp(-"+str(lamd)+")")
plt.subplot(221)
M = 0
w,phi = fit_polynomial_reg(x, t, M, lamd)
smooth,xpol = smoothing(M, 100)
plt.plot(x,t,'co')
plt.plot(xpol,np.sin(xpol), 'green', label = "original")
plt.plot(xpol, np.dot(smooth, w.transpose()), 'blue', label = "estimated")
plt.legend()
#regularized M = 1
#plt.figure(2)
plt.subplot(222)
M = 1
w,phi = fit_polynomial_reg(x, t, M, lamd)
smooth,xpol = smoothing(M, 100)
plt.plot(x,t,'co')
plt.plot(xpol,np.sin(xpol), 'green')
plt.plot(xpol, np.dot(smooth, w.transpose()), 'blue')
#regularized M = 3
#plt.figure(2)
plt.subplot(223)
M = 6
w,phi = fit_polynomial_reg(x, t, M, lamd)
smooth,xpol = smoothing(M, 100)
plt.plot(x,t,'co')
plt.plot(xpol,np.sin(xpol), 'green')
plt.plot(xpol, np.dot(smooth, w.transpose()), 'blue')
#regularized M = 9
#plt.figure(2)
plt.subplot(224)
M = 8
w,phi = fit_polynomial_reg(x, t, M, lamd)
smooth,xpol = smoothing(M, 100)
plt.plot(x,t,'co')
plt.plot(xpol,np.sin(xpol), 'green')
plt.plot(xpol, np.dot(smooth, w.transpose()), 'blue')
'''
plt.figure("best model according to cross-validation")
labelML = "best M: " + str(bestM)+ " best lamda: exp(-" + str(bestL)+")"
M = bestM
w,phi = fit_polynomial_reg(x, t, M, bestL)
smooth,xpol = smoothing(M, 100)
plt.plot(x,t,'co')
plt.plot(xpol,np.sin(xpol), 'green', label="original")
plt.plot(xpol, np.dot(smooth, w.transpose()), 'blue', label = labelML)
plt.legend()
#show figure
plt.show()
|
from tkinter import ttk
from tkinter import *
import gui
from tkvideoplayer import tkvideo

# ---------------------------------------------------------------------------
# GUI front-end for approximating an image contour with a Fourier series
# (epicycle animation). Workflow: load image -> border detection -> contour
# crawl -> Fourier transform rendered as a looping video.
# ---------------------------------------------------------------------------

# console output :
verbose = 0

# predefined values (initial contents of the entry widgets below)
minimumPoints = 20   # minimum contour points the crawler must find
RadiusLimit = 0      # 0 = unlimited jump distance between crawler steps
requestedpoints = 500  # number of points resampled from the contour
new_width = 600      # images are rescaled to this width (aspect preserved)
kernel_list = ['laplace4', 'laplace2', 'X sobel', 'Y sobel']
order = 20  # AANTAL CIRKELS: order = divided by two rounded down
Seconds = 5
video_path = "epicycle.mp4"

# Gui Vars:
CanvasWidth = 300
CanvasHeight = 300
vidWidth = 500
vidHeight = 500

# design gui:
root = Tk()
root.title("Two dimentional Fourier Transformation")  # replace title
root.geometry(str(CanvasWidth * 4 + 50) + "x" + str(CanvasHeight + vidHeight + 100))

# create a main frame
main_frame = Frame(root)
main_frame.pack(fill=BOTH, expand=1)

# create a canvas
my_canvas = Canvas(main_frame)
my_canvas.pack(side=LEFT, fill=BOTH, expand=1)

# Add A Scrollbar To The Canvas
my_scrollbar = ttk.Scrollbar(main_frame, orient=VERTICAL, command=my_canvas.yview)
my_scrollbar.pack(side=RIGHT, fill=Y)

# Configure The Canvas
my_canvas.configure(yscrollcommand=my_scrollbar.set)
my_canvas.bind('<Configure>', lambda e: my_canvas.configure(scrollregion=my_canvas.bbox("all")))

# Create ANOTHER Frame INSIDE the Canvas
content = Frame(my_canvas)

# Add that New frame To a Window In The Canvas
my_canvas.create_window((0, 0), window=content, anchor="nw")

# padding around the grid
p1 = Label(content, width=3)
p1.grid(row=0, column=0, sticky=W, pady=2)
p1 = Label(content, height=60)
p1.grid(row=20, column=4, sticky=W, pady=2)

# gui variables
edgeThinning = BooleanVar()
kernel_var = StringVar(content)
kernel_var.set(kernel_list[0])

# Title
l_title = Label(content, text="Two dimentional Fourier Transformation", font=("Times", "24", "bold italic"))
l_title.grid(row=0, column=1, columnspan=5, sticky=W, pady=2)

# Input (row 1->3): file picker, preview canvas and resize width
c_in = Canvas(content, width=CanvasWidth, height=CanvasHeight)
c_in.grid(row=1, column=1, columnspan=3, rowspan=1, sticky=W, pady=2)
c_in.configure(bg='grey')
l_in1 = Label(content, text="filename", width=27, anchor="e")
l_in1.grid(row=2, column=1, columnspan=2, sticky=W, pady=2)
b_in1 = Button(content, text='select file', command=lambda: gui.upload(c_in, l_in1, CanvasWidth, CanvasHeight))
b_in1.grid(row=2, column=3, sticky=W, pady=2)
l_in2 = Label(content, text="New width:", width=27, anchor="e")
l_in2.grid(row=3, column=1, columnspan=2, sticky=W, pady=2)
t_in2 = Text(content, height=1, width=10, font=("Courier", 10, "bold"))
t_in2.grid(row=3, column=3, columnspan=1, sticky=W, pady=2)
t_in2.insert(INSERT, new_width)
text = "With this GUI, you can import a picture and play with it to try and find a contour approximate by a fourier ser"
text = text + "ies. With the use of the button ‘select file’ you can choose a file on your computer to start the analys"
text = text + "ation. This Application will generate outputs on your current location. The ratio will remain the same, "
text = text + "but the size of the image will be defined by the width. We recommend taking visually simple images. By t"
text = text + "his we mean images with clearly defined features. This will help you find borders more clearly and gener"
text = text + "ate better results by extension. "
l_text1 = Label(content, text=text, wraplength=500, justify=LEFT)
l_text1.grid(row=1, column=4, columnspan=2, sticky=W, pady=2)

# Border detection (row 4->8): kernel choice, edge-thinning toggle, preview
l_bd1 = Label(content, text="Step 1: Border detection", width=27, anchor="w", font=("Helvetica", 16, "bold"))
l_bd1.grid(row=4, column=1, columnspan=3, sticky=W, pady=2)
text = "For the first part of the process, you must find the borders of the image. By using different kernels you will find different results. Try some out until you are satisfied with the result. Edge thinning will reduce noise over the picture. Depending on the picture, edge thinning can be an advantage or an inconvenience. "
l_text1 = Label(content, text=text, wraplength=300, justify=LEFT)
l_text1.grid(row=5, column=5, sticky=W, pady=2)
l_bd2 = Label(content, text="Border detection kernel:", width=27, anchor="e")
l_bd2.grid(row=5, column=1, columnspan=2, sticky=W, pady=2)
m_bd = OptionMenu(content, kernel_var, *kernel_list)
m_bd.grid(row=5, column=3, columnspan=1, rowspan=1, sticky=W, pady=2)
l_bd3 = Label(content, text="Edge thinning:", width=27, anchor="e")
l_bd3.grid(row=7, column=1, columnspan=2, sticky=W, pady=2)
c1 = Checkbutton(content, onvalue=1, offvalue=0, variable=edgeThinning)
c1.grid(row=7, column=3, columnspan=1, rowspan=1, sticky=W, pady=2)
edgeThinning.set(True)
c_bd = Canvas(content, width=CanvasWidth, height=CanvasHeight)
c_bd.grid(row=4, column=4, columnspan=1, rowspan=5, sticky=W, pady=2)
c_bd.configure(bg='grey')
b_bd = Button(content, text='run', command=lambda: gui.executeDB(l_in1['text'], c_bd, kernel_var.get(), edgeThinning.get(), CanvasWidth, CanvasHeight))
b_bd.grid(row=8, column=1, sticky=W, pady=2)

# Contour detection (row 8->13): crawler parameters and preview canvas
l_cd1 = Label(content, text="Step 2: Contour detection", width=27, anchor="w", font=("Courier", 16, "bold"))
l_cd1.grid(row=9, column=1, columnspan=3, sticky=W, pady=2)
text = "In this step we will run a crawler over the picture. The highest point on your picture will be the starting point of the crawler over the picture. By setting the radius limit as a non-zero positive number, you can limit the distance between two jumps of the crawler. By setting a minimum amount of points to be returned, you can let the crawler search for more pixels, and let it cover a bigger part of the image if needed. Finally, by requesting a higher amount of points to be returned, you can increase the resolution of the found contour."
l_text1 = Label(content, text=text, wraplength=300, justify=LEFT)
l_text1.grid(row=10, column=5, sticky=W, pady=2)
l_cd2 = Label(content, text="minimum points detected:", width=27, anchor="e")
l_cd2.grid(row=10, column=1, columnspan=2, sticky=W, pady=2)
t_cd2 = Text(content, height=1, width=10, font=("Courier", 10, "bold"))
t_cd2.grid(row=10, column=3, columnspan=1, sticky=W, pady=2)
t_cd2.insert(INSERT, minimumPoints)
l_cd3 = Label(content, text="Crawler Radius limit:")
l_cd3.grid(row=11, column=1, columnspan=4, sticky=W, pady=2)
t_cd3 = Text(content, height=1, width=10, font=("Courier", 10, "bold"))
t_cd3.grid(row=11, column=3, columnspan=1, rowspan=1, sticky=W, pady=2)
t_cd3.insert(INSERT, RadiusLimit)
l_cd4 = Label(content, text="request output points:")
l_cd4.grid(row=12, column=1, columnspan=4, sticky=W, pady=2)
t_cd4 = Text(content, height=1, width=10, font=("Courier", 10, "bold"))
t_cd4.grid(row=12, column=3, columnspan=1, rowspan=1, sticky=W, pady=2)
t_cd4.insert(INSERT, requestedpoints)
b_cd = Button(content, text='run',
              command=lambda: gui.executeCB(c_cd, int(t_cd3.get("1.0", END)), int(t_cd2.get("1.0", END)),
                                            int(t_cd4.get("1.0", END)), int(t_in2.get("1.0", END)), CanvasWidth,
                                            CanvasHeight))
b_cd.grid(row=13, column=1, sticky=W, pady=2)
c_cd = Canvas(content, width=CanvasWidth, height=CanvasHeight)
c_cd.grid(row=9, column=4, columnspan=9, rowspan=5, sticky=W, pady=2)
c_cd.configure(bg='grey')

# Fourier transfer (row 14->18): order/duration inputs and video playback
l_ft1 = Label(content, text="Step 3: Fourier Transform", width=27, anchor="w", font=("Courier", 16, "bold"))
l_ft1.grid(row=14, column=1, columnspan=3, sticky=W, pady=2)
text = "For the last step, start by defining the amount of circles you want in the approximation of your edge. This is dependent on the order (= 2x circles). The higher the amount of circles, the closer the fourier series will be to the original contour. You can also specify the playback speed of the loop in seconds."
l_text1 = Label(content, text=text, wraplength=300, justify=LEFT)
l_text1.grid(row=15, column=4, columnspan=2, sticky=W, pady=2)
l_ft2 = Label(content, text="Fourier order:")
l_ft2.grid(row=15, column=1, columnspan=4, sticky=W, pady=2)
t_ft2 = Text(content, height=1, width=10, font=("Courier", 10, "bold"))
t_ft2.grid(row=15, column=3, columnspan=1, rowspan=1, sticky=W, pady=2)
t_ft2.insert(INSERT, order)
l_ft3 = Label(content, text="Duration of the loop (s):")
l_ft3.grid(row=16, column=1, columnspan=4, sticky=W, pady=2)
t_ft3 = Text(content, height=1, width=10, font=("Courier", 10, "bold"))
t_ft3.grid(row=16, column=3, columnspan=1, rowspan=1, sticky=W, pady=2)
t_ft3.insert(INSERT, Seconds)
b_ft = Button(content, text='run',
              command=lambda: gui.executeFT(video_path, int(t_ft2.get("1.0", END)), int(t_ft3.get("1.0", END)), player))
b_ft.grid(row=18, column=1, sticky=W, pady=2)
video_label = Label(content, anchor="n")
video_label.grid(row=19, column=4, rowspan=10, columnspan=2, sticky=W, pady=2)
# Loops the generated epicycle video inside the label above.
player = tkvideo(video_label, loop=1, size=(int(CanvasHeight * 2), int(CanvasHeight * 2)))

root.mainloop()
|
import multiprocessing
import importlib
def create(module_name):
    """Spawn a worker process serving `module_name`'s same-named function over a pipe.

    The child imports `module_name`, looks up the function with the same name
    as the module, sends a ready message, then loops forever: receive input,
    apply the function, and send back either the result or the raised
    exception object.

    Returns:
        handle(img_feature): a blocking RPC wrapper that forwards one request
        to the worker and returns its reply, re-raising any exception the
        worker shipped back.
    """
    def pipe_process(pipe):
        # Runs in the child process.
        lib = importlib.import_module(module_name)
        func = getattr(lib, module_name)
        pipe.send('Okay :-)')  # readiness signal consumed by the parent below
        while 1:
            data = pipe.recv()
            try:
                poem = func(data)
                pipe.send(poem)
            except Exception as e:
                # Ship the exception object across the pipe instead of dying.
                pipe.send(e)
    pipes = multiprocessing.Pipe()  # W/R
    proc = multiprocessing.Process(target=pipe_process, args=(pipes[1],))
    proc.start()
    # wait to start
    print (pipes[0].recv())
    def handle(img_feature):
        # Synchronous request/response over the parent end of the pipe.
        pipes[0].send(img_feature)
        msg = pipes[0].recv()
        if isinstance(msg, Exception):
            raise msg
        return msg
    return handle
# Smoke test: start a worker for the `generate_poem` module when run directly.
if __name__ == '__main__':
    create('generate_poem')
|
# English words for the decimal digits, indexed by digit value (0-9).
numbers = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
# Style-strength presets: weights[i] corresponds to descriptions[i].
style_param = {
    "weights": [0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2],
    "descriptions": ["very poor", "poor", "little", "mild", "normal", "strong", "very strong"]
}
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from .base_api_resource import BaseAPIResource
from .data_models import EligibilityRequirement
class EligibilityRequirements(BaseAPIResource):
    """API resource for listing eligibility requirements and counting participants."""

    list_api_endpoint = "eligibility-requirements/"
    count_api_endpoint = "eligibility-count/"

    @classmethod
    def list(cls) -> List[EligibilityRequirement]:
        """Fetch all eligibility requirements as model objects."""
        payload = cls.get(cls.list_api_endpoint)
        return [EligibilityRequirement(**item) for item in payload["results"]]

    @classmethod
    def count_participants(cls) -> int:
        """Return the number of eligible participants reported by the API."""
        return cls.post(cls.count_api_endpoint)["count"]
|
import serial
import datetime
import os
import sys
import stat
import json
import time
import pmDatabase
import pmWiFi
from threading import Thread
#========================================
#Permissions
#Change the serial port access permission
#os.chmod('/dev/serial0', stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
#for sensor channel
# NOTE(review): world-writable `chmod 777` via passwordless sudo is a security
# smell; confirm a udev rule / dialout group membership can't be used instead.
command = 'chmod 777 /dev/serial0'
permission_command = os.system('echo %s|sudo -S %s' % ('', command))
#For data storing path
data_store_path = '/home/pi/Desktop/sensor-data/'
data_store_command = 'chmod 777 ' + data_store_path
data_store_permission_command = os.system('echo %s|sudo -S %s' % ('', data_store_command))
#giving permission to use serial profile for the bluetooth connection
command = 'hciconfig hci0 piscan'
serial_profile_bt_command = os.system('echo %s|sudo -S %s' % ('', command))
# Module-level serial handle to the PM sensor; re-opened by setup() on errors.
serial_channel = serial.Serial('/dev/serial0', baudrate=9600, timeout=1)
#========================================
#Setup
def setup():
    """(Re)open the serial channel to the PM sensor.

    Returns the open serial.Serial object, or None if opening failed
    (failures are only printed).
    """
    global serial_channel
    try:
        #Open a serial communication channel to recieve data from the PM sensor
        serial_channel = serial.Serial('/dev/serial0', baudrate=9600, timeout=1)
        return serial_channel
    except BaseException as e:
        print(str(e))
#========================================
#Function to check and read data from channel
#First checks if correct datastrem by matching the first two fixed bytes
#The first two bytes are 0x42 and 0x4d
def pm_reader(channel):
    """Read one 30-byte sensor frame (0x42 0x4d header + 28 payload bytes).

    Returns the raw frame bytes when the two fixed header bytes match, or
    None (implicit) otherwise. Note: a serial timeout can still yield a
    short frame -- callers should be prepared for that.
    """
    recieved = b''
    fixed1 = channel.read()
    if fixed1 == b'\x42':
        fixed2 = channel.read()
        if fixed2 == b'\x4d':
            recieved += fixed1 + fixed2
            recieved += channel.read(28)
            return recieved
#========================================
#Function to get resulting reading from sensor
def get_result(channel):
    """Read one frame from the PM sensor and decode it into a reading dict.

    Each metric is a big-endian 16-bit value assembled from a byte pair of
    the frame payload.

    Returns:
        dict with a 'timestamp' plus PM/particle-count readings, or None
        when no valid (complete) frame was available.
    """
    recieved = pm_reader(channel)
    # Guard against a missing frame AND a short read caused by a serial
    # timeout (indexing up to byte 27 below would raise IndexError).
    if recieved is not None and len(recieved) >= 28:
        #formatting data because each piece of data is represented by two bytes
        result = {'timestamp': datetime.datetime.now(),
                  'apm10': recieved[4] * 256 + recieved[5],
                  'apm25': recieved[6] * 256 + recieved[7],
                  'apm100': recieved[8] * 256 + recieved[9],
                  'pm10': recieved[10] * 256 + recieved[11],
                  'pm25': recieved[12] * 256 + recieved[13],
                  'pm100': recieved[14] * 256 + recieved[15],
                  'gt03um': recieved[16] * 256 + recieved[17],
                  'gt05um': recieved[18] * 256 + recieved[19],
                  'gt10um': recieved[20] * 256 + recieved[21],
                  'gt25um': recieved[22] * 256 + recieved[23],
                  'gt50um': recieved[24] * 256 + recieved[25],
                  'gt100um': recieved[26] * 256 + recieved[27]
                  }
        return result
    return None
#========================================
#Function to extract the data from the read stream and send it to the server
def wifi_data_extract(channel , session , indoor_):
    """Poll the sensor until a frame decodes, then store the reading in the DB.

    Returns the magic string 'zaba6' on success or '0<9' after an error
    (presumably status codes for the caller -- confirm against call sites).
    On error the serial channel is closed and re-opened via setup().
    """
    try:
        result = get_result(channel)
        # Busy-wait until a valid frame is decoded.
        while(result == None):
            result = get_result(channel)
        if not (result == None):
            timestamp = str(result['timestamp']).split('.')[0]
            line = '[{\'timestamp\': '+ str(timestamp) +', \'pm2.5\': '+ str(result['pm25']) + '}]'
            print(result['pm25'] + result['pm10'])
            # Stored value is the SUM of the PM2.5 and PM10 standard readings.
            write_to_db(session, str(result['timestamp']), result['pm25'] + result['pm10'], 0 , indoor_)
            return 'zaba6'
    except BaseException as e:
        print(" wifi data extract error " + str(e))
        # Reset the serial channel before reporting failure.
        on_exit_handler(channel)
        setup()
        return '0<9'
#========================================
#Function to handle writing data to the Pi
#Each year has a folder and each week's reading are saved in a single file
#Each line has a timestamp and the PM 2.5 standard reading from the sensor
def write_to_db(session, timestamp, pm, isSent, indoor_):
    """Persist one reading via pmDatabase (isSent=0 marks it as not yet uploaded)."""
    pmDatabase.insert_(session, timestamp, pm, isSent, indoor_)
    print('done writing to db')
#========================================
#Function for the clean up before exiting
def on_exit_handler (serial_channel):
    """Close the serial channel and pause briefly so the OS releases the port."""
    serial_channel.close()
    time.sleep(0.02)
    print('exit')
#========================================
#Function to catch termination
def set_exit_handler(func):
    """Install `func` as the handler for SIGTERM and SIGTSTP (terminate / Ctrl+Z)."""
    import signal
    for sig in (signal.SIGTERM, signal.SIGTSTP):
        signal.signal(sig, func)
import random
def main():
    """Play repeated rounds of Rock-Paper-Scissors against the computer.

    Reads choices from stdin, prints each round's outcome and a running
    score. Typing 'q' quits the game immediately.
    """
    play = "y"
    cScore = 0
    uScore = 0
    while play == "y":
        computer = random.randint(0, 2)
        userChoice = "spock"       # sentinel: replaced once input is valid
        computerChoice = "spock2"  # sentinel: replaced below
        print("Choose [R]ock, [P]aper, or [S]cissors: ", end="")
        user = "t"
        while user.lower() == "t":
            user = input()
            user = user.lower()
            if user == "r":
                userChoice = "Rock"
            elif user == "p":
                userChoice = "Paper"
            elif user == "s":
                userChoice = "Scissors"
            elif user == "q":
                # BUG FIX: 'q' previously printed "Quitter!" but the round
                # continued with the invalid sentinel choice; now it exits.
                print("Quitter!")
                return
            else:
                print("INVALID! Please type R ,P, S or Q for quit: ", end="")
                user = "t"
        # Map the computer's random number to a choice.
        if computer == 0:
            computerChoice = "Rock"
        elif computer == 1:
            computerChoice = "Paper"
        else:
            computerChoice = "Scissors"
        print("Your Choice was: ", userChoice)
        print("computer chose: ", computerChoice)
        # Decide the round and update the scores.
        if userChoice == computerChoice:
            print("It's a tie!!")
        elif computerChoice == "Paper":
            if userChoice == "Rock":
                print("Computer wins")
                cScore += 1
            else:
                print("You win!!")
                uScore += 1
        elif computerChoice == "Rock":
            if userChoice == "Scissors":
                print("Computer wins")
                cScore += 1
            else:
                print("You win!!")
                uScore += 1
        else:  # computerChoice == "Scissors"
            if userChoice == "Paper":
                print("Computer wins")
                cScore += 1
            else:
                print("You win!!")
                uScore += 1
        print("You scored: ", uScore, "Computer Scored: ", cScore)
        print("play again?: ", end="")
        play = input()
# Entry-point guard: run the game only when executed directly.
if __name__ == '__main__':
    main()
#sort an array of 0's, 1's and 2's
#Method 1
#use a comparison sorting algorithm on the given array
#T.C = O(N log N) (merge sort)
#Method 2
#use counting sort
#Method 3
#use the Dutch National Flag algorithm
#T.C = O(N), single pass, O(1) extra space
def sort(arr):
    """Dutch National Flag: in-place sort of a list containing only 0s, 1s and 2s.

    Single pass, O(n) time and O(1) extra space. Returns the same list.
    """
    if len(arr) <= 1:
        return arr
    low = mid = 0
    high = len(arr) - 1
    while mid <= high:
        value = arr[mid]
        if value == 0:
            # Swap zeros to the front region.
            arr[mid], arr[low] = arr[low], arr[mid]
            low += 1
            mid += 1
        elif value == 1:
            mid += 1
        else:
            # Swap twos to the back region; re-examine the swapped-in value.
            arr[high], arr[mid] = arr[mid], arr[high]
            high -= 1
    return arr
# Demo: sort a sample array of 0s, 1s and 2s and print the result.
arr=[2,0,1,1,1,0,2,1]
print(sort(arr))
|
def palindrome(n):
    """Return True when sequence `n` reads the same forwards and backwards."""
    left, right = 0, len(n) - 1
    while left < right:
        if n[left] != n[right]:
            return False
        left += 1
        right -= 1
    return True
if __name__ == "__main__":#https://stackoverflow.com/questions/419163/what-does-if-name-main-do
    #how to use __name__ == "__main__
    # Prompt the user and report whether the entered string is a palindrome.
    input_str = input("Please input a string:\n")
    if palindrome(input_str):
        print("%s is a palindrome" % input_str)
    else:
        print("%s is not a palindrome" % input_str)
import json
import os, sys
CONFIG_FILE = '.sryapi-cli.json'
class SRYClientPlugin(object):
    """
    Abstract base class for plugins providing their own commands. Each subclass
    must implement `register` methods.
    """
    def __init__(self, runner):
        # The CLI runner driving this plugin; kept for subclasses to use.
        self.runner = runner

    def _before_register(self, parser):
        # Called by the runner to hand over the subparsers object before register().
        self.parser = parser

    def register(self):
        raise NotImplementedError('Plugin must implement `register` method.')

    def set_command(self, *args, **kwargs):
        """Define new command.

        For accepted arguments, see `argparse.ArgumentParser.add_argument`.
        Also creates an ACTION subparser group for add_action() below.
        """
        if 'help' not in kwargs:
            kwargs['help'] = ''
        cmd = self.parser.add_parser(*args, **kwargs)
        self.subparsers = cmd.add_subparsers(metavar='ACTION')

    def add_action(self, *args, **kwargs):
        """Register a sub-action parser under the command created by set_command."""
        return self.subparsers.add_parser(*args, **kwargs)

    def _save_config(self, configs):
        # Persist credentials/config as JSON in the working directory.
        with open(CONFIG_FILE, 'w') as f:
            f.write(json.dumps(configs))

    def _get_config(self):
        """Load the JSON config; exit with a message when it doesn't exist."""
        try:
            if os.path.exists(CONFIG_FILE):
                with open(CONFIG_FILE, 'r') as f:
                    config_json = json.loads(f.read())
                return config_json
            else:
                raise IOError
        except IOError:
            print('Please authenticate with email and password firstly')
            sys.exit(1)

    def _delete_config(self):
        # Remove the stored config (e.g. on logout); a no-op when absent.
        if os.path.exists(CONFIG_FILE):
            os.remove(CONFIG_FILE)
|
import sqlite3
class SQLighter:
    """Minimal sqlite3-backed persistence layer for ORM-style model instances.

    Instances passed to the record methods are expected to expose a
    ``_table_name`` string and an ``_updated_fields`` dict (column -> value),
    plus a ``pk`` attribute once persisted.
    """

    def __init__(self, db_name):
        self._connection = sqlite3.connect(db_name)
        # Maps field-class names of the companion ORM layer to SQLite types.
        self._db_field = {"IntField": "INTEGER", "StringField": "TEXT"}
        self.cursor = self._connection.cursor()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit closes the connection (uncommitted work is lost).
        self._connection.close()

    def create_table(self, fields: dict, table_name):
        """(Re)create `table_name` with an autoincrement pk plus the given fields."""
        self.cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
        columns = ','.join(f"{name} {self._db_field[type(field).__name__]}"
                           for name, field in fields.items())
        sql_query = f'CREATE TABLE {table_name}' \
                    f'(pk INTEGER PRIMARY KEY AUTOINCREMENT,' \
                    f'{columns})'
        self.cursor.execute(sql_query)

    def create_record(self, instance):
        """Insert `instance` (or update it when it already has a pk).

        On insert, stores the new rowid on ``instance.pk`` and clears
        ``_updated_fields``.
        """
        pk = instance.__dict__.get('pk', None)
        table_name = instance._table_name
        col_names = tuple(instance._updated_fields.keys())    # (id, name, ...)
        col_values = tuple(instance._updated_fields.values())  # (3, Jone)
        if pk is None:
            # insert
            placeholders = ",".join("?" * len(col_values))  # "?,?,..."
            # BUG FIX: the column list used the tuple's repr, which adds quotes
            # and a trailing comma for single-column inserts; join the names.
            sql_query = (f'INSERT INTO {table_name} '
                         f'({",".join(col_names)}) VALUES ({placeholders})')
            print(sql_query)
            self.cursor.execute(sql_query, col_values)
            self._connection.commit()
            pk = self.cursor.execute("SELECT last_insert_rowid()").fetchone()[0]
            setattr(instance, 'pk', pk)
            instance._updated_fields = {}
        else:
            self.update_record(instance)

    def delete_record(self, instance):
        """Delete the row whose pk matches `instance.pk`."""
        pk = vars(instance).get('pk', None)
        table_name = instance._table_name
        # BUG FIX: parameterized to avoid SQL injection through pk.
        self.cursor.execute(f"DELETE FROM {table_name} WHERE pk = ?", (pk,))
        self._connection.commit()

    def update_record(self, instance):
        """Write `instance._updated_fields` back to its existing row."""
        table_name = instance._table_name
        col_names = tuple(instance._updated_fields.keys())
        col_values = tuple(instance._updated_fields.values())
        assignments = ','.join(f"{key}=?" for key in col_names)  # col1=?,col2=?
        # BUG FIX: a space was missing between the SET list and WHERE, which
        # produced invalid SQL like "SET col=?WHERE pk=?".
        sql_query = f"UPDATE {table_name} SET {assignments} WHERE pk=?"
        print(sql_query)
        self.cursor.execute(sql_query, (*col_values, instance.pk))
        # BUG FIX: updates were never committed (only inserts/deletes were).
        self._connection.commit()
        # After creating/updating a record, reset the pending-changes dict.
        instance._updated_fields = {}

    def get_record(self, instance, attrs: dict):
        """Fetch all rows of `instance.model_cls`'s table matching every attr."""
        table_name = instance.model_cls._table_name
        # BUG FIX: WHERE conditions must be joined with AND, not commas.
        conditions = ' AND '.join(f"{key}=?" for key in attrs)
        sql_query = f"SELECT * FROM {table_name} WHERE {conditions}"
        print("get_record", sql_query)
        return self.cursor.execute(sql_query, tuple(attrs.values())).fetchall()
import nfc
import threading
import binascii
def decode(device):
    """Return the lowercase hex-string representation of a bytes identifier."""
    return binascii.hexlify(device).decode("utf-8")
def on_startup(targets):
    """nfcpy callback fired before polling; logs a waiting message and passes targets through."""
    print("Esperando un dispositivo (nfc)...")
    return targets
def on_connect(tag):
    """nfcpy callback fired when a tag connects; logs its hex identifier."""
    print("New contactless device detected, ID #",decode(tag.identifier))
def scan():
    """Block until an NFC tag is presented on the PN532 reader.

    Prints any NDEF message found on the tag and returns the tag's
    identifier as a hex string.
    """
    with nfc.ContactlessFrontend('tty:USB0:pn532') as clf:
        tag = clf.connect(rdwr={'targets': ['106A'],'on-startup': on_startup,'on-connect': on_connect})
        if tag.ndef:
            # Dump the NDEF payload for debugging.
            print(tag.ndef.message.pretty())
        return (decode(tag.identifier))
#!/usr/bin/env python3
"""Parse timestamps from log file to convert it to ISO.
Usage:
sgf-parse-log.py [--tz TIMEZONE] [--time-format=TIME_FORMAT]...
[<logfile>]...
sgf-parse-log.py (-h | --help)
sgf-parse-log.py --version
Arguments:
<logfile> Log file to read [default: stdin].
Options:
--tz TIMEZONE Timezone of the timestamps in the log file
[default: 'America/Toronto'].
--time-format=TIME_FORMAT Time format of the timestamps in the log file
[default: 'YYYY MMM D HH:mm:ss'].
It can be specified multiple times.
-h --help Show this screen.
--version Show version.
"""
import sys
import arrow
import dateutil
from docopt import docopt
def read_input(logfile):
    """Yield raw lines from each file in `logfile`, or from stdin when empty.

    Args:
        logfile: list of file paths (docopt's ``<logfile>`` value); an empty
            list means read from standard input.

    Yields:
        str: one line at a time, newline included.
    """
    if not logfile:
        for line in sys.stdin:
            yield line
    else:
        # BUG FIX: iterate the `logfile` parameter instead of reaching for the
        # global `arguments`, and close each file deterministically.
        for infile in logfile:
            with open(infile, 'r') as infp:
                for line in infp:
                    yield line
if __name__ == '__main__':
    arguments = docopt(__doc__, version='sgf-stats.py 0.2')
    logfile = arguments['<logfile>']
    tz = arguments['--tz']
    time_formats = arguments['--time-format']
    # Each log line is expected to look like "MONTH DAY TIME ... IP (COUNTRY)".
    # The year is absent from the log, so the current year is assumed.
    for line in read_input(logfile):
        line = line.split()
        year = arrow.now().year
        month = line[0]
        day = line[1]
        time = line[2]
        country = line[-1].strip('()')
        ip_address = line[-2]
        # Parse with the user-supplied format(s) and attach the log timezone.
        timestamp = arrow.get('{year} {month} {day} {time}'.format(year=year,
                                                                   month=month,
                                                                   day=day,
                                                                   time=time),
                              [tf for tf in time_formats]
                              ).replace(tzinfo=dateutil.tz.gettz(tz))
        print(timestamp, country, ip_address, flush=True)
    exit(0)
|
import time
# This sleep delays initializing things until after the pi finishes booting (this script is run at boot time using cron).
time.sleep(15)
import datetime
import threading
import subprocess
import sys
import os
import json
import queue
import RPi.GPIO as GPIO
from ringmybell.ringbell_reply import ringbell_reply
from ringmybell.ringbell_reply import nighttime_reply
import twython

# Twitter API credentials live outside the repo in ~/.twitterkey/auth.json.
home = os.path.expanduser("~")
auth_file = home + "/.twitterkey/auth.json"
with open(auth_file, 'r') as af:
    key = json.load(af)
consumer_key = key["consumer_key"]
consumer_secret = key["consumer_secret"]
access_token = key["access_token"]
access_token_secret = key["access_token_secret"]

# Create the queue objects
dayQ = queue.Queue()    # mentions received during day hours (ring immediately)
nightQ = queue.Queue()  # mentions deferred until the next day window

# On and Off hours for ringing the physical bell
start = datetime.time(7, 30)
end = datetime.time(20, 30)
# Class to contain tweet bits
class Tweet:
def __init__(self, username, text, tweetid):
self.username=username
self.text=text
self.id=tweetid
# Class for the twitter streamer
class MyStreamer(twython.TwythonStreamer):
def on_success(self, data):
global dayQ,nightQ,start,end
timestamp=datetime.datetime.now().time()
newTweet = Tweet(data['user']['screen_name'], data['text'],str(data['id']))
if start <= timestamp <= end :
dayQ.put(newTweet)
else :
nightQ.put(newTweet)
print("@%s: %s" % (newTweet.username, newTweet.text))
# Function for the twitter streaming thread
# Function for the twitter streaming thread
def streaming(*args):
    """Thread target: open the Twitter stream filtered on bell mentions.

    Blocks indefinitely, feeding MyStreamer.on_success with statuses.
    """
    stream = MyStreamer(
        consumer_key,
        consumer_secret,
        access_token,
        access_token_secret
    )
    stream.statuses.filter(track='@ringandysbell')
# Function for ringing and replying thread
# Function for ringing and replying thread
def worker(*args):
    """Thread target: consume queued mentions forever.

    During day hours, day-queue items get a bell ring + reply. Night-queue
    items get a nighttime reply and are re-queued onto dayQ so the bell still
    rings once day hours arrive.
    """
    global dayQ,nightQ,start,end
    while True:
        if start <= datetime.datetime.now().time() <= end and not dayQ.empty():
            item=dayQ.get()
            ringbell_reply(item)
        # NOTE(review): this branch runs regardless of the time of day, so a
        # night mention arriving during the day is also replied here -- confirm
        # that is intended.
        if not nightQ.empty() :
            item=nightQ.get()
            nighttime_reply(item)
            dayQ.put(item)
        time.sleep(1)  # avoid busy-waiting
# One thread for the Twitter stream, one for ringing/replying.
t1 = threading.Thread(target=streaming)
t2 = threading.Thread(target=worker)

def main():
    """Start both worker threads; they run until the process dies."""
    t1.start()
    t2.start()
    #t1.join()
    #t2.join()

if __name__ == '__main__':
    main()
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import collections, os, random
from StringIO import StringIO
import muz
import muz.assets
import muz.vfs as vfs
from muz.beatmap import log, formats
class NoteError(Exception):
    """Raised when a Note is constructed with an invalid band or timing value."""
class Note(object):
    """A single beatmap note: band index, hit time and hold duration.

    All three values are coerced to non-negative ints; invalid values raise
    NoteError.
    """

    @staticmethod
    def _checked(value, what):
        # Coerce to int and require non-negative; any failure maps to NoteError.
        try:
            parsed = int(value)
            assert parsed >= 0
        except Exception:
            raise NoteError("bad " + what)
        return parsed

    def __init__(self, band, hitTime, holdTime):
        self.hitTime = self._checked(hitTime, "hit time")
        self.holdTime = self._checked(holdTime, "hold time")
        self.band = self._checked(band, "band number")

    def __repr__(self):
        return "Note(%r, %r, %r)" % (self.band, self.hitTime, self.holdTime)

    def __str__(self):
        return repr(self)
class Metadata(collections.MutableMapping):
    """Dict-like store for beatmap metadata (Python 2 code).

    Missing keys read as the empty unicode string instead of raising, and all
    stored values are coerced to unicode (plain str values are decoded as
    UTF-8, other objects go through str() first).
    """

    def __init__(self, *args, **kwargs):
        self.store = dict()
        self.update(dict(*args, **kwargs))

    def __getitem__(self, key):
        # Absent keys yield u"" rather than raising KeyError.
        if key in self.store:
            return self.store[key]
        return u""

    def __setitem__(self, key, value):
        # BUG FIX: the store assignment was inside the `if not isinstance(...)`
        # branch, so setting a value that was already unicode silently did
        # nothing; always store the (coerced) value.
        if not isinstance(value, unicode):
            if not isinstance(value, str):
                value = str(value)
            value = value.decode('utf-8')
        self.store[key] = value

    def __delitem__(self, key):
        del self.store[key]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def __repr__(self):
        return "Metadata(%s)" % repr(self.store)
class Beatmap(collections.MutableSequence):
    """A named, mutable sequence of Notes plus the music they are charted to.

    Acts as a list of Note objects (kept sorted by hit time via fix()),
    resolves its music file through the vfs, carries free-form Metadata,
    and offers in-place chart transformations (shift, scale, randomize,
    insanify, stripHolds, holdify, band reordering).
    """

    def __init__(self, name, numbands, music=None, musicFile=None, source=None, meta=None, vfsNode=None):
        # music:     music file name, resolved through the vfs on demand.
        # musicFile: an already-open file object; takes priority over `music`.
        # source:    optional iterable of Notes copied into this map.
        # vfsNode:   vfs node this beatmap was loaded from, if any.
        self.notelist = []
        self._meta = Metadata()
        self.name = name
        self.music = music
        self._musicFile = musicFile
        self.vfsNode = vfsNode
        self.noterate = 1.0

        try:
            self.numbands = int(numbands)
            # NOTE: the assert disappears under `python -O`; the except only
            # reliably converts a failed int() into ValueError.
            assert self.numbands >= 1
        except Exception:
            raise ValueError("invalid amount of bands")

        if source is not None:
            self.extend(source)

        if meta is not None:
            self.meta.update(meta)

    def clone(self):
        """Return a copy; Note objects themselves are shared by reference."""
        bmap = Beatmap(self.name, self.numbands, source=self, meta=self.meta, vfsNode=self.vfsNode, musicFile=self._musicFile)
        bmap.noterate = self.noterate
        return bmap

    @property
    def musicFile(self):
        """Open file object for the music, located through the vfs if needed."""
        if self._musicFile is not None:
            return self._musicFile
        return self.musicVfsNode.open()

    @property
    def musicVfsNode(self):
        """vfs node for the music file.

        Resolution order: an explicit open file (wrapped in vfs.Proxy),
        then muz.assets.music relative to this map's vfs location, then a
        real file next to the map on disk; RuntimeError if all fail.
        """
        if self._musicFile is not None:
            return vfs.Proxy(self._musicFile)

        root = vfs.root
        if self.vfsNode is not None:
            root = self.vfsNode.parent

        try:
            return muz.assets.music(self.music, root=root)
        except Exception:
            log.debug("musicVfsNode: locate failed", exc_info=True)
            if self.vfsNode is not None and self.vfsNode.realPathExists:
                return vfs.RealFile(os.path.join(os.path.dirname(self.vfsNode.realPath), self.music)).preferAlternative()
            raise RuntimeError("music file %s could not be located" % self.music)

    @property
    def meta(self):
        return self._meta

    @meta.setter
    def meta(self, v):
        # Replace contents in place so self._meta stays the same object.
        self._meta.clear()
        self._meta.update(v)

    def nearest(self, band, time, maxdiff):
        """Return the note in `band` closest to `time` within maxdiff, or None.

        Relies on the notelist being sorted by hitTime (see fix()) to stop
        scanning once notes start more than maxdiff after `time`.
        """
        o = None
        od = maxdiff

        for n in self:
            if n.band == band:
                d = n.hitTime - time
                if d >= maxdiff:
                    break
                # Distance to whichever is closer: the hit or the hold end.
                d = min(abs(d), abs(d + n.holdTime))
                if d < od:
                    o = n
                    od = d

        return o

    def shift(self, offset):
        """Move every note's hit time by `offset`."""
        for note in self:
            note.hitTime += offset

    def scale(self, scale):
        """Multiply every hit and hold time by `scale`, truncating to int."""
        for note in self:
            note.hitTime = int(note.hitTime * scale)
            note.holdTime = int(note.holdTime * scale)

    def checknote(self, note):
        # Validation hook for insertion; intentionally a no-op here.
        pass

    def __len__(self):
        return len(self.notelist)

    def __getitem__(self, i):
        return self.notelist[i]

    def __delitem__(self, i):
        del self.notelist[i]

    def __setitem__(self, i, v):
        self.checknote(v)
        self.notelist[i] = v

    def insert(self, i, v):
        self.checknote(v)
        self.notelist.insert(i, v)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "Beatmap(%s, %s, %s, %s, %s, %s)" % (repr(self.name), repr(self.numbands), repr(self.music), repr(self.musicFile), repr(self.notelist), repr(self.meta))

    def applyMeta(self):
        """Decorate self.name with metadata: "[Variant] Artist - Title (name)"."""
        m = self.meta
        lookForArtist = False

        # TODO: prefer the UTF-8 variants when we can render them correctly
        if m["Music.Name.ASCII"]:
            if self.name:
                self.name = "%s (%s)" % (m["Music.Name.ASCII"], self.name)
            else:
                self.name = m["Music.Name.ASCII"]
            lookForArtist = True
        elif m["Music.Name"]:
            if self.name:
                self.name = "%s (%s)" % (m["Music.Name"], self.name)
            else:
                self.name = m["Music.Name"]
            lookForArtist = True

        # Only prepend the artist if a title was found above.
        if lookForArtist:
            if m["Music.Artist.ASCII"]:
                self.name = "%s - %s" % (m["Music.Artist.ASCII"], self.name)
            elif m["Music.Artist"]:
                self.name = "%s - %s" % (m["Music.Artist"], self.name)

        if self.name:
            if m["Beatmap.Variant.ASCII"]:
                self.name = "[%s] %s" % (m["Beatmap.Variant.ASCII"], self.name)
            elif m["Beatmap.Variant"]:
                self.name = "[%s] %s" % (m["Beatmap.Variant"], self.name)

    @property
    def minimalNoteDistance(self):
        """Smallest gap (> 20) between consecutive hit times, or 0 if none."""
        mindist = 0
        prev = None

        for note in self:
            if prev is not None:
                d = note.hitTime - prev.hitTime
                # Gaps of 20 or less are treated as simultaneous (chords).
                if not mindist or (d > 20 and d < mindist):
                    mindist = d
            prev = note

        return mindist

    def randomize(self):
        """Reassign each note to a random band that is free at its hit time."""
        self.fix()
        mindist = self.minimalNoteDistance
        busy = [0 for band in xrange(self.numbands)]

        for note in self:
            # NOTE(review): random.choice raises IndexError if every band is
            # still busy at note.hitTime — apparently assumed never to happen.
            note.band = random.choice([i for i in xrange(self.numbands) if note.hitTime - busy[i] >= 0])
            busy[note.band] = note.hitTime + note.holdTime + mindist

    def insanify(self):
        """Densify the chart: insert an extra tap halfway between
        sufficiently distant consecutive notes."""
        self.fix()
        mindist = self.minimalNoteDistance
        busy = [0 for band in xrange(self.numbands)]
        prev = None

        for note in tuple(self):
            # NOTE(review): this inner loop runs exactly once with i == 0, so
            # every `prev.holdTime * i` term is 0 — looks like a leftover
            # from experimenting with range(2); kept as-is.
            for i in range(1):
                if prev is not None and note.hitTime - (prev.hitTime + prev.holdTime * i) >= mindist * 2:
                    h = prev.hitTime + prev.holdTime * i + (note.hitTime - (prev.hitTime + prev.holdTime * i)) / 2

                    try:
                        b = random.choice([i for i in xrange(self.numbands) if h - busy[i] >= 0])
                    except IndexError:
                        # No free band for the midpoint note; skip it.
                        pass
                    else:
                        n = Note(b, h, 0)
                        self.append(n)
                        busy[b] = n.hitTime + mindist

            busy[note.band] = note.hitTime + note.holdTime + mindist * 2
            prev = note

        self.fix()

    def stripHolds(self):
        """Replace each held note with two taps (at hold start and end)."""
        for note in tuple(self):
            if note.holdTime:
                t = note.hitTime + note.holdTime
                note.holdTime = 0
                self.append(Note(note.band, t, 0))
        self.fix()

    def holdify(self):
        """Roughly the inverse of stripHolds: merge consecutive taps in a
        band into hold notes."""
        newNotes = []
        lastNotes = {}

        for note in self:
            last = lastNotes.get(note.band)

            if last is None:
                # Start a new (potential) hold in this band.
                n = Note(note.band, note.hitTime, 0)
                lastNotes[note.band] = n
                newNotes.append(n)
            else:
                # Extend the pending note into a hold ending here.
                last.holdTime = note.hitTime - last.hitTime
                if note.holdTime:
                    # The source note was itself a hold: chain a new pending
                    # note at its hold end.
                    n = Note(note.band, note.hitTime + note.holdTime, 0)
                    lastNotes[note.band] = n
                    newNotes.append(n)
                else:
                    lastNotes[note.band] = None

        del self.notelist[:]
        self.notelist.extend(newNotes)
        self.fix()

    def orderBands(self, order):
        """Remap band indices: note.band becomes order[note.band]."""
        for note in self:
            note.band = order[note.band]

    def shuffleBands(self):
        """Randomly permute the bands."""
        o = range(self.numbands)
        random.shuffle(o)
        self.orderBands(o)

    def mirrorBands(self):
        """Reverse the band order (left/right mirror)."""
        self.orderBands(range(self.numbands)[::-1])

    def fix(self):
        """Restore the sorted-by-hitTime invariant."""
        self.notelist.sort(key=lambda note: note.hitTime)
        #self.applyMeta()
class BeatmapBuilder(object):
    """Small DSL for authoring beatmaps in code.

    Select bands with __call__, then append notes with beat()/hold() while
    an internal time cursor (self.pos, milliseconds) advances; delays are
    expressed as fractions of a tact (4 = quarter note).
    """

    def __init__(self, mapname, numbands, msrclist, meta=None):
        """Create the underlying Beatmap, loading music from the first
        usable source in msrclist (a vfs path or a list of fallback paths).

        Raises RuntimeError if no source can be opened.
        """
        if isinstance(msrclist, str) or isinstance(msrclist, unicode):
            msrclist = [msrclist]

        musfile = None
        for musicsource in msrclist:
            try:
                musfile = vfs.locate(musicsource).open()
            except Exception:
                log.warning("couldn't load music source %s", repr(musicsource))
            else:
                # BUG FIX: stop at the first source that opens. The original
                # kept going, opening (and leaking) every remaining source
                # and silently keeping the *last* success instead of the
                # first fallback.
                break

        if musfile is None:
            # Explicit error instead of a bare assert, which is stripped
            # under `python -O` and would let a None musfile through.
            raise RuntimeError("no usable music source in %s" % repr(msrclist))

        self.beatmap = Beatmap(mapname, numbands, music=os.path.split(musfile.name)[-1], musicFile=musfile, meta=meta)
        self.pos = 0
        self.tactLength = 1000.0
        self.bands = []

    @property
    def bpm(self):
        """Tempo in beats per minute (a tact is four beats)."""
        return 60000.0 / (self.tactLength / 4.0)

    @bpm.setter
    def bpm(self, v):
        self.tactLength = (60000.0 / v) * 4.0

    @property
    def meta(self):
        """Metadata of the underlying beatmap."""
        return self.beatmap.meta

    @meta.setter
    def meta(self, v):
        self.beatmap.meta = v

    def __call__(self, *bands):
        """Select the bands that subsequent beat()/hold() calls write to."""
        self.bands = bands
        return self

    def beat(self, delayfract=0.0):
        """Append a tap note at the cursor in each selected band, then
        advance the cursor by tactLength/delayfract."""
        delayfract = self.getDelay(delayfract)

        for band in self.bands:
            self.beatmap.append(Note(band, self.pos, 0))

        self.rawpause(delayfract)
        return self

    def hold(self, holdfract, delayfract=0.0):
        """Append a held note of length tactLength/holdfract in each
        selected band, then advance the cursor."""
        holdfract = self.getDelay(holdfract)
        delayfract = self.getDelay(delayfract)

        for band in self.bands:
            self.beatmap.append(Note(band, self.pos, holdfract))

        self.rawpause(delayfract)
        return self

    def getDelay(self, delayfract):
        """Convert a note-length fraction into milliseconds.

        An iterable of fractions is summed recursively; a falsy value means
        no delay; otherwise the result is tactLength / delayfract.
        """
        try:
            return sum(self.getDelay(d) for d in delayfract)
        except Exception:
            if delayfract:
                return self.tactLength / float(delayfract)
            return 0

    def pause(self, delayfract):
        """Advance the cursor without writing any notes."""
        self.rawpause(self.getDelay(delayfract))

    def rawpause(self, delay):
        """Advance the cursor by `delay` milliseconds."""
        self.pos += delay
def load(name, bare=False, options=None):
    """Locate a beatmap by name and read it with a matching importer.

    An explicit extension in `name` (last dot-separated part, containing no
    space or slash) restricts the search to that importer; absolute-looking
    names are tried verbatim, others are searched in each importer's
    standard locations. Raises RuntimeError when nothing can be located.
    """
    name = str(name)
    node = None
    wantext = None

    if "." in name:
        parts = name.split(".")
        if " " not in parts[-1] and "/" not in parts[-1]:
            wantext = parts[-1]
            name = ".".join(parts[:-1])

    log.info("attempting to load beatmap %s", repr(name))

    for ext, importer in muz.beatmap.formats.importersByInferExt.items():
        if wantext is not None and ext != wantext:
            continue

        if name.startswith(vfs.VPATH_SEP) or name.startswith(os.path.sep):
            candidates = ["%s.%s" % (name, ext)]
        else:
            candidates = ["%s/%s.%s" % (location, name, ext) for location in importer.locations]

        for path in candidates:
            try:
                node = vfs.locate(path)
            except Exception:
                log.debug("couldn't load beatmap %s with the %s importer", repr(path), repr(importer.__name__), exc_info=True)
            else:
                log.info("loading beatmap %s (%s) with the %s importer", repr(name), repr(path), repr(importer.__name__))
                break
        else:
            continue  # no candidate matched for this importer; try the next
        break  # a node was located — stop searching importers

    if node is None:
        raise RuntimeError("No importer available for beatmap %s" % name)

    bm = importer.read(node.open(), node.name, bare=bare, options=options)
    if bm.vfsNode is None:
        bm.vfsNode = node
    if not bm.name:
        bm.name = name
    return bm
def nameFromPath(path):
    """Map a vfs path back to a bare beatmap name.

    Returns the path with the importer's location prefix and the extension
    kept (only the "location/" prefix stripped), or None when no importer
    claims the path.
    """
    path = vfs.normalizePath(path)

    for ext, importer in muz.beatmap.formats.importersByExt.items():
        suffix = "." + ext
        if not path.endswith(suffix):
            continue

        for location in importer.locations:
            prefix = location + "/"
            if path.startswith(prefix):
                return path[len(prefix):]

    return None
def export(*bmaps, **kwargs):
    """Write one or more beatmaps, with their music, into a pack.

    Keyword options: format (beatmap exporter, default formats.muz),
    packType (default vfs.VirtualPack), ifExists (default 'remove'),
    options (passed through to the exporter), name (pack name override).
    """
    fmt = kwargs.get("format", formats.muz)
    packtype = kwargs.get("packType", vfs.VirtualPack)
    ifexists = kwargs.get("ifExists", 'remove')
    options = kwargs.get("options", None)

    name = kwargs.get("name")
    if name is None:
        if len(bmaps) > 1:
            name = "beatmap-pack-%s" % "_".join(m.name for m in bmaps)
        else:
            name = "beatmap-%s" % bmaps[0].name

    pack = packtype(name, ifExists=ifexists)

    for bmap in bmaps:
        buf = StringIO()
        newname, mappath, muspath = fmt.write(bmap, buf, options=options)
        buf.seek(0)

        # Keep the music file's original extension on the packed path.
        muspath = "%s%s" % (muspath, os.path.splitext(bmap.music)[-1])

        with bmap.musicFile as mus:
            pack.addFile(muspath, mus)

        pack.addFile(mappath, buf)

    pack.save()

    if len(bmaps) > 1:
        log.info("exported beatmaps as %s", pack.path)
    else:
        log.info("exported beatmap %s as %s", bmaps[0].name, pack.path)
|
# Generated by Django 3.2.8 on 2021-10-21 06:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds default orderings for collection/customer/product and converts
    # Address to use its customer as a one-to-one primary key (dropping the
    # separate auto id). Do not edit applied migrations — create a new one.

    dependencies = [
        ('store', '0004_rename_payment_order_payment_status'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='collection',
            options={'ordering': ['title']},
        ),
        migrations.AlterModelOptions(
            name='customer',
            options={'ordering': ['first_name', 'last_name']},
        ),
        migrations.AlterModelOptions(
            name='product',
            options={'ordering': ['title']},
        ),
        # Remove the surrogate key so the customer FK below can become the PK.
        migrations.RemoveField(
            model_name='address',
            name='id',
        ),
        migrations.AlterField(
            model_name='address',
            name='customer',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='store.customer'),
        ),
    ]
|
#!/usr/bin/python
#coding=UTF-8
import threading
class t_runner:
    '''Distributes queued callables across a small pool of worker threads.'''

    def __init__(self):
        '''Start with an empty command queue.'''
        self.cmds = []
        return

    class worker(threading.Thread):
        '''Thread that runs its own queue of [callable, args, kwargs] commands.'''

        def __init__(self, name):
            '''Create a named, idle worker with an empty queue.'''
            threading.Thread.__init__(self)
            self.name = name
            self.cmds = []
            # Message of the first failed command, or None if all succeeded.
            self.emsg = None
            return

        def add_cmd(self, cmd):
            '''Queue one [callable, args, kwargs] command.'''
            self.cmds.append(cmd)
            return

        def wlog(self, msg):
            '''Log a message tagged with this worker's name.'''
            print(f'[worker({self.name})]{msg}')
            return

        def run(self):
            '''Execute queued commands in order, stopping at the first failure.'''
            self.wlog(f'[run]:cmds({len(self.cmds)}).')
            self.emsg = None
            for cmd in self.cmds:
                try:
                    fobj = cmd[0]
                    args = cmd[1]
                    kwargs = cmd[2]
                    fobj(*args, **kwargs)
                except Exception as ex:
                    self.emsg = f'{ex}'
                    break
            self.wlog(f'[run]:done.emsg({self.emsg})')
            return

    def wlog(self, msg):
        '''Log a message tagged with the runner's name.'''
        print(f'[t_runner]{msg}')
        return

    def add_cmd(self, *args, **kwargs):
        '''Queue fobj(*args, **kwargs) for execution by run(); the first
        positional argument is the callable.'''
        fobj = args[0]
        a = args[1:]
        k = kwargs
        self.cmds.append([fobj, a, k])
        return

    def run(self):
        '''Fan the queued commands out round-robin over worker threads and
        wait until all of them finish.'''
        wcnt = 3
        tws = []
        self.wlog(f'[run]:workers({wcnt}) cmds({len(self.cmds)}).')
        for index in range(wcnt):
            tws.append(t_runner.worker(f'w{index}'))
        # BUG FIX: round-robin over wcnt workers instead of a hard-coded 3,
        # so changing wcnt can never index a missing worker.
        for num, cmd in enumerate(self.cmds):
            tws[num % wcnt].add_cmd(cmd)
        for tw in tws:
            # daemon attribute instead of the deprecated setDaemon().
            tw.daemon = True
            tw.start()
        for tw in tws:
            tw.join()
        self.wlog(f'[run]:done.')
        return
##
# testing code from here
##
class Individual:
    '''Minimal population member: an id plus an initially-unset fitness.'''

    def __init__(self, mid):
        '''Remember the member id; fitness starts out unknown (None).'''
        self.id = mid
        self.fitness = None
        return

    def __repr__(self):
        '''Readable summary of id and current fitness.'''
        return f'Individual(id:{self.id},fitness:{self.fitness})'
def main_test():
    '''Build a dummy population and run f_optimize for every member whose
    fitness has not been evaluated yet, in parallel via t_runner.'''
    data_train_list = ['ddd']
    data_train_future_returns = ['ttt']
    population = [Individual(mid) for mid in range(5)]

    tc = t_runner()
    for individual in population:
        if individual.fitness is None:
            tc.add_cmd(
                f_optimize, individual,
                minibatch_data_list=data_train_list,
                minibatch_future_returns=data_train_future_returns,
            )
    tc.run()
    return
import time
def f_optimize(individual, minibatch_data_list=None, minibatch_future_returns=None):
    '''Placeholder fitness evaluation: logs, sleeps to simulate work, logs again.

    The defaults are None sentinels rather than [] so a single shared
    mutable list is never reused across calls (mutable-default pitfall).
    '''
    if minibatch_data_list is None:
        minibatch_data_list = []
    if minibatch_future_returns is None:
        minibatch_future_returns = []
    print(f'[f_optimize]:individual({individual})')
    time.sleep(3)
    print(f'[f_optimize]:individual({individual}) done.')
    return
if __name__ == '__main__':
    # Run the threading self-test only when executed as a script.
    main_test()
|
import sublime
from SublimeGHCi.completions.CompletorFactory import *
def default_completor_factory(ghci_factory):
    """Return a CompletorFactory bound to the live Sublime API module."""
    factory = CompletorFactory(sublime, ghci_factory)
    return factory
import taichi as ti
import numpy as np
import math as m
# ti.init(debug=True, arch=ti.cpu)
ti.init(arch=ti.gpu)
# Equi-Angular Sampling
# Implementation of equi-angular sampling for raymarching through homogenous media
# https://www.shadertoy.com/view/Xdf3zB
GUI_TITLE = "Equi-Angular Sampling"
# Render resolution (width, height).
w, h = wh = (640, 360)
# RGB output field, one vec3 per pixel.
# NOTE(review): ti.Vector(3, dt=..., shape=...) is the legacy field API;
# newer taichi releases spell this ti.Vector.field(3, dtype=..., shape=...).
# Confirm against the taichi version pinned for this project.
pixels = ti.Vector(3, dt=ti.f32, shape=wh)
## Constants
from math import pi as PI
# PI = 3.1415926535
SIGMA = 0.3          # extinction coefficient of the homogeneous medium
STEP_COUNT = 16      # volumetric samples per pixel
DIST_MAX = 10.0      # maximum ray distance / miss sentinel
LIGHT_POWER = 12.0   # point-light intensity
SURFACE_ALBEDO = 0.7 # diffuse albedo of the spheres
EPS = 0.01           # shadow-ray origin offset against self-intersection
## Helper functions
@ti.func
def vec3(x):
    """Splat scalar x into a 3-component vector (GLSL-style vec3(x))."""
    return ti.Vector([x] * 3)
@ti.func
def normalize(expr):
    """GLSL-style wrapper around taichi's Vector.normalized()."""
    return expr.normalized()
@ti.func
def dot(x, y):
    """GLSL-style dot product alias for Vector.dot()."""
    return x.dot(y)
@ti.func
def length(v):
    """GLSL-style Euclidean length alias for Vector.norm()."""
    return v.norm()
@ti.func
def mix(x, y, a: ti.f32):
    """Linear interpolation from x to y by fraction a (GLSL mix)."""
    return x * (1 - a) + a * y
##
## shader code ============================================
@ti.func
def fract(x: ti.f32):
    """Fractional part of x (GLSL fract)."""
    whole = ti.floor(x)
    return x - whole
@ti.func
def hash(n: ti.f32):
    """Cheap deterministic pseudo-random value in [0, 1) from a float seed
    (classic sin-based shader hash)."""
    return fract(ti.sin(n) * 43758.5453123)
@ti.func
def sampleCamera(
    fragCoord: ti.Vector,  # vec2 pixel coordinate
    u: ti.Vector,          # vec2 sub-pixel offset in [0,1)^2
    rayOrigin: ti.Vector   # vec3 camera position
):
    """Map a (jittered) pixel coordinate to a normalized camera ray direction.

    The film plane sits at z = 0 with x spanning [-aspect, aspect] and y
    flipped so the image is top-down.
    """
    ## vec2 normalized film coordinate
    filmUv = (fragCoord + u) / ti.Vector([w, h])
    target = ti.Vector([
        (2.0 * filmUv[0] - 1.0) * (w / h),
        1.0 - 2.0 * filmUv[1],
        0.0,
    ])
    return normalize(target - rayOrigin)
# @ti.func sampleCamera
@ti.func
def intersectSphere(
    rayOrigin: ti.Vector,    # vec3
    rayDir: ti.Vector,       # vec3
    sphereCentre: ti.Vector, # vec3
    sphereRadius: ti.f32,
    rayT: ti.f32             # [inout] current nearest hit distance
):
    """Ray/sphere intersection; returns (rayT, geomNormal).

    rayT is reduced to the nearest sphere hit in (0, rayT) if one exists;
    geomNormal is the outward unit normal at that hit, or the zero vector
    when there is no closer intersection.
    """
    # ray: x = o + dt, sphere: (x - c).(x - c) == r^2
    # let p = o - c, solve: (dt + p).(dt + p) == r^2
    #
    # => (d.d)t^2 + 2(p.d)t + (p.p - r^2) == 0
    geomNormal = vec3(0.0)
    ## vec3
    p = rayOrigin - sphereCentre
    d = rayDir
    ## f32 — quadratic coefficients and discriminant
    a = dot(d, d)
    b = 2.0 * dot(p, d)
    c = dot(p, p) - sphereRadius * sphereRadius
    q = b*b - 4.0*a*c
    if q > 0.0:
        ## f32
        denom = 0.5 / a
        z1 = -b * denom
        z2 = abs(ti.sqrt(q) * denom)
        t1 = z1 - z2  # near root
        t2 = z1 + z2  # far root
        intersected = False
        # Prefer the near root; fall back to the far one (origin inside).
        if (0.0 < t1) and (t1 < rayT):
            intersected = True
            rayT = t1
        elif (0.0 < t2) and (t2 < rayT):
            intersected = True
            rayT = t2
        if intersected:
            geomNormal = normalize(p + d*rayT)
    # if (q > 0.0) END
    return (rayT, geomNormal)
# @ti.func intersectSphere
@ti.func
def intersectScene(
    rayOrigin: ti.Vector,  # vec3
    rayDir: ti.Vector,     # vec3
    rayT: ti.f32           # [inout] current nearest hit distance
):
    """Intersect the ray with all three spheres; returns (rayT, geomNormal).

    BUG FIX: the original unconditionally overwrote geomNormal with each
    sphere's return value, so a miss on a later sphere zeroed the normal
    of an earlier hit (intersectSphere returns vec3(0) on a miss). Keep
    the normal of whichever sphere produced the current nearest rayT.
    """
    geomNormal = vec3(0.0)

    prevT = rayT
    rayT, n = intersectSphere(rayOrigin, rayDir, ti.Vector([-0.5,  0.5, 0.3]), 0.25, rayT)
    if rayT < prevT:
        geomNormal = n

    prevT = rayT
    rayT, n = intersectSphere(rayOrigin, rayDir, ti.Vector([ 0.5, -0.5, 0.3]), 0.25, rayT)
    if rayT < prevT:
        geomNormal = n

    prevT = rayT
    rayT, n = intersectSphere(rayOrigin, rayDir, ti.Vector([ 0.5,  0.5, 0.3]), 0.25, rayT)
    if rayT < prevT:
        geomNormal = n

    return (rayT, geomNormal)
# @ti.func intersectScene
@ti.func
def sampleUniform(
    u: ti.f32,
    maxDistance: ti.f32
):
    """Uniformly sample a distance in [0, maxDistance]; returns (dist, pdf)."""
    return (u * maxDistance, 1.0 / maxDistance)
# @ti.func sampleUniform
@ti.func
def sampleScattering(
    u: ti.f32,
    maxDistance: ti.f32
):
    """Sample a distance with pdf proportional to exp(-SIGMA*d), truncated
    to [0, maxDistance]; returns (dist, pdf)."""
    # remap u to account for finite max distance
    ## f32
    minU = ti.exp(-SIGMA * maxDistance)
    a = u*(1.0 - minU) + minU
    # sample with pdf proportional to exp(-sig*d)
    dist = -ti.log(a) / SIGMA
    pdf = SIGMA*a / (1.0 - minU)
    return (dist, pdf)
# @ti.func sampleScattering
@ti.func
def sampleEquiAngular(
    u: ti.f32,
    maxDistance: ti.f32,
    rayOrigin: ti.Vector,  # vec3
    rayDir: ti.Vector,     # vec3
    lightPos: ti.Vector    # vec3
):
    """Equi-angular distance sampling along the ray w.r.t. a point light:
    uniform in the angle subtended at the light; returns (dist, pdf)."""
    # get coord of closest point to light along (infinite) ray
    delta = dot(lightPos - rayOrigin, rayDir)
    # get distance this point is from light (perpendicular distance)
    D = length(rayOrigin + delta*rayDir - lightPos)
    # get angle of endpoints of the [0, maxDistance] segment
    thetaA = ti.atan2(0.0 - delta, D)
    thetaB = ti.atan2(maxDistance - delta, D)
    # take sample: uniform in angle, inverted back to a ray distance
    t = D * ti.tan(mix(thetaA, thetaB, u))
    dist = delta + t
    pdf = D / ((thetaB - thetaA) * (D*D + t*t))
    return (dist, pdf)
# @ti.func sampleEquiAngular
@ti.func
def mainImage(
    iTime: ti.f32,
    i: ti.i32,
    j: ti.i32,
    splitCoord: ti.f32
):
    """Shade pixel (i, j) at animation time iTime; returns the vec3 colour.

    Left of splitCoord the volumetric term uses transmittance-proportional
    distance sampling (sampleScattering); right of it, equi-angular
    sampling (sampleEquiAngular) — a side-by-side comparison.
    """
    fragCoord = ti.Vector([i, j])
    # Animated point-light position.
    lightPos = ti.Vector([
        0.8 * ti.sin(iTime * 7.0 / 4.0),
        0.8 * ti.sin(iTime * 5.0 / 4.0),
        0.0
    ])
    lightIntensity = vec3(LIGHT_POWER)
    surfIntensity = vec3(SURFACE_ALBEDO / PI)    # Lambertian BRDF
    particleIntensity = vec3(1.0 / (4.0*PI))     # isotropic phase function
    rayOrigin = ti.Vector([0.0, 0.0, 5.0])
    rayDir = sampleCamera(fragCoord, ti.Vector([0.5, 0.5]), rayOrigin)
    col = vec3(0.0)
    t = DIST_MAX
    t, n = intersectScene(rayOrigin, rayDir, t)
    if t < DIST_MAX:
        # connect surface to light
        surfPos = rayOrigin + t*rayDir
        lightVec = lightPos - surfPos
        lightDir = normalize(lightVec)
        cameraDir = -rayDir
        nDotL = dot(n, lightDir)
        nDotC = dot(n, cameraDir)
        # only handle BRDF if entry and exit are same hemisphere
        if nDotL*nDotC > 0.0:
            d = length(lightVec)
            t2 = d
            # NOTE(review): this clobbers the camera ray direction; the
            # raymarch loop below reuses rayDir and will march along the
            # shadow ray whenever this branch ran — looks like a porting
            # bug, compare against the original shadertoy (Xdf3zB).
            rayDir = normalize(lightVec)
            t2, n2 = intersectScene(surfPos + EPS*rayDir, rayDir, t2)
            # accumulate surface response if not occluded
            if t2 == d:
                # transmittance along camera->surface->light
                trans = ti.exp(-SIGMA * (d + t))
                geomTerm = abs(nDotL) / dot(lightVec, lightVec)
                col = surfIntensity*lightIntensity*geomTerm*trans
            # if t2 == d
        # if nDotL*nDotC > 0.0
    # if t < DIST_MAX
    # per-pixel, per-frame jitter for the stratified distance samples
    offset = hash(fragCoord[1]*w + fragCoord[0] + iTime)
    for stepIndex in range(STEP_COUNT):
        u = (stepIndex+offset) / STEP_COUNT
        # sample along ray from camera to surface
        x, pdf = 0.0, 0.0
        if (fragCoord[0] < splitCoord):
            x, pdf = sampleScattering(u, t)
        else:
            x, pdf = sampleEquiAngular(u, t, rayOrigin, rayDir, lightPos)
        # adjust for number of ray samples
        pdf *= STEP_COUNT
        # connect to light and check shadow ray
        particlePos = rayOrigin + x*rayDir
        lightVec = lightPos - particlePos
        d = length(lightVec)
        t2 = d
        t2, n2 = intersectScene(particlePos, normalize(lightVec), t2)
        # accumulate particle response if not occluded
        if t2 == d:
            trans = ti.exp(-SIGMA * (d + x))
            geomTerm = 1.0 / dot(lightVec, lightVec)
            col += SIGMA*particleIntensity*lightIntensity*geomTerm*trans/pdf
        # if (t2 == d) END
    # for stepIndex in range(STEP_COUNT)
    # show slider position
    if abs(fragCoord[0] - splitCoord) < 1.0:
        col[0] = 1.0
    # rough gamma correction: col^(1/2)
    col = pow(col, vec3(1.0/2.0))
    fragColor = col
    return fragColor
# @ti.func mainImage
@ti.func
def genSplitLine():
    """x position of the comparison split line, driven by the mouse.

    NOTE(review): ti.GUI.get_cursor_pos() returns *normalized* [0,1]
    coordinates while the default w / 2 and the comparison in mainImage
    use pixel units — the slider likely needs iMouse[0] * w; confirm.
    Also note this @ti.func touches the python-side `gui` object, which
    is only defined later at module level.
    """
    iMouse = gui.get_cursor_pos()
    splitCoord = w / 2
    if iMouse[0] != 0.0:
        splitCoord = iMouse[0]
    return splitCoord
@ti.kernel
def render(t: ti.f32):
    """Shade every pixel of the output field for animation time t."""
    splitCoord = genSplitLine()
    # Parallel loop over the pixel field.
    for i, j in pixels:
        pixels[i, j] = mainImage(t, i, j, splitCoord)
    return
def main(output_img=False):
    """Interactive render loop; ESC exits.

    output_img = True  # additionally save each frame as frame/NNNN.png
    """
    for ts in range(1000000):
        if gui.get_event(ti.GUI.ESCAPE):
            exit()
        render(ts * 0.02)  # 0.02 s of animation time per frame
        gui.set_image(pixels.to_numpy())
        if output_img:
            gui.show(f'frame/{ts:04d}.png')
        else:
            gui.show()
# Module-level window so render()/main() can reach it.
gui = ti.GUI(GUI_TITLE, res=wh)
if __name__ == '__main__':
    # main(output_img=True)  # uncomment to also dump frames as PNGs
    main()
|
import tkinter as tk
from tkinter import messagebox
from tkinter.font import Font
from gerenciador_db import *
import os
import shutil
import cv2
import face_recognition
from datetime import datetime
fonte = "Helvetica 16 bold"     # main UI font
fonte_pequena = "Helvetica 12"  # smaller font for input widgets
# Directory containing this script; used to build image output paths.
LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
# Captured photo (OpenCV BGR frame); starts as an empty list so that
# len(foto) > 0 doubles as a "photo was taken" flag.
foto = []
def cadastro():
    """Registration window: capture a face photo from the webcam, collect a
    name and access level, and persist the new user via adicionar()."""

    def btnFoto_Click():
        # Webcam preview loop; pressing 'f' offers to keep the current frame.
        messagebox.showinfo("Cadastro", "Aperte (F) para tirar a foto")
        cap = cv2.VideoCapture(0)
        while(True):
            # NOTE(review): on the very first iteration `frame` is referenced
            # (inside the 'yes' branch) before cap.read() has ever assigned
            # it — only reachable if 'f' is pressed within the first 20 ms;
            # confirm whether the read should happen before the key check.
            if cv2.waitKey(20) & 0xFF == ord('f'):
                result = tk.messagebox.askquestion ('Fotografia','Salvar essa imagem?',icon = 'warning')
                if result == 'yes':
                    global foto
                    foto = frame  # keep the captured BGR frame for btnSalvar_Click
                    break
                else:
                    continue  # discard and keep previewing
            ret, frame = cap.read()
            # Draw a green rectangle around the first detected face
            # (face_recognition returns (top, right, bottom, left)).
            local_das_faces = face_recognition.face_locations(frame)
            if(len(local_das_faces) > 0):
                local_das_faces = local_das_faces[0]
                cor = (0, 255, 0)
                traco = 2
                cv2.rectangle(frame, (local_das_faces[3],local_das_faces[0]), (local_das_faces[1], local_das_faces[2]), cor, traco)
            cv2.imshow("Camera", frame)
        cv2.destroyAllWindows()
        cap.release()

    def btnSalvar_Click():
        # Validate: non-empty name, a captured photo and a selected level.
        nome = txt_nome.get()
        if(nome != "" and len(foto) > 0 and lb_nivel.curselection() ):
            # Rows are listed Publico/Diretor/Ministro (see inserts below),
            # so selected row index + 1 is the numeric access level.
            nivel = lb_nivel.curselection()[0] + 1
            # Image filename: the timestamp reduced to alphanumerics.
            imgname = "".join(e for e in str(datetime.now()) if e.isalnum())
            # Only write the image once the DB insert succeeded.
            if adicionar(nome, "/img/cadastrados/{}.jpg".format(imgname), nivel):
                cv2.imwrite(LOCAL_DIR+"/img/cadastrados/{}.jpg".format(imgname), foto)
                messagebox.showinfo("Cadastro", "Novo usuario cadastrado com sucesso")
                janela_cadastro.destroy()
        else:
            messagebox.showwarning("ATENCAO", "Preencha o formulario de cadastro antes de salvar!")

    # Creating the registration window
    janela_cadastro = tk.Tk()
    janela_cadastro["bg"] = "black"
    janela_cadastro.resizable(0, 0)
    # Center a 400x500 window on the screen.
    largura = 400
    altura = 500
    screen_width = janela_cadastro.winfo_screenwidth()
    screen_height = janela_cadastro.winfo_screenheight()
    x_cordinate = int((screen_width/2) - (largura/2))
    y_cordinate = int((screen_height/2) - (altura/2))
    janela_cadastro.geometry("{}x{}+{}+{}".format(largura, altura, x_cordinate, y_cordinate))

    # Registration window widgets
    lbl_img = tk.Label(janela_cadastro, text="Foto: ", fg="yellow", bg="black",font=fonte )
    lbl_img.pack(fill=tk.X, pady=1)
    btnFoto = tk.Button(janela_cadastro, text="Tirar fotografia", bg="yellow", fg="black", command=btnFoto_Click, font=fonte)
    btnFoto.pack(fill=tk.X, pady=1)
    lbl_nome = tk.Label(janela_cadastro, text="Nome: ", fg="yellow", bg="black", font=fonte)
    lbl_nome.pack(fill=tk.X, pady=10)
    txt_nome = tk.Entry(janela_cadastro, font=fonte_pequena)
    txt_nome.pack(fill=tk.X, pady=1)
    lbl_nivel = tk.Label(janela_cadastro, text="Nivel de acesso: ", fg="yellow", bg="black", font=fonte)
    lbl_nivel.pack(fill=tk.X, pady=10)
    # Inserting each item at index 0 reverses the order: the listbox shows
    # Publico, Diretor, Ministro top to bottom (row index + 1 == level).
    lb_nivel = tk.Listbox(janela_cadastro, font=fonte_pequena)
    lb_nivel.insert(0,"Nivel Ministro")
    lb_nivel.insert(0,"Nivel Diretor")
    lb_nivel.insert(0,"Nivel Publico")
    lb_nivel.pack(fill=tk.X)
    btnSalvar = tk.Button(janela_cadastro, text="Salvar", bg="yellow", fg="black", command=btnSalvar_Click, font=fonte)
    btnSalvar.pack(fill=tk.X, pady=10)
    janela_cadastro.mainloop()
|
# # you can write to stdout for debugging purposes, e.g.
# # print("this is a debug message")
# def solution(A, S):
# # write your code in Python 3.6
# aLen = len(A)
# listTracker = []
# # print(aLen)
# for i in range(aLen):
# # print("i = %s -" % (i))
# subStringL = 1
# mean = A[i]
# # test case of single digit
# if mean == S:
# # print("found S")
# if A[i] not in listTracker:
# listTracker.append([A[i]])
# # check following digits
# for j in range(i + 1, aLen, 1):
# # print("j = %s --" % (j))
# # mean += A[j]
# mean = 0
# for k in range(i, j + 1):
# mean += A[k]
# mean = mean / (len(A[i:j]) + 1)
# if (mean.is_integer()):
# # print("found int")
# mean = int(mean)
# # print("mean = %s" % (mean))
# if mean == S:
# # print("found S")
# if A[i:j] not in listTracker:
# listTracker.append(A[i:j + 1])
# # print(listTracker)
# if len(listTracker) > 1000000000:
# return 1000000000
# else:
# return len(listTracker)
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A, S):
    """Count contiguous slices of A whose arithmetic mean equals S.

    Single-element slices count. Following the original's "if its not
    already been identified" intent, each distinct slice *content* is
    counted once; the result is capped at 1,000,000,000.

    Fixes over the original:
    * the dedup check compared A[i:j] but appended A[i:j + 1], so the
      membership test never looked at the slice actually recorded;
    * the single-element check looked for a bare value in a list of
      lists, which never matched (duplicates were always re-added);
    * the mean was recomputed with an O(n) inner loop (O(n^3) overall)
      using float division; a running integer sum is O(n^2) and exact
      (mean == S  <=>  total == S * length for integer S).
    """
    aLen = len(A)
    seen = []  # distinct qualifying slice contents, in discovery order

    for i in range(aLen):
        total = 0
        for j in range(i, aLen):
            total += A[j]
            count = j - i + 1
            # Integer test avoids float rounding: mean == S iff total == S*count.
            if total == S * count:
                piece = A[i:j + 1]
                # if it's not already been identified, add to the list
                if piece not in seen:
                    seen.append(piece)

    if len(seen) > 1000000000:
        return 1000000000
    else:
        return len(seen)
|
#!/usr/bin/env python
# coding: utf-8
# # Important Python Packages imported.
# In[1]:
from pylab import plot,show
from numpy import vstack,array
from numpy.random import rand
from scipy.cluster.vq import kmeans,vq
from sklearn import datasets
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from scipy import stats
import warnings
import impyute as impy
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import roc_auc_score
# # Import Data Set Using Pandas.
# In[2]:
# NOTE(review): hard-coded absolute Windows path — breaks on any other
# machine; consider a relative path or a CLI/environment argument.
Cancer = pd.read_csv('C:/Users/abdi K/Documents/PoB-Coursework/risk_factors_cervical_cancer (2).csv')
# # Check Data Type
# In[3]:
Cancer.info()
# ^ There are 5 columns that have to be deleted, 3 target variables and 2 features.
# 1) STDs: Time since first diagnosis
# 2) STDs: Time since last diagnosis
# 3) Hinselmann
# 4) Schiller
# 5) Citology
#
# Also object data type has to be converted to numeric or integer for preprocessing.
# # Count Of Biopsy Positive/Negative.
# In[4]:
Cancer['Biopsy'].value_counts()
# # Fill Missing Values with 'NaN'
# In[5]:
# '?' is this dataset's missing-value marker; turn it into a real NaN.
Cancerna = Cancer.replace('?', np.nan)
# # Find Column Totals of Missing Values
# In[6]:
Cancerna.isnull().sum()
# ^ We can see that 2 features above have to be omitted as they are missing significant portions.
# In[7]:
Cancer = Cancerna
# ^ Re-define Cancerna as Cancer.
# # Deletion of 5 Attributes.
# In[8]:
# Drop (by position) the two sparse STD-timing features and the three
# alternative screening targets (Hinselmann / Schiller / Citology).
Cancer.drop(Cancer.columns[[26,27,32,33,34]], axis=1, inplace=True)
# # Check How Many Columns Are Empty.
# In[9]:
Cancer.isnull().sum()
# ^ There are 24 columns with missing values and hence they have to be filled in some way.
# # Convert Objects into Numeric For Data Preprocessing
# In[10]:
# DataFrame.convert_objects() was deprecated in pandas 0.17 and later
# removed; apply(pd.to_numeric, errors='coerce') is the supported
# equivalent (unparseable values become NaN, matching convert_numeric=True).
Cancer = Cancer.apply(pd.to_numeric, errors='coerce')
# # Fill Columns with Mean/Median.
# In[11]:
# Continuous variables (measurements, e.g. years) are imputed with the
# column mean; discrete/boolean variables with the column median. These
# are the 24 remaining columns with missing values. Driving the fills
# from two column lists replaces 24 near-identical statements and keeps
# the column/statistic pairing in one obvious place.
_mean_cols = [
    'Number of sexual partners',
    'First sexual intercourse',
    'Smokes (years)',
    'Smokes (packs/year)',
    'Hormonal Contraceptives (years)',
    'IUD (years)',
]
_median_cols = [
    'Num of pregnancies',
    'Smokes',
    'Hormonal Contraceptives',
    'IUD',
    'STDs',
    'STDs (number)',
    'STDs:condylomatosis',
    'STDs:cervical condylomatosis',
    'STDs:vaginal condylomatosis',
    'STDs:vulvo-perineal condylomatosis',
    'STDs:syphilis',
    'STDs:pelvic inflammatory disease',
    'STDs:genital herpes',
    'STDs:molluscum contagiosum',
    'STDs:AIDS',
    'STDs:HIV',
    'STDs:Hepatitis B',
    'STDs:HPV',
]
for _col in _mean_cols:
    Cancer[_col] = Cancer[_col].fillna(Cancer[_col].mean())
for _col in _median_cols:
    Cancer[_col] = Cancer[_col].fillna(Cancer[_col].median())
# ^ Continuous variables are filled with the column mean (6 columns) and
# discrete/boolean variables with the column median (the remaining 18).
# # Check All columns have been filled/Summary statistics.
# In[12]:
Cancer.isnull().sum()
# In[13]:
Cancer.describe()
# # Heatmap for Attribute Correlation.
# In[14]:
correlationMap = Cancer.corr()
plt.figure(figsize=(40,40))
sns.set(font_scale=3)
# NOTE(review): `center` in sns.heatmap expects a numeric midpoint for the
# colormap; center=True will be coerced — confirm the intended value
# (e.g. center=0.5).
hm = sns.heatmap(correlationMap,cmap = 'Set1', cbar=True, annot=True,vmin=0,vmax =1,center=True, square=True, fmt='.2f', annot_kws={'size': 25},
                 yticklabels = Cancer.columns, xticklabels = Cancer.columns)
plt.show()
# ^ It is difficult to see which attributes are correlated, hence it would be more practical to plot heatmaps for one attribute at a time. Also the above correlations can be deduced intuitively, such as: STDs (number) correlated with STDs, and IUD correlated with IUD (years), and so on. It may seem obvious, but a noteworthy correlation to mention is Age and number of pregnancies, at 0.58 correlation.
# In[15]:
correlationMap = Cancer.corr()
# Focused heatmap: the k attributes most correlated with the Biopsy target.
k = 16
correlations = correlationMap.nlargest(k, 'Biopsy')['Biopsy'].index
M = Cancer[correlations].corr()
plt.figure(figsize=(10,10))
sns.set(font_scale=1)
H = sns.heatmap(M, cbar=True, cmap='rainbow' ,annot=True,vmin=0,vmax =1, square=True, fmt='.2f', annot_kws={'size': 12},
                yticklabels = correlations.values, xticklabels = correlations.values)
plt.show()
# We can see that Hormonal Contraceptives and Age, IUD and Age have an effect on Biopsy
# # Comparison Of Biopsy Result For Attributes
# In[16]:
# Per-class feature means: one row per feature, one column per Biopsy
# outcome (0 = negative, 1 = positive).
new_col= Cancer.groupby('Biopsy').mean()
print(new_col.head().T)
# Pairwise scatter plots of the continuous features, coloured by Biopsy.
cols = ['Age', 'Number of sexual partners',
        'First sexual intercourse', 'Smokes (packs/year)',
        'Hormonal Contraceptives (years)','IUD (years)', 'Smokes (years)']
sns.pairplot(Cancer,
             x_vars = cols,
             y_vars = cols,
             hue = 'Biopsy',)
# ^ Above we have a 31 by 3 matrix. The first row, 'Biopsy', is divided into its two categories, 0 and 1, where 0 = negative and 1 = positive for the onset of cervical cancer. The following rows are the remaining 30 features. The average is calculated for each feature, over all values corresponding to 0 and 1 for the biopsy method. We can see that there are 8 features completely free from a positive reading of cervical cancer.
#
# 1) STDs:cervical condylomatosis
# 2) STDs:vaginal condylomatosis
# 3) STDs:syphilis
# 4) STDs:pelvic inflammatory disease
# 5) STDs:molluscum contagiosum
# 6) STDs:HPV
# 7) STDs:AIDS
# 8) STDs:Hepatitis B
#
# Human papillomavirus (HPV) is reported to be the primary cause of cervical cancer [5]. In our investigation above, it seems not to be detected at all by the biopsy screening method, suggesting either that it has no effect on cervical cancer or that the screening method is very inaccurate.
#
# For positive readings of cervical cancer by Biopsy method, there is an increase for its column-average in each feature. Except for the following features, there is a decrease.
#
# 1) number of sexual partners
# 2) Hormonal contraceptives
# 3) STDs:pelvic inflammatory disease
#
# # ^ Multiple Pairwise Bivariate Distributions
# Observing the graph of 'First sexual intercourse' by 'Age', it is visible that < 20 years for 'First sexual intercourse' and < 40 for 'Age' show an increased occurrence of positive readings in Biopsy. Also, 'First sexual intercourse' by 'Number of sexual partners' has positive readings concentrated around < 20 years for 'First sexual intercourse' and < 10 for 'Number of sexual partners'.
# # Random Forest Regressor
# Random forest is an ensemble algorithm that takes observations and variables and then creates decision trees. It is useful as it builds multiple decision trees then takes an average for an enhanced accuracey as compared to decision trees.
# # Creation of New columns From Existing Attributes for RF.
# YRSS:Years passed since patient had first sexual intercourse
# NSPP:Number of sexual partners since first time as a percentage.
# HPA: Hormonal Contraceptives/age
# TPS:Total packets of cigarettes smoked
# NPA:Number of pregnancies/age
# NSA:Number of sexual partners/age
# NYHC:number of years patient did not take Hormonal Contraceptives
# APP:number of pregnancy/number of sexual partner
# NHCP:number of years patient took Hormonal Contraceptives after first sexual intercourse as a percentage
# In[17]:
# Derived features built from the raw columns.  The ratio features can divide
# by zero, producing inf values that are cleaned up before modelling (below).
Cancer['YRSS'] = Cancer['Age'] - Cancer['First sexual intercourse']  # years since first sexual intercourse
Cancer['NSPP'] = Cancer['Number of sexual partners'] / Cancer['YRSS']  # partners per year since first intercourse
Cancer['HPA'] = Cancer['Hormonal Contraceptives (years)'] / Cancer['Age']  # contraceptive years relative to age
Cancer['TPS'] = Cancer['Smokes (packs/year)'] * Cancer['Smokes (years)']  # total packs of cigarettes smoked
Cancer['NPA'] = Cancer['Num of pregnancies'] / Cancer['Age']
Cancer['NSA'] = Cancer['Number of sexual partners'] / Cancer['Age']
Cancer['NYHC'] = Cancer['Age'] - Cancer['Hormonal Contraceptives (years)']  # years without hormonal contraceptives
Cancer['APP'] = Cancer['Num of pregnancies'] / Cancer['Number of sexual partners']
Cancer['NHCP'] = Cancer['Hormonal Contraceptives (years)'] / Cancer['YRSS']  # contraceptive years as share of sexually active years
# ^ Above, I have decided to create new columns that might better explain a positive reading of cervical cancer.
# In[18]:
X = Cancer.drop('Biopsy', axis =1)  # feature matrix: everything except the target
Y = Cancer["Biopsy"]                # target: biopsy screening result (0/1)
# The ratio features above can divide by zero, yielding infinities the model
# cannot handle, so replace them with 0.  Fix: the original replaced only
# +inf; -inf is now included as well so any negative-ratio infinities are
# covered too (backward compatible — rows without -inf are unchanged).
# In[19]:
x = X.replace([np.inf, -np.inf], 0)
# Confirm no column still contains missing values before fitting the RF.
# In[20]:
x.isnull().sum()
# ^ Column 'NHCP' is missing 16 values, we fill with its mean.
# In[21]:
x['NHCP'] = x['NHCP'].fillna(x['NHCP'].mean())
# In[22]:
x.isnull().sum()
# # Now we are ready to run RF algorithm.
# In[23]:
# In[23]:
# Leftover notebook timing magic; called with no statement, so it does nothing here.
get_ipython().run_line_magic('timeit', '')
# In[24]:
# Baseline random forest: 7 features per split (~sqrt of the column count),
# 100 trees, all cores, OOB scoring enabled as built-in cross-validation.
model = RandomForestRegressor(max_features = 7, n_estimators = 100, n_jobs = -1, oob_score = True, random_state = 42)
# In[25]:
model.fit(x,Y)
# max_features: This is the maximum number of variables RF is allowed to test in each node. An increase in variables to be tested at each node generally improves performance; the downside is that the diversity of each node is reduced, which is the unique selling point of RF. For our classification problem, I will use the square root of the count of variables, which in our case is 6.24 but rounded to 7.
#
# n_estimators: This is the number of trees that are built before average is taken, ideal to have high number of trees, downside is code runs slower.
#
# n_jobs: This code tells engine how many processors to use, "1" for one processor and "-1" for unrestricted.
#
# random_state: This code allows the solution to be easily replicated.
#
# oob_score: This is a RF cross-validation method.
#
# We have out of bag score, the trailing underscore after "score" means R^2 is available after model has been trained.
# We have a R^2 = 0.00317, this is very low.
# In[26]:
# OOB R^2 of the trained forest (the trailing underscore marks a fitted attribute).
model.oob_score_
# The C-statistic (concordance statistic) measures goodness of fit for binary
# outcomes: the probability that a randomly selected positive case is ranked
# above a negative one.  It equals the area under the ROC curve.
# In[27]:
Y_oob = model.oob_prediction_  # out-of-bag prediction for every observation
# In[28]:
print("C-Stat: ", roc_auc_score(Y, Y_oob))
# ^ This C-stat is the benchmark to improve on; the statistic ranges from 0.5
# to 1, and values of 0.70+ are usually considered a good model fit.
# In[29]:
Y_oob
# # Improving Model
# I will introduce dummy variables for all categorical variables in my data set. This is done to capture directionality of the categorical variables, also dummy variables allow us to use one regression equation to represent many groups. K-1, where k is the number of levels for a variable determines how many dummy variables to use.
# In[30]:
# One-hot encode every categorical column (one dummy column per level),
# appending the dummies to the feature matrix and dropping the original.
categorical_variables = ["Smokes", "Hormonal Contraceptives", "IUD", "STDs", "STDs:condylomatosis",
"STDs:cervical condylomatosis", "STDs:vaginal condylomatosis", "STDs:vulvo-perineal condylomatosis", "STDs:syphilis",
"STDs:pelvic inflammatory disease", "STDs:genital herpes", "STDs:molluscum contagiosum", "STDs:AIDS", "STDs:HIV",
"STDs:Hepatitis B", "STDs:HPV", "Dx:Cancer", "Dx:CIN", "Dx:HPV", "Dx"]
for cat_col in categorical_variables:
    onehot = pd.get_dummies(x[cat_col], prefix=cat_col)
    x = pd.concat([x, onehot], axis=1).drop(columns=[cat_col])
# In[31]:
# In[31]:
# Refit with max_features bumped to 8 now that the dummy columns widened the matrix.
model = RandomForestRegressor(max_features = 8, n_estimators = 100, n_jobs = -1, oob_score = True, random_state = 42)
model.fit(x,Y)
# In[32]:
print("C-stat : ", roc_auc_score(Y, model.oob_prediction_))
# ^ after introducing dummy variables for all the categorical variables, we get a slight improvement of the model. This is a 3.4% improvement which is not enough to reach our mark of 0.7.
#
# # Plot And Sort Features Importance
# In[33]:
# Rank every feature by its importance in the fitted forest and plot them.
feature_importances= pd.Series(model.feature_importances_, index=x.columns)
feature_importances.plot(kind="bar", figsize=(20,20));
# ^ The Most important features are by a large margin, the columns created from existing attributes, namely;
# (1) YRSS = Years passed since patient had first sexual intercourse
# (2) NSA = Number of sexual partners/age.
# (3) NHCP = number of years patient took Hormonal Contraceptives after first sexual intercourse as a percentage
# (4) NYHC = number of years patient did not take Hormonal Contraceptives
#
# These Features are somewhat correlated with a positive reading on a Biopsy screening method, it is a very weak correlation.
#
# # Finding Optimal Number Of Trees For RF
# In[34]:
# Sweep n_estimators and record the OOB C-statistic for each tree count so
# the best-performing forest size can be read off the plot.
results = []
n_estimator_values = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 200]
for tree_count in n_estimator_values:
    candidate = RandomForestRegressor(tree_count, oob_score=True, n_jobs=-1, random_state=42)
    candidate.fit(x, Y)
    print(tree_count, "trees")
    score = roc_auc_score(Y, candidate.oob_prediction_)
    print("C-stat : ", score)
    results.append(score)
    print(" ")
model = candidate
roc_score = score
pd.Series(results, n_estimator_values).plot();
# ^ 140 trees gives us the highest c-stat, i.e. 0.6798.
# # Finding Optimal Max_Features For RF
# In[35]:
# Sweep the max_features options (n_estimators fixed at the best value, 140,
# found in the previous sweep) and record the OOB C-stat for each.
results=[]
max_features_values=["auto", "sqrt", "log2", None, 0.2, 0.9]
for max_features in max_features_values:
    model=RandomForestRegressor(n_estimators=140, oob_score=True, n_jobs=-1,random_state=42,
                                max_features=max_features)
    model.fit(x, Y)
    print(max_features, "option")
    roc_score=roc_auc_score(Y, model.oob_prediction_)
    print("C-stat : ", roc_score)
    results.append(roc_score)
    print(" ")
pd.Series(results, max_features_values).plot(kind="barh", xlim=(0.10, 0.8));
# ^ max_features, as a recap is; the maximum number of variables RF is allowed to test in each node. The optimal for our model is log2.
# # Finding Optimal Min_Samples_Leaf For RF
# In[36]:
# Sweep min_samples_leaf (with the tuned n_estimators and max_features fixed)
# and record the OOB C-stat for each setting.
results=[]
min_sample_leaf_values=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,100,140,200]
for min_sample in min_sample_leaf_values:
    model=RandomForestRegressor(n_estimators=140, oob_score=True, n_jobs=-1,random_state=42,
                                max_features="log2", min_samples_leaf=min_sample)
    model.fit(x, Y)
    print(min_sample, "min sample")
    roc_score=roc_auc_score(Y, model.oob_prediction_)
    print("C-stat : ", roc_score)
    results.append(roc_score)
    print(" ")
pd.Series(results, min_sample_leaf_values).plot();
# min_samples_leaf: This is the minimum number of samples required to be at each node. The optimal for our model is; 1.
# # Final Optimised Model
# In[37]:
# Final model with all tuned hyper-parameters: 140 trees, log2 features per
# split, min_samples_leaf=1.
model=RandomForestRegressor(n_estimators=140, oob_score=True, n_jobs=-1,random_state=42,
                            max_features="log2", min_samples_leaf=1)
model.fit(x, Y)
roc_score=roc_auc_score(Y, model.oob_prediction_)
print("C-stat : ", roc_score)
# ^ We have achieved the desired 0.70 mark, this means our model has just passed the threshold of a good model fit.
|
import pygame
from pygame.sprite import Sprite
from time import sleep
vec = pygame.math.Vector2
class Pow(Sprite):
    """Mushroom power-up sprite that drifts horizontally across the screen."""

    def __init__(self, ai_settings, screen, map, Game):
        """Create the power-up centred horizontally, 100 px above the bottom.

        Args:
            ai_settings: game settings object (stored, not read here).
            screen: pygame display surface to draw onto.
            map: current map object (stored, not read here).
            Game: owning game object (stored, not read here).
        """
        super(Pow, self).__init__()
        self.screen = screen
        self.ai_settings = ai_settings
        self.map = map
        self.game = Game
        self.screen_rect = screen.get_rect()
        self.image = pygame.image.load('images/mushroom.png')
        self.rect = self.image.get_rect()
        self.rect.centerx = float(self.screen_rect.width / 2)
        self.rect.centery = float(self.screen_rect.height - 100)
        self.center = float(self.rect.centerx)
        # Fix: track the horizontal position as a float.  pygame.Rect stores
        # integer coordinates, so the original `self.rect.x += self.vx` with
        # vx = 0.5 was truncated back to the same int every frame and the
        # sprite never moved.
        self.x = float(self.rect.x)
        self.vx = 0.5  # horizontal velocity in pixels per update

    def update(self):
        """Advance the sprite horizontally by its velocity."""
        self.x += self.vx
        self.rect.x = self.x  # rect takes the truncated int position

    def blitme(self):
        """Draw the power-up at its current rect position."""
        self.screen.blit(self.image, self.rect)
# -*- coding: utf-8 -*-
# import Env
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
from env import Env
Env()
from pymongo import *
from datetime import datetime
from convert_datetime import dt_to_end_next05,dt_from_14digits_to_iso,shift_seconds
from get_coord import *
client = MongoClient()
db = client.nm4bd
# CONST
MATCH_NODE_THRESHOLD = 10
UPDATE_INTERVAL = 5
ANALYZE_LAG = 0
ADJACENT_FLAG = True # True when adjacent nodes are also accepted as correct at non-junction points
# DEBUG_PRINT = True
DEBUG_PRINT = False
FLOOR_LIST = ["W2-6F","W2-7F","W2-8F","W2-9F","kaiyo"]
# global var.
examine_count = 0
exist_count = 0
match_count = 0
adjacent_count = 0
middle_count = 0
wrong_node_count = 0
wrong_floor_count = 0
error_distance = 0.0
# stay_position_list: None for movement experiments, a position_list for stay experiments
stay_position_list = None
stay_correct_nodes = None
def examine_route(mac,floor,st_node,ed_node,via_nodes_list,st_dt,ed_dt,via_dts_list,stay_pos = [],query = None):
    """Evaluate one whole experiment route for a device and store a summary.

    Resets the module-level counters, decides whether this is a "stay"
    experiment (fixed position) or a "flow" experiment (movement through
    nodes), examines every partial segment, then inserts an aggregate result
    document into db.examine_summary.

    Args:
        mac: device MAC address.
        floor: floor id string (e.g. "W2-7F").
        st_node, ed_node: route start/end node ids.
        via_nodes_list: intermediate node ids in visit order (empty for a
            direct leg or a stay experiment).
        st_dt, ed_dt: datetimes when the device was at st_node / ed_node.
        via_dts_list: datetimes matching via_nodes_list.
        stay_pos: for stay experiments, [prev_node, prev_distance,
            next_distance, next_node].  NOTE: mutable default argument, but
            it is only read (len()) here, never mutated.
        query: optional dict whose "exp_id" is copied into the summary.
    """
    global examine_count
    global exist_count
    global match_count
    global adjacent_count
    global middle_count
    global wrong_node_count
    global wrong_floor_count
    global error_distance
    global stay_position_list
    global stay_correct_nodes
    # Reset all counters before examining this route.
    examine_count = 0
    exist_count = 0
    match_count = 0
    adjacent_count = 0
    middle_count = 0
    wrong_node_count = 0
    wrong_floor_count = 0
    error_distance = 0
    correct_answer_rate = 0      # unused local, kept as-is
    correct_answer_rate_alt = 0  # unused local, kept as-is
    average_error_distance = None
    average_error_distance_m = None
    # Determine the stay position: an explicit 4-item stay_pos wins; a route
    # whose start == end with no via points is treated as staying next to
    # st_node; otherwise this is a movement experiment (None).
    if len(stay_pos) == 4:
        stay_position_list = stay_pos
    elif st_node == ed_node and len(via_dts_list) == 0:
        node_info = db.pcwlnode.find_one({"floor":floor, "pcwl_id":st_node})
        next_node = node_info["next_id"][0]
        next_dist = get_distance(floor,st_node,next_node)
        stay_position_list = [st_node,0.0,next_dist,next_node]
    else:
        stay_position_list = None
    # Pre-compute which analyzed nodes count as "correct" while staying:
    # the nearest node (plus neighbours) when within the match threshold,
    # otherwise both edge endpoints.
    if stay_position_list is not None:
        prev_node,prev_distance,next_distance,next_node = stay_position_list
        if prev_distance < MATCH_NODE_THRESHOLD:
            stay_correct_nodes = add_adjacent_nodes(floor,prev_node,ADJACENT_FLAG)
        elif next_distance < MATCH_NODE_THRESHOLD:
            stay_correct_nodes = add_adjacent_nodes(floor,next_node,ADJACENT_FLAG)
        else:
            stay_correct_nodes = [prev_node,next_node]
    else:
        stay_correct_nodes = None
    if query is None:
        exp_id = None
    else:
        exp_id = query["exp_id"]
    # Examine each leg of the route: start -> via[0] -> ... -> end.
    if len(via_nodes_list)== 0:
        examine_partial_route(mac,floor,st_node,ed_node,st_dt,ed_dt)
    else:
        examine_partial_route(mac,floor,st_node,via_nodes_list[0],st_dt,via_dts_list[0])
        for i in range(len(via_nodes_list) - 1):
            examine_partial_route(mac,floor,via_nodes_list[i],via_nodes_list[i+1],via_dts_list[i],via_dts_list[i+1])
        examine_partial_route(mac,floor,via_nodes_list[-1],ed_node,via_dts_list[-1],ed_dt)
    accuracy,existing_data_rate,average_error_distance, match_rate, adjacent_rate, middle_rate, wrong_node_rate = process_count_result()
    if average_error_distance is not None:
        # Convert pixels to metres (14.4 m per 110 px — presumably the floor
        # map scale; TODO confirm).
        average_error_distance_m = rounding(average_error_distance * 14.4 / 110,2)
        average_error_distance = rounding(average_error_distance,2)
    db.examine_summary.insert({"exp_id":exp_id,"mac":mac,"floor":floor,"st_node":st_node,"ed_node":ed_node,"via_nodes_list":via_nodes_list,"st_dt":st_dt,"ed_dt":ed_dt,"via_dts_list":via_dts_list,
                               "accuracy":accuracy,"existing_rate":existing_data_rate,
                               "avg_err_dist[px]":average_error_distance,"avg_err_dist[m]":average_error_distance_m,
                               "match_rate":match_rate, "adjacent_rate":adjacent_rate, "middle_rate":middle_rate, "wrong_node_rate":wrong_node_rate})
def examine_partial_route(mac,floor,st_node,ed_node,st_dt,ed_dt):
    """Examine one leg of a route, sampling every UPDATE_INTERVAL seconds.

    For stay experiments the expected position is fixed; for movement legs
    the expected position is interpolated along the ideal route between
    st_node and ed_node assuming constant velocity.
    """
    ideal_one_route = {}
    total_distance = 0
    delta_distance = 0  # distance travelled along the ideal route so far
    velocity = 0
    nodes = []
    temp_dlist = []
    dlist = []
    data = {}
    is_correct = False
    is_exist = False
    judgement = ""
    if stay_position_list is not None:
        # Stay experiment: the position never changes, just sample.
        # Align the first sample to the next 5-second boundary unless that
        # datetime was already examined.
        if db.examine_route.find({"mac":mac,"datetime":st_dt}).count() == 0:
            st_next05_dt = dt_to_end_next05(st_dt,"iso")
            judgement = examine_position(mac,floor,st_next05_dt)
            update_partial_count(judgement)
        else:
            st_next05_dt = st_dt
        while st_next05_dt <= shift_seconds(ed_dt,-UPDATE_INTERVAL):
            st_next05_dt = shift_seconds(st_next05_dt,UPDATE_INTERVAL)
            judgement = examine_position(mac,floor,st_next05_dt)
            update_partial_count(judgement)
    else:
        # Movement leg: fetch the ideal route between the two nodes.  The
        # stored route may run in either direction, so reverse dlist (and
        # each edge's direction pair) when the stored start != st_node.
        ideal_one_route = db.idealroute.find_one({"$and": [{"floor" : floor},{"query" : st_node},{"query" : ed_node}]})
        if ideal_one_route["query"][0] != st_node:
            temp_dlist = ideal_one_route["dlist"]
            for i in range(-1,-len(temp_dlist)-1,-1):
                dlist.append({"direction":[temp_dlist[i]["direction"][1],temp_dlist[i]["direction"][0]],"distance":temp_dlist[i]["distance"]})
        else:
            dlist = ideal_one_route["dlist"]
        total_distance = ideal_one_route["total_distance"]
        # Assume constant velocity over the whole leg (px per second).
        velocity = total_distance / (ed_dt - st_dt).seconds
        if DEBUG_PRINT:
            print("\n" + "from " + str(st_node) + " to " + str(ed_node) + " : velocity = " + str(rounding(velocity,2)) + " [px/s]")
        if db.examine_route.find({"mac":mac,"datetime":st_dt}).count() == 0:
            st_next05_dt = dt_to_end_next05(st_dt,"iso")
            delta_distance = velocity * (st_next05_dt - st_dt).seconds
            judgement = examine_position(mac,floor,st_next05_dt,dlist,delta_distance)
            update_partial_count(judgement)
        else:
            st_next05_dt = st_dt
        while st_next05_dt <= shift_seconds(ed_dt,-UPDATE_INTERVAL):
            delta_distance += velocity * UPDATE_INTERVAL
            st_next05_dt = shift_seconds(st_next05_dt,UPDATE_INTERVAL)
            judgement = examine_position(mac,floor,st_next05_dt,dlist,delta_distance)
            update_partial_count(judgement)
def examine_position(mac,floor,dt,dlist = [],delta_distance = 0):
    """Compare the analyzed position of *mac* at *dt* with the expected one.

    Returns a judgement string: "T(Match)" / "T(Adjacent)" / "T(Middle)"
    when the analyzed node is acceptable, or "F(None)" / "F(<floor>)" /
    "F(Wrong Node)" otherwise.  Also accumulates the positional error into
    the module-level error_distance and logs the result into
    db.examine_route and db.actual_position.
    """
    global error_distance
    real_floor = ""
    analyzed_node = 0
    correct_nodes = []
    # Expected position: [prev_node_id, prev_distance, next_distance, next_node_id].
    actual_position_list = [0,0.0,0.0,0]
    analyzed_position_list = [0,0.0,0.0,0]
    judgement = ""
    moment_error_dist = 0
    temp_dist = 0
    min_dist = 9999  # sentinel larger than any realistic pixel distance
    if stay_position_list is not None:
        actual_position_list = stay_position_list
        correct_nodes = stay_correct_nodes
    else:
        correct_nodes,actual_position_list = find_correct_nodes_and_position(floor,dlist,delta_distance)
    pos_x,pos_y = get_position(floor,actual_position_list)
    # get_coord_from_info(floor, mac, dt)
    analyzed_data = db.analy_coord.find_one({"datetime":dt, "mac":mac})
    if analyzed_data is None:
        # Nothing was analyzed at this instant at all.
        judgement = "F(None)"
        moment_error_dist = None
    else:
        # real_floor = analyzed_data["floor"]
        # To Do : improve verification process by using analyzed data
        # NOTE(review): find_analyzed_node can return (None, None); the
        # string concatenation below would then raise — confirm analyzed
        # coordinate data always implies an analyzed node.
        real_floor, analyzed_node = find_analyzed_node(mac, floor, dt)
        if real_floor != floor:
            judgement = "F("+ real_floor + ")"
            moment_error_dist = None
        if real_floor == floor:
            mlist = analyzed_data["mlist"]
            analyzed_position_list = analyzed_data["position"]
            # analyzed_actual_dist = get_distance_between_points(floor,analyzed_position_list,actual_position_list)
            # Error distance: 0 when the expected position lies inside any
            # candidate's margin box, otherwise the smallest margin-adjusted
            # distance over all candidates in mlist.
            for i in range(len(mlist)):
                # analyzed_margin_dist = get_distance_between_points(floor,analyzed_position_list,mlist[i]["pos"])
                # if analyzed_actual_dist < mlist[i]["margin"]:
                #     moment_error_dist = 0
                #     break
                if isinside(analyzed_data["pos_x"],pos_x,mlist[i]["pos_x"]) and isinside(analyzed_data["pos_y"],pos_y,mlist[i]["pos_y"]):
                    moment_error_dist = 0
                    break
                else:
                    temp_dist = mlist[i]["margin"]
                    temp_dist += get_distance_between_points(floor,mlist[i]["pos"],actual_position_list)
                    if temp_dist < min_dist:
                        min_dist = temp_dist
                        moment_error_dist = rounding(min_dist - mlist[i]["margin"],2)
            error_distance += moment_error_dist
            # Classify: wrong node entirely, mid-edge (two endpoint candidates),
            # exact match (first candidate), or an accepted adjacent node.
            if not (analyzed_node in correct_nodes):
                judgement = "F(Wrong Node)"
            elif len(correct_nodes) == 2:
                judgement = "T(Middle)"
            elif analyzed_node == correct_nodes[0]:
                judgement = "T(Match)"
            else:
                judgement = "T(Adjacent)"
    db.examine_route.insert({"floor": floor, "mac": mac, "datetime":dt,"judgement":judgement,"position":actual_position_list,
                             "pos_x":pos_x,"pos_y":pos_y,"correct":correct_nodes,"analyzed":analyzed_node,"err_dist":moment_error_dist})
    db.actual_position.insert({"floor": floor, "mac": mac, "datetime":dt,"pos_x":pos_x,"pos_y":pos_y})
    if DEBUG_PRINT:
        print(str(dt) + ":" + judgement,"pos:" + str(actual_position_list),"correct:" + str(correct_nodes),
              "analyzed:" + str(analyzed_node),"err_dist:" + str(moment_error_dist),end="")
        if moment_error_dist is not None:
            print("[px]")
        else:
            print("")
    return judgement
def update_partial_count(judgement):
    """Fold one judgement string from examine_position into the module tallies."""
    global examine_count
    global exist_count
    global match_count
    global adjacent_count
    global middle_count
    global wrong_node_count
    global wrong_floor_count
    # Every judgement counts as examined; assume data existed until we see "F(None)".
    examine_count += 1
    exist_count += 1
    if judgement[0] == "T":
        # Successful judgements: "T(Match)", "T(Adjacent)" or "T(Middle)".
        if judgement.startswith("Match", 2):
            match_count += 1
        elif judgement.startswith("Adjacent", 2):
            adjacent_count += 1
        elif judgement.startswith("Middle", 2):
            middle_count += 1
        else:
            print("unexpected judgement error!")
    if judgement[0] == "F":
        # Failures: "F(None)", "F(Wrong Node)" or "F(<floor name>)".
        if judgement.startswith("None", 2):
            exist_count -= 1  # undo the provisional exist_count increment
        elif judgement.startswith("Wrong Node", 2):
            wrong_node_count += 1
        elif judgement[2:-1] in FLOOR_LIST:
            wrong_floor_count += 1
        else:
            print("unexpected judgement error!")
def isinside(end1_coord, target_coord, end2_coord):
    """Return True when target_coord lies between the two end coordinates
    (inclusive), regardless of which end is larger."""
    low, high = sorted((end1_coord, end2_coord))
    return low <= target_coord <= high
def find_analyzed_node(mac,floor,dt):
    """Return (floor, node_id) where the analysis placed *mac* at *dt*.

    Searches in priority order: flow data on the expected floor, stay data
    on the expected floor, flow data on any floor, stay data on any floor.
    Returns (None, None) when no analyzed record exists at all.
    """
    analyzed_data = {}
    analyzed_node = 0
    same_floor_query = {"floor":floor,"datetime":dt,"mac":mac}
    all_floors_query = {"datetime":dt,"mac":mac}
    # A flow (movement) record exists on the same floor.
    analyzed_data = db.pfvmacinfo.find_one(same_floor_query)
    if analyzed_data is not None:
        analyzed_node = analyzed_data["route"][-1][1]  # last node of the analyzed route
        return floor,analyzed_node
    # A stay record exists on the same floor.
    analyzed_data = db.staymacinfo.find_one(same_floor_query)
    if analyzed_data is not None:
        analyzed_node = analyzed_data["pcwl_id"]
        return floor,analyzed_node
    # A flow record exists on a different floor.
    analyzed_data = db.pfvmacinfo.find_one(all_floors_query)
    if analyzed_data is not None:
        analyzed_node = analyzed_data["route"][-1][1]
        floor = analyzed_data["floor"]
        return floor,analyzed_node
    # A stay record exists on a different floor.
    analyzed_data = db.staymacinfo.find_one(all_floors_query)
    if analyzed_data is not None:
        analyzed_node = analyzed_data["pcwl_id"]
        floor = analyzed_data["floor"]
        return floor,analyzed_node
    return None,None
def find_correct_nodes_and_position(floor,dlist,delta_distance):
    """Locate the expected position *delta_distance* px along the ideal route.

    Walks dlist edge by edge until the cumulative distance reaches
    delta_distance, then decides which nodes count as correct: a node
    within MATCH_NODE_THRESHOLD (plus its neighbours), or both edge
    endpoints when mid-edge.  Returns (correct_nodes,
    actual_position_list) where actual_position_list is
    [prev_node_id, prev_distance, next_distance, next_node_id].

    NOTE(review): an empty dlist would leave `i` unbound and raise a
    NameError at the final position construction — callers always pass a
    non-empty ideal route here; confirm before reusing elsewhere.
    """
    temp_distance = 0  # cumulative edge distance walked so far
    next_distance = 0  # distance from the computed position to the next node
    prev_distance = 0  # distance from the computed position to the previous node
    correct_nodes = []
    # Computed position: [prev_node_id, prev_distance, next_distance, next_node_id].
    actual_position_list = [0,0,0,0]
    for i in range(len(dlist)):
        temp_distance += dlist[i]["distance"]
        if temp_distance >= delta_distance:
            # The position lies on edge i; split it into prev/next offsets.
            next_distance = temp_distance - delta_distance
            prev_distance = dlist[i]["distance"] - next_distance
            if next_distance < MATCH_NODE_THRESHOLD:
                correct_nodes = add_adjacent_nodes(floor,dlist[i]["direction"][1],ADJACENT_FLAG)
                break
            elif prev_distance < MATCH_NODE_THRESHOLD:
                correct_nodes = add_adjacent_nodes(floor,dlist[i]["direction"][0],ADJACENT_FLAG)
                break
            else:
                correct_nodes = dlist[i]["direction"]
                break
    else:
        # Loop finished without break: travelled past the final edge, so the
        # position is pinned to the route's end node.
        print("reached ed_node!!")
        next_distance = 0
        prev_distance = dlist[i]["distance"]
        correct_nodes = add_adjacent_nodes(floor,dlist[i]["direction"][1],ADJACENT_FLAG)
    actual_position_list = [dlist[i]["direction"][0],rounding(prev_distance,2),rounding(next_distance,2),dlist[i]["direction"][1]]
    return correct_nodes,actual_position_list
def add_adjacent_nodes(floor, node, adjacent_flag):
    """Return the list of nodes considered "correct" around *node*.

    Looks up the node document in db.pcwlnode_test and combines the node
    with its next_id neighbours.  The combined list is returned when the
    node is a junction (>= 2 neighbours, i.e. 3+ entries total) or when
    adjacent_flag allows adjacency matching everywhere; otherwise only
    [node] itself is returned.

    Fix vs. the original: the unused local `pcwlnode` is removed and the
    two duplicated return branches are merged — behavior is unchanged.
    """
    adjacent_nodes = [node]
    node_info = db.pcwlnode_test.find_one({"floor":floor,"pcwl_id":node})
    adjacent_nodes.extend(node_info["next_id"])
    # Junction nodes (3+ entries) always keep their neighbours; otherwise
    # neighbours count only when the caller enabled adjacency matching.
    if len(adjacent_nodes) >= 3 or adjacent_flag:
        return adjacent_nodes
    return [node]
# def get_error_distance(floor,analyzed_node,actual_position_list):
# prev_node,prev_distance,next_distance,next_node = actual_position_list
# if analyzed_node == prev_node:
# return rounding(prev_distance,2)
# if analyzed_node == next_node:
# return rounding(next_distance,2)
# via_prev_distance = prev_distance
# via_next_distance = next_distance
# via_prev_query = {"$and": [{"floor" : floor},{"query" : analyzed_node},{"query" : prev_node}]}
# via_next_query = {"$and": [{"floor" : floor},{"query" : analyzed_node},{"query" : next_node}]}
# via_prev_distance += db.idealroute.find_one(via_prev_query)["total_distance"]
# via_next_distance += db.idealroute.find_one(via_next_query)["total_distance"]
# return rounding(min(via_prev_distance,via_next_distance),2)
def get_distance_between_points(floor, position_list1, position_list2):
    """Shortest along-graph distance between two on-edge positions.

    Each position is [prev_node, prev_distance, next_distance, next_node].
    Positions on the same edge are compared directly; otherwise the route
    goes through one endpoint of each edge, so all four endpoint pairings
    are evaluated and the minimum total is returned.
    """
    prev_node1, prev_distance1, next_distance1, next_node1 = position_list1
    prev_node2, prev_distance2, next_distance2, next_node2 = position_list2
    # Same edge, same orientation: plain offset difference.
    if prev_node1 == prev_node2 and next_node1 == next_node2:
        return abs(prev_distance1 - prev_distance2)
    # Same edge, opposite orientation.
    if prev_node1 == next_node2 and prev_node2 == next_node1:
        return abs(prev_distance1 - next_distance2)
    # Different edges: try every prev/next endpoint pairing.
    candidates = [
        prev_distance1 + get_distance(floor, prev_node1, prev_node2) + prev_distance2,
        next_distance1 + get_distance(floor, next_node1, prev_node2) + prev_distance2,
        prev_distance1 + get_distance(floor, prev_node1, next_node2) + next_distance2,
        next_distance1 + get_distance(floor, next_node1, next_node2) + next_distance2,
    ]
    return min(candidates)
# dist_list = [prev_prev, next_prev, prev_next, next_next]
# min_dist = min(dist_list)
# min_index = dist_list.index(min_dist)
# if min_index == 0 or min_index == 1:
# return min_dist,"prev"
# elif min_index == 2 or min_index == 3:
# return min_dist,"next"
# else:
# print("二点間の距離の計算失敗")
# return 0,None
def rounding(num, round_place):
    """Round *num* to *round_place* decimal places.

    Uses Python's round() on the scaled value, so halves follow banker's
    rounding, exactly like the original implementation.
    """
    scale = pow(10, round_place)
    return round(num * scale) / scale
def process_count_result():
    """Print the aggregate route-examination statistics and return them.

    Reads the module-level counters filled in by update_partial_count().
    Returns a 7-tuple: (existing_accuracy, existing_data_rate,
    average_error_distance, match_rate, adjacent_rate, middle_rate,
    wrong_node_rate).  Values stay None/0 when there was nothing to count.
    """
    accuracy = None
    existing_accuracy = None
    existing_data_rate = None
    match_rate = 0
    adjacent_rate = 0
    middle_rate = 0
    wrong_floor_rate = 0
    wrong_node_rate = 0
    correct_count = 0
    average_error_distance = None
    # A judgement is "correct" when the analyzed node matched, was adjacent,
    # or the expected position was mid-edge between two accepted endpoints.
    correct_count = match_count + adjacent_count + middle_count
    false_count = examine_count - correct_count
    if examine_count == 0 :
        print("--- no data!! ---")
    else:
        accuracy = rounding(correct_count / examine_count * 100, 2)
        print ("\n" + "accuracy: " + str(accuracy) + "% "
               + "( " + str(correct_count) + " / " + str(examine_count) + " )",end=" ")
        if exist_count == 0 :
            print("--- info_data does not exist! ---")
        else:
            # Rates below are relative to samples where analyzed data existed.
            average_error_distance = error_distance/exist_count
            existing_accuracy = rounding(correct_count / exist_count * 100, 2)
            print ("accuracy(existing only): " + str(existing_accuracy) + "% "
                   + "( " + str(correct_count) + " / " + str(exist_count) + " )",end=" ")
            existing_data_rate = rounding(exist_count/examine_count * 100, 2)
            print("existing data rate: " + str(existing_data_rate) + "% "
                  + "( " + str(exist_count) + " / " + str(examine_count) + " )")
            print("average error distance: " + str(rounding(average_error_distance,2)) + "[px]")
            # Breakdown of the successful judgements.
            print("\n-- detail info of true results --")
            match_rate = rounding(match_count / exist_count * 100, 2)
            print ("match rate: " + str(match_rate) + "% "
                   + "( " + str(match_count) + " / " + str(exist_count) + " )",end=" ")
            adjacent_rate = rounding(adjacent_count / exist_count * 100, 2)
            print ("adjacent rate: " + str(adjacent_rate) + "% "
                   + "( " + str(adjacent_count) + " / " + str(exist_count) + " )",end=" ")
            middle_rate = rounding(middle_count / exist_count * 100, 2)
            print ("middle rate: " + str(middle_rate) + "% "
                   + "( " + str(middle_count) + " / " + str(exist_count) + " )",end=" ")
            if correct_count != 0:
                match_rate_true = rounding(match_count / correct_count * 100, 2)
                print ("match rate(true only): " + str(match_rate_true) + "% "
                       + "( " + str(match_count) + " / " + str(correct_count) + " )")
                adjacent_rate_true = rounding(adjacent_count / correct_count * 100, 2)
                print ("adjacent rate(true only): " + str(adjacent_rate_true) + "% "
                       + "( " + str(adjacent_count) + " / " + str(correct_count) + " )")
                middle_rate_true = rounding(middle_count / correct_count * 100, 2)
                print ("middle rate(true only): " + str(middle_rate_true) + "% "
                       + "( " + str(middle_count) + " / " + str(correct_count) + " )")
            # Breakdown of the failed judgements.
            print("\n-- detail info of false results --")
            wrong_floor_rate = rounding(wrong_floor_count / exist_count * 100, 2)
            print ("wrong floor rate: " + str(wrong_floor_rate) + "% "
                   + "( " + str(wrong_floor_count) + " / " + str(exist_count) + " )",end=" ")
            wrong_node_rate = rounding(wrong_node_count / exist_count * 100, 2)
            print ("wrong node rate: " + str(wrong_node_rate) + "% "
                   + "( " + str(wrong_node_count) + " / " + str(exist_count) + " )",end=" ")
            if false_count != 0:
                wrong_floor_rate_false = rounding(wrong_floor_count / false_count * 100, 2)
                print ("wrong floor rate(false only): " + str(wrong_floor_rate_false) + "% "
                       + "( " + str(wrong_floor_count) + " / " + str(false_count) + " )")
                wrong_node_rate_false = rounding(wrong_node_count / false_count * 100, 2)
                print ("wrong node rate(false only): " + str(wrong_node_rate_false) + "% "
                       + "( " + str(wrong_node_count) + " / " + str(false_count) + " )")
    return existing_accuracy,existing_data_rate,average_error_distance, match_rate, adjacent_rate, middle_rate, wrong_node_rate
# # DBに入っているデータを出力することも可能(コメント解除)
# def is_correct_node(mac,floor,dt,nodes):
# # dtにする
# analyze_time = shift_seconds(dt,ANALYZE_LAG)
# analyze_data = {}
# pfv_query = {"floor":floor,"datetime":analyze_time,"mac":mac}
# stay_query = {"floor":floor,"datetime":analyze_time,"mac":mac}
# # print(pfv_query)
# pfv_query_alt = {"datetime":analyze_time,"mac":mac}
# stay_query_alt = {"datetime":analyze_time,"mac":mac}
# for node in nodes:
# analyze_data = db.pfvmacinfo.find_one(pfv_query)
# # print(analyze_data)
# stay_query["pcwl_id"] = node
# if (analyze_data is not None and analyze_data["route"][-1][1] == node):
# return True,analyze_data["route"][-1][1]
# # return True, analyze_data["route"]
# elif (db.staymacinfo.find(stay_query).count() == 1):
# # TODO: ~.count() >= 2 pattern
# return True,db.staymacinfo.find_one(stay_query)["pcwl_id"]
# # return True, node
# # for node in nodes:
# analyze_data = db.pfvmacinfo.find_one(pfv_query)
# # print(analyze_data)
# if (analyze_data is not None):
# return False,analyze_data["route"][-1][1]
# # print(stay_query)
# del(stay_query["pcwl_id"])
# analyze_data = db.staymacinfo.find_one(stay_query)
# # print(analyze_data)
# if (analyze_data is not None):
# return False,analyze_data["pcwl_id"]
# # return False, analyze_data["route"]
# for node in nodes:
# analyze_data = db.pfvmacinfo.find_one(pfv_query_alt)
# stay_query_alt["pcwl_id"] = node
# stay_data = db.staymacinfo.find(stay_query_alt)
# if (analyze_data is not None):
# return False
# # return False, analyze_data["route"]
# elif (stay_data.count() == 1):
# # TODO: ~.count() >= 2 pattern
# return False
# # return False, stay_data[0]["floor"]
# return False, None
# # return False, None
# # DBにデータが入っているか確認
# def is_exist_data(mac,floor,dt,nodes):
# analyze_time = shift_seconds(dt,ANALYZE_LAG)
# query = {"floor":floor,"datetime":analyze_time,"mac":mac}
# if(db.pfvmacinfo.find(query).count() != 0):
# return True
# elif(db.staymacinfo.find(query).count() != 0):
# return True
# else:
# return False
# # judge correct, judge data exists, and insert examine_route
# def judge_and_ins_correct_route(mac,floor,nodes,st_dt,st_next05_dt,correct_count,examine_count,exist_count):
# # is_correct = is_correct_node(mac,floor,st_next05_dt,nodes)
# is_correct, analyzed_data = is_correct_node(mac,floor,st_next05_dt,nodes)
# is_exist = is_exist_data(mac,floor,st_next05_dt,nodes)
# examine_count += 1
# if is_correct:
# correct_count += 1
# if is_exist:
# exist_count += 1
# db.examine_route.insert({"datetime":st_next05_dt,"nodes":nodes,"is_correct":is_correct})
# if DEBUG_PRINT:
# print(str(st_next05_dt) + " : " + str(nodes) + " , " + str(is_correct)+ " , "+ str(analyzed_data))
# # print(" analyzed data " + str(analyzed_data))
# return correct_count, examine_count, exist_count
if __name__ == '__main__':
    ### exp_info ###
    mac = "00:11:81:10:01:19"
    floor = "W2-7F"
    ### flow ### (movement experiment parameters)
    st_node = 1
    ed_node = 1
    via_nodes_list = [5,21,17,12,9,7,5]
    common_dt = str(2016102013) # shared leading digits of every measurement timestamp
    st_dt = dt_from_14digits_to_iso(common_dt + str(5500))
    ed_dt = dt_from_14digits_to_iso(common_dt + str(5818))
    via_dts_list = [5528,5545,5613,5639,5654,5725,5749]
    ### stay ### (alternative stay-experiment parameters, kept for reference)
    # mac = "00:11:81:10:01:17"
    # st_node = 5
    # ed_node = 5
    # via_nodes_list = []
    # common_dt = str(20161020) # shared leading digits of every measurement timestamp
    # st_dt = dt_from_14digits_to_iso(common_dt + str(115600))
    # ed_dt = dt_from_14digits_to_iso(common_dt + str(120920))
    # via_dts_list = []
    # db.examine_route.remove({})
    # Expand the via timestamps to full ISO datetimes.
    for i in range(len(via_dts_list)):
        via_dts_list[i] = dt_from_14digits_to_iso(common_dt + str(via_dts_list[i]))
    # Pre-compute coordinates for every 5-second step of the experiment.
    # NOTE(review): this loop advances st_dt past ed_dt, so examine_route
    # below receives the mutated st_dt rather than the original start time.
    # That looks unintended (a separate loop variable was probably meant);
    # confirm before relying on the summary output.
    while (st_dt <= ed_dt):
        get_coord_from_info(floor, mac, st_dt)
        st_dt = shift_seconds(st_dt, 5)
    examine_route(mac,floor,st_node,ed_node,via_nodes_list,st_dt,ed_dt,via_dts_list)
|
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv3D, MaxPooling3D, ZeroPadding3D
from keras.optimizers import SGD
from keras.layers import Input
from keras.models import Model
def get_model():
    """Return the Keras model of the network.

    A C3D-style 3-D ConvNet built with the functional API: five conv/pool
    groups over 16-frame 224x224 RGB clips, flattened into two 4096-unit
    fully connected layers with dropout and a 101-way softmax (one unit
    per activity class).

    Fix vs. the original: the throwaway `Sequential()` model and the two
    `model.add(Dropout(.5))` calls never contributed to the returned
    functional Model, so that dead code is removed.  The layer graph and
    the returned model are unchanged.
    """
    input_shape = (16, 224, 224, 3)  # seq_length, width, height, channels
    number_of_activities = 101
    inputs = Input(input_shape)
    # 1st layer group
    x = Conv3D(64, (3, 3, 3), activation='relu', padding='same', name='conv1', strides=(1, 1, 1))(inputs)
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', name='pool1')(x)
    # 2nd layer group
    x = Conv3D(128, (3, 3, 3), activation='relu', padding='same', name='conv2', strides=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2')(x)
    # 3rd layer group
    x = Conv3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3a', strides=(1, 1, 1))(x)
    x = Conv3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3b', strides=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3')(x)
    # 4th layer group
    x = Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4a', strides=(1, 1, 1))(x)
    x = Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4b', strides=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4')(x)
    # 5th layer group
    x = Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv5a', strides=(1, 1, 1))(x)
    x = Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv5b', strides=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool5')(x)
    # Classifier head: two FC layers with dropout, then the softmax output.
    x = Flatten()(x)
    x = Dense(4096, activation='relu', name='fc6')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu', name='fc7')(x)
    x = Dropout(0.5)(x)
    x = Dense(number_of_activities, activation='softmax', name='fc8')(x)
    return Model(inputs, x)
# Build the network and print its layer-by-layer summary when the module runs.
model = get_model()
model.summary()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-02 07:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the BarangModel (shop item) table."""

    # Must run after the customer migration that this ForeignKey references.
    dependencies = [
        ('customer', '0004_tokomodel_nama_user'),
    ]
    operations = [
        migrations.CreateModel(
            name='BarangModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nama_barang', models.CharField(max_length=200)),
                ('deskripsi_barang', models.TextField()),
                ('harga', models.DecimalField(decimal_places=2, max_digits=10)),
                ('jumlah_tersedia', models.IntegerField()),
                # Each item belongs to one shop; deleting the shop cascades to its items.
                ('nama_tokos', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='customer.TokoModel')),
            ],
        ),
    ]
|
# A simple blog using flask with a database
#https://www.bogotobogo.com/python/Flask/Python_Flask_Blog_App_Tutorial_5.php
from flask import Flask, redirect, render_template, request, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import login_user, LoginManager, UserMixin, logout_user, login_required, current_user
from werkzeug.security import check_password_hash
from datetime import datetime
import os
import uuid
import json
app = Flask(__name__)
app.config["DEBUG"] = True  # enable debug mode (check error.log if a page fails to compile)

# Code to connect to the MySQL database on PythonAnywhere.
# NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file kept out of version control.
SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}".format(
    username = "milsteam4144",
    password = "Happy2_dev",
    hostname="milsteam4144.mysql.pythonanywhere-services.com",
    databasename="milsteam4144$comments",
)
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_POOL_RECYCLE"] = 299  # recycle connections just under the host's 5-minute idle timeout
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False  # disable modification-event overhead
db = SQLAlchemy(app)

# Enable Flask-Migrate for schema migrations.
migrate = Migrate(app, db)

# Filesystem directory where uploaded images are stored.
UPLOAD_FOLDER = '/home/milsteam4144/mysite/images/'
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER

# Set up the Flask-Login session machinery.
# NOTE(review): the secret key is committed to source; load it from the
# environment instead.
app.secret_key = "bdyew87ahdiuaWGS0'MG" # Secret random key used for cryptography
login_manager = LoginManager()
login_manager.init_app(app)
# ORM model for an account; doubles as the Flask-Login user object.
class User(UserMixin, db.Model): #The user class inherits from both Flask-Login's UserMixin and SQLAlchemy's db.Model
    __tablename__ = "users" #This syntax comes from SQLAlchemy's db.Model class
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(128))
    # Werkzeug password hash; the plaintext password is never stored.
    password_hash = db.Column(db.String(128))

    def check_password(self, password):
        """Return True when `password` matches the stored werkzeug hash."""
        return check_password_hash(self.password_hash, password)

    def get_id(self):
        """Flask-Login identity for this user: the username (consumed by load_user)."""
        return self.username
# Given the stored user id (the username, per User.get_id), return the
# matching User row from the database, or None when no such user exists.
@login_manager.user_loader
def load_user(user_id):
    return User.query.filter_by(username=user_id).first()
# ORM model for one blog comment/post, optionally carrying an image.
class Comment(db.Model):
    __tablename__ = "comments"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))
    content = db.Column(db.String(4096))
    # URL path of the attached image ('' when none was uploaded).
    image = db.Column(db.String(500))
    # Timestamp defaults to insertion time.
    posted = db.Column(db.DateTime, default=datetime.now)
    # Author; nullable so orphaned comments survive user deletion.
    commenter_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
    commenter = db.relationship('User', foreign_keys=commenter_id)
#Define the "index" VIEW
@app.route("/", methods=["GET", "POST"]) # GET allows users to view the page, POST sends data
def index():
    """Show the comment feed (GET) or accept a new comment with an optional image (POST).

    Fixes over the previous version:
    - the authentication check now happens BEFORE the upload is written to
      disk, so anonymous POSTs no longer leave orphaned files;
    - a missing/empty file field is handled instead of raising (the old
      ``if f_name is None`` branch was dead code: f_name was always a str).
    """
    if request.method == "GET":
        return render_template("main_page.html", comments=Comment.query.all())
    # POST: reject anonymous submissions before touching the filesystem.
    if not current_user.is_authenticated:
        return redirect(url_for('index'))
    file = request.files.get('file')
    if file is None or file.filename == '':
        # No image attached; store the comment without one.
        image = ''
    else:
        extension = os.path.splitext(file.filename)[1]
        f_name = str(uuid.uuid4()) + extension  # random name avoids collisions
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], f_name))
        image = "/static/" + f_name
    # Create the comment and persist it.
    comment = Comment(content=request.form["contents"], commenter=current_user, image=image, title=request.form["title"])
    db.session.add(comment)  # stage the insert (transaction stays open)
    db.session.commit()      # commit and close the transaction
    return redirect(url_for('index'))
#Login VIEW
@app.route("/login/", methods=["GET", "POST"])
def login():
    """Render the login form (GET) or validate credentials and start a session (POST)."""
    if request.method == "GET":
        return render_template("login_page.html", error=False)
    # POST: look the account up by username and verify the password hash.
    user = load_user(request.form["username"])
    if user is not None and user.check_password(request.form["password"]):
        login_user(user)
        return redirect(url_for('index'))
    # Unknown user or wrong password: re-render the form with an error flag.
    return render_template("login_page.html", error=True)
#Logout VIEW
@app.route("/logout/")
@login_required #This means that the logout view can only be viewed by users that are logged in
def logout():
    # End the Flask-Login session and bounce back to the feed.
    logout_user()
    return redirect (url_for('index'))
# Read two numbers, add them, add a constant offset, and print the result.
# input() returns str, so without int() the old code concatenated the two
# strings and then raised TypeError on `z + 30`.
x = int(input("enter any value between 1-10"))
y = int(input("enter any value between 1-10"))
z = x + y
c = z + 30
print("result is", c)
|
#!/usr/bin/env python3
#
# Ryan Lamb
# CPSC 223P-03
#2020-9-16
#rclamb27@cus.fullerton.edu
"""Ouputs the sum of all multiples of 3 or 5 below 10000000"""
def main(limit=None):
    """Sum the multiples of 3 or 5 strictly below `limit` and print the result.

    Args:
        limit: exclusive upper bound.  When None (the default, preserving the
            original interactive behaviour) the bound is read from stdin.

    Returns:
        The computed sum (also printed), so the function is testable.
    """
    if limit is None:
        limit = int(input('Please input a range of numbers below 1000000: '))
        print('You chose {} as the range.'.format(limit))
    # Set UNION, not intersection: we want multiples of 3 OR 5 (the old
    # variable was misleadingly named `intersection`).  The union also
    # avoids double-counting multiples of 15.
    multiples = set(range(0, limit, 3)) | set(range(0, limit, 5))
    total = sum(multiples)
    # Report the actual bound instead of a hard-coded one.
    print('The sum of all the multiples of 3 or 5 below {} is {}'.format(limit, total))
    return total


if __name__ == '__main__':
    main()
|
import requests
import json
# Fetch the Weibo emotion list and keep only the (alt text, icon URL) pairs.
response = requests.get('https://api.weibo.com/2/emotions.json?source=1362404091')
data = [{"alt": item["phrase"], "src": item["icon"]} for item in response.json()]
print(data)
from functools import partial
from typing import Tuple
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from rljax.algorithm.base_class import QLearning
from rljax.network import DiscreteQFunction
from rljax.util import get_q_at_action, huber, optimize
class DQN(QLearning):
    """Deep Q-Network agent.

    Builds a discrete-action Q-network with Haiku and optimizes it with Adam;
    replay buffer, epsilon-greedy exploration and target-network bookkeeping
    come from the QLearning base class.  Optional features (wired through the
    base class / network factory): dueling heads, double Q-learning, n-step
    returns and prioritized experience replay (PER).
    """
    name = "DQN"

    def __init__(
        self,
        num_agent_steps,
        state_space,
        action_space,
        seed,
        max_grad_norm=None,
        gamma=0.99,
        nstep=1,
        buffer_size=10 ** 6,
        use_per=False,
        batch_size=32,
        start_steps=50000,
        update_interval=4,
        update_interval_target=8000,
        eps=0.01,
        eps_eval=0.001,
        eps_decay_steps=250000,
        loss_type="huber",
        dueling_net=False,
        double_q=False,
        setup_net=True,
        fn=None,
        lr=2.5e-4,
        units=(512,),
    ):
        # Generic Q-learning plumbing (buffer, epsilon schedule, target sync).
        super(DQN, self).__init__(
            num_agent_steps=num_agent_steps,
            state_space=state_space,
            action_space=action_space,
            seed=seed,
            max_grad_norm=max_grad_norm,
            gamma=gamma,
            nstep=nstep,
            buffer_size=buffer_size,
            batch_size=batch_size,
            use_per=use_per,
            start_steps=start_steps,
            update_interval=update_interval,
            update_interval_target=update_interval_target,
            eps=eps,
            eps_eval=eps_eval,
            eps_decay_steps=eps_decay_steps,
            loss_type=loss_type,
            dueling_net=dueling_net,
            double_q=double_q,
        )
        # Subclasses may pass setup_net=False and build their own network.
        if setup_net:
            if fn is None:
                # Default network factory: MLP Q-function over the action space.
                def fn(s):
                    return DiscreteQFunction(
                        action_space=action_space,
                        hidden_units=units,
                        dueling_net=dueling_net,
                    )(s)
            self.net = hk.without_apply_rng(hk.transform(fn))
            # Online and target parameters start out identical.
            self.params = self.params_target = self.net.init(next(self.rng), *self.fake_args)
            opt_init, self.opt = optax.adam(lr, eps=0.01 / batch_size)
            self.opt_state = opt_init(self.params)

    @partial(jax.jit, static_argnums=0)
    def _forward(
        self,
        params: hk.Params,
        state: np.ndarray,
    ) -> jnp.ndarray:
        """Greedy action: argmax over Q-values for each state in the batch."""
        return jnp.argmax(self.net.apply(params, state), axis=1)

    def update(self, writer=None):
        """Sample a minibatch and take one gradient step.

        Also refreshes PER priorities, periodically syncs the target network,
        and (optionally) logs the loss to a TensorBoard-style `writer`.
        """
        self.learning_step += 1
        weight, batch = self.buffer.sample(self.batch_size)
        state, action, reward, done, next_state = batch
        self.opt_state, self.params, loss, abs_td = optimize(
            self._loss,
            self.opt,
            self.opt_state,
            self.params,
            self.max_grad_norm,
            params_target=self.params_target,
            state=state,
            action=action,
            reward=reward,
            done=done,
            next_state=next_state,
            weight=weight,
            **self.kwargs_update,
        )
        # Update priority.
        if self.use_per:
            self.buffer.update_priority(abs_td)
        # Update target network.
        if self.agent_step % self.update_interval_target == 0:
            self.params_target = self._update_target(self.params_target, self.params)
        if writer and self.learning_step % 1000 == 0:
            writer.add_scalar("loss/q", loss, self.learning_step)

    @partial(jax.jit, static_argnums=0)
    def _calculate_value(
        self,
        params: hk.Params,
        state: np.ndarray,
        action: np.ndarray,
    ) -> jnp.ndarray:
        """Q(s, a) for the actions actually taken."""
        return get_q_at_action(self.net.apply(params, state), action)

    @partial(jax.jit, static_argnums=0)
    def _calculate_target(
        self,
        params: hk.Params,
        params_target: hk.Params,
        reward: np.ndarray,
        done: np.ndarray,
        next_state: np.ndarray,
    ) -> jnp.ndarray:
        """Gradient-stopped TD target: reward + (1 - done) * discount * next_q."""
        if self.double_q:
            # Double DQN: select a' with the online net, evaluate with the target net.
            next_action = self._forward(params, next_state)[..., None]
            next_q = self._calculate_value(params_target, next_state, next_action)
        else:
            next_q = jnp.max(self.net.apply(params_target, next_state), axis=-1, keepdims=True)
        return jax.lax.stop_gradient(reward + (1.0 - done) * self.discount * next_q)

    @partial(jax.jit, static_argnums=0)
    def _calculate_loss_and_abs_td(
        self,
        q: jnp.ndarray,
        target: jnp.ndarray,
        weight: np.ndarray,
    ) -> jnp.ndarray:
        """Weighted TD loss (l2 or huber) plus |TD| used for PER priorities."""
        td = target - q
        if self.loss_type == "l2":
            loss = jnp.mean(jnp.square(td) * weight)
        elif self.loss_type == "huber":
            loss = jnp.mean(huber(td) * weight)
        return loss, jax.lax.stop_gradient(jnp.abs(td))

    @partial(jax.jit, static_argnums=0)
    def _loss(
        self,
        params: hk.Params,
        params_target: hk.Params,
        state: np.ndarray,
        action: np.ndarray,
        reward: np.ndarray,
        done: np.ndarray,
        next_state: np.ndarray,
        weight: np.ndarray,
    ) -> Tuple[jnp.ndarray, jnp.ndarray]:
        """Loss and |TD| errors for one minibatch (entry point passed to `optimize`)."""
        q = self._calculate_value(params, state, action)
        target = self._calculate_target(params, params_target, reward, done, next_state)
        return self._calculate_loss_and_abs_td(q, target, weight)
|
# Collect a few personal details and echo them back in one sentence.
name1 = input("name")
name2 = input("clg name")
name3 = input("cgpa")
print(f"my name is {name1} studying in {name2} cgpa is {name3}")
|
from flask import Flask
import pickle
import pandas as pd
import numpy as np
import re, string, nltk
from nltk.corpus import stopwords
app = Flask(__name__)

# Placeholder root endpoint for the (not yet wired) ML model API.
@app.route("/")
def hello():
    return "Welcome to machine learning model APIs!"

if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
"""
Source: https://github.com/simonqbs/cayennelpp-python
"""
import struct
import math
# Cayenne LPP data-type identifiers (one byte each on the wire).
LPP_DIGITAL_INPUT = 0       # 1 byte
LPP_DIGITAL_OUTPUT = 1      # 1 byte
LPP_ANALOG_INPUT = 2        # 2 bytes, 0.01 signed
LPP_ANALOG_OUTPUT = 3       # 2 bytes, 0.01 signed
LPP_LUMINOSITY = 101        # 2 bytes, 1 lux unsigned
LPP_PRESENCE = 102          # 1 byte, 1
LPP_TEMPERATURE = 103       # 2 bytes, 0.1°C signed
LPP_RELATIVE_HUMIDITY = 104 # 1 byte, 0.5% unsigned
LPP_ACCELEROMETER = 113     # 2 bytes per axis, 0.001G
LPP_BAROMETRIC_PRESSURE = 115 # 2 bytes 0.1 hPa Unsigned
LPP_GYROMETER = 134         # 2 bytes per axis, 0.01 °/s
LPP_GPS = 136               # 3 byte lon/lat 0.0001 °, 3 bytes alt 0.01 meter

# Total record sizes: Data ID + Data Type + Data Size.
LPP_DIGITAL_INPUT_SIZE = 3       # 1 byte
LPP_DIGITAL_OUTPUT_SIZE = 3      # 1 byte
LPP_ANALOG_INPUT_SIZE = 4        # 2 bytes, 0.01 signed
LPP_ANALOG_OUTPUT_SIZE = 4       # 2 bytes, 0.01 signed
LPP_LUMINOSITY_SIZE = 4          # 2 bytes, 1 lux unsigned
LPP_PRESENCE_SIZE = 3            # 1 byte, 1
LPP_TEMPERATURE_SIZE = 4         # 2 bytes, 0.1°C signed
LPP_RELATIVE_HUMIDITY_SIZE = 3   # 1 byte, 0.5% unsigned
LPP_ACCELEROMETER_SIZE = 8       # 2 bytes per axis, 0.001G
LPP_BAROMETRIC_PRESSURE_SIZE = 4 # 2 bytes 0.1 hPa Unsigned
LPP_GYROMETER_SIZE = 8           # 2 bytes per axis, 0.01 °/s
LPP_GPS_SIZE = 11                # 3 byte lon/lat 0.0001 °, 3 bytes alt 0.01 meter
class CayenneLPP:
def __init__(self):
self.buffer = bytearray()
def get_buffer(self):
return self.buffer
def reset(self):
self.buffer = bytearray()
def get_size(self):
return len(self.buffer)
def add_temperature(self, channel, value):
val = math.floor(value * 10);
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_TEMPERATURE))
self.buffer.extend(struct.pack('b', val >> 8))
self.buffer.extend(struct.pack('b', val))
def add_relative_humidity(self, channel, value):
val = math.floor(value * 2)
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_RELATIVE_HUMIDITY))
self.buffer.extend(struct.pack('b', val))
def add_digital_input(self, channel, value):
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_DIGITAL_INPUT))
self.buffer.extend(struct.pack('b', value))
def add_digital_output(self, channel, value):
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_DIGITAL_OUTPUT))
self.buffer.extend(struct.pack('b', value))
def add_analog_input(self, channel, value):
val = math.floor(value * 100)
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_ANALOG_INPUT))
self.buffer.extend(struct.pack('b', val >> 8))
self.buffer.extend(struct.pack('b', val))
def add_analog_output(self, channel, value):
val = math.floor(value * 100)
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_ANALOG_OUTPUT))
self.buffer.extend(struct.pack('b', val >> 8))
self.buffer.extend(struct.pack('b', val))
def add_luminosity(self, channel, value):
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_LUMINOSITY))
self.buffer.extend(struct.pack('b', value >> 8))
self.buffer.extend(struct.pack('b', value))
def add_presence(self, channel, value):
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_PRESENCE))
self.buffer.extend(struct.pack('b', value))
def add_accelerometer(self, channel, x, y, z):
vx = math.floor(x * 1000)
vy = math.floor(y * 1000)
vz = math.floor(z * 1000)
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_ACCELEROMETER))
self.buffer.extend(struct.pack('b', vx >> 8))
self.buffer.extend(struct.pack('b', vx))
self.buffer.extend(struct.pack('b', vy >> 8))
self.buffer.extend(struct.pack('b', vy))
self.buffer.extend(struct.pack('b', vz >> 8))
self.buffer.extend(struct.pack('b', vz))
def add_barometric_pressure(self, channel, value):
val = math.floor(value * 10)
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_BAROMETRIC_PRESSURE))
self.buffer.extend(struct.pack('b', val >> 8))
self.buffer.extend(struct.pack('b', val))
def add_gryrometer(self, channel, x, y, z):
vx = math.floor(x * 100)
vy = math.floor(y * 100)
vz = math.floor(z * 100)
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_GYROMETER))
self.buffer.extend(struct.pack('b', vx >> 8))
self.buffer.extend(struct.pack('b', vx))
self.buffer.extend(struct.pack('b', vy >> 8))
self.buffer.extend(struct.pack('b', vy))
self.buffer.extend(struct.pack('b', vz >> 8))
self.buffer.extend(struct.pack('b', vz))
def add_gps(self, channel, latitude, longitude, meters):
lat = math.floor(latitude * 10000)
lon = math.floor(longitude * 10000)
alt = math.floor(meters * 100)
self.buffer.extend(struct.pack('b', channel))
self.buffer.extend(struct.pack('b', LPP_GPS))
self.buffer.extend(struct.pack('b', lat >> 16))
self.buffer.extend(struct.pack('b', lat >> 8))
self.buffer.extend(struct.pack('b', lat))
self.buffer.extend(struct.pack('b', lon >> 16))
self.buffer.extend(struct.pack('b', lon >> 8))
self.buffer.extend(struct.pack('b', lon))
self.buffer.extend(struct.pack('b', alt >> 16))
self.buffer.extend(struct.pack('b', alt >> 8))
self.buffer.extend(struct.pack('b', alt)) |
from pyspark.sql import SparkSession
import time
import sys
spark = SparkSession.builder.appName("appName").getOrCreate()
sc = spark.sparkContext

start = int(round(time.time() * 1000))

# Input lines are tab-separated; x[2], x[6] and x[1] are treated as
# user id, score and product respectively — assumed from the variable
# name, TODO confirm against the input schema.
records = sc.textFile(sys.argv[1])
rows = records.map(lambda line: line.split("\t"))
# Per user, keep the 10 lexicographically largest "score-product" strings.
userToFavouriteProducts = rows.map(lambda x: (x[2], x[6] + "-" + x[1])) \
    .groupByKey() \
    .map(lambda x: {x[0]: sorted(list(x[1]))[-10:]}) \
    .collect()

end = int(round(time.time() * 1000))

# Sort by each record's single key: sorting raw dicts raised TypeError on
# Python 3.  The `with` block also guarantees the file is closed.
with open(sys.argv[2], "w+") as out_file:
    for row in sorted(userToFavouriteProducts, key=lambda d: next(iter(d))):
        out_file.write(str(row))
        out_file.write("\n")

print(end - start)
|
# -*- coding: utf-8 -*-
# Look up a student by name and print their grade row, or a not-found message.
students = [
    {'name': '张三', 'chinese': '84', 'math': '95', 'english': '65', 'total': 195},
    {'name': '李四', 'chinese': '60', 'math': '68', 'english': '65', 'total': 195},
    {'name': '王五', 'chinese': '75', 'math': '79', 'english': '65', 'total': 195},
    {'name': '赵六', 'chinese': '99', 'math': '65', 'english': '65', 'total': 195},
]
stu_format = "{}\t\t{}\t\t{}\t\t{}\t\t{}"
name = input("请输入学生姓名:")
# First record whose name matches, or None when absent.
match = next((record for record in students if record["name"] == name), None)
if match is not None:
    print("姓名\t\t语文\t\t数学\t\t英语\t\t总分")
    print(stu_format.format(*match.values()))
else:
    print("该学生不存在!!!")
|
__author__ = '@tomereyz'
import argparse
import os
# Dockerfile body shared by all challenge images.  {architecture_dependant}
# selects the base image (and qemu shim for ARM), {challenge_template}
# receives one CHALLENGE_TEMPLATE expansion per challenge, and {aslr}
# becomes the kernel.randomize_va_space value (2 = on, 0 = off).
DOCKERFILE_TEMPLATE = """
{architecture_dependant}
RUN apt-get update
WORKDIR /
RUN apt-get install -y openssh-server
RUN apt-get install -y sudo
RUN apt-get install -y python
RUN apt-get install -y gdb
RUN apt-get install -y git
RUN apt-get install -y vim
RUN apt-get install -y gcc
RUN apt-get install -y strace
{challenge_template}
RUN mkdir /var/run/sshd
RUN echo 'root:default' | chpasswd
RUN sed -i 's/[#]*PermitRootLogin [a-zA-Z\-]*/PermitRootLogin yes/' /etc/ssh/sshd_config
RUN sed -i 's/[#]*UsePrivilegeSeparation [a-zA-Z\-]*/UsePrivilegeSeparation no/' /etc/ssh/sshd_config
RUN sed -i 's/[#]*Banner [a-zA-Z\-\/\.]*/Banner \/etc\/banner/' /etc/ssh/sshd_config
RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
RUN echo "export VISIBLE=now" >> /etc/profile
RUN sysctl -w kernel.dmesg_restrict=1
RUN chmod 1733 /tmp /var/tmp /dev/shm
RUN git clone https://github.com/longld/peda.git /usr/bin/peda
RUN echo "source /usr/bin/peda/peda.py" >> ~/.gdbinit
RUN chmod 0755 /usr/bin/peda/*.py
RUN chmod 0755 /usr/bin/peda/lib/*.py
RUN sysctl -w kernel.randomize_va_space={aslr}
EXPOSE 22
CMD ["/usr/sbin/sshd", "-D"]
"""

# Per-challenge Dockerfile fragment: copies the challenge files, creates a
# low-privilege user plus a <name>_root flag owner, compiles the binary, and
# locks down file permissions (setuid binary readable flag model).
CHALLENGE_TEMPLATE = """
COPY {architecture_challenges}/{challenge_name}/* /home/{challenge_name}/
RUN adduser {challenge_name} ; adduser {challenge_name}_root ; usermod -G {challenge_name} {challenge_name} ; usermod -G {challenge_name}_root {challenge_name}_root ; /home/{challenge_name}/compile.sh /home/{challenge_name}
RUN chown {challenge_name}_root /home/{challenge_name}/flag ; chown {challenge_name}_root:{challenge_name} /home/{challenge_name}/{challenge_name} ; chown {challenge_name}:{challenge_name} /home/{challenge_name}/{challenge_name}.c /home/{challenge_name}/compile.sh ; chown root:root /home/{challenge_name}_root
RUN chmod 0400 /home/{challenge_name}/flag ; chmod 4550 /home/{challenge_name}/{challenge_name} ; chmod 0440 /home/{challenge_name}/{challenge_name}.c /home/{challenge_name}/compile.sh
RUN echo '{challenge_name}:{challenge_name}' | chpasswd ; echo '{challenge_name}_root:default' | chpasswd ; echo "source /usr/bin/peda/peda.py" > /home/{challenge_name}/.gdbinit
"""
class DockerFile(object):
    """Generates a Dockerfile for CTF-style pwn challenges and optionally builds/runs it.

    NOTE: Python 2 code (uses ``raw_input``; the surrounding file uses print
    statements).
    """

    def __init__(self, sources, docker_name, no_cache, port, build_run, arch, aslr):
        """
        Generate dockerfile
        :param sources: iterable of challenge source file names (e.g. 'foo.c')
        :param docker_name: tag/name used for both the docker image and container
        :param no_cache: truthy -> pass --no-cache to `docker build`
        :param port: host port mapped to the container's SSH port 22
        :param build_run: truthy -> build and run the image after writing the Dockerfile
        """
        self.sources = sources
        self.dockerfile = None
        self.docker_name = docker_name
        self.no_cache = '--no-cache' if no_cache else ''
        self.port = port
        self.build_run = build_run
        self.arch = arch  # 'i386' selects the x86 image; anything else -> arm32v7
        self.aslr = True if aslr == 'yes' else False

    def __enter__(self):
        # Entering the context immediately generates (and optionally builds/runs).
        self.generate()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return

    def _format_template(self):
        """Fill DOCKERFILE_TEMPLATE/CHALLENGE_TEMPLATE for the chosen arch and ASLR mode."""
        challenge_template = ''
        if self.arch == 'i386':
            architecture_challenges = "exercises-i386-aslr" if self.aslr else 'exercises-i386'
            architecture_dependant = "FROM i386/ubuntu\nCOPY {arch_c}/banner /etc/banner\nCOPY {arch_c}/motd /etc/motd".format(
                arch_c=architecture_challenges)
        else:
            architecture_challenges = "exercises-arm32v7-aslr" if self.aslr else 'exercises-arm32v7'
            # ARM images need the qemu-arm-static shim copied in to run on x86 hosts.
            architecture_dependant = "FROM armhf/ubuntu\nCOPY qemu-arm-static /usr/bin/\nCOPY {arch_c}/motd /etc/motd\nCOPY {arch_c}/banner /etc/banner".format(
                arch_c=architecture_challenges)
        # One CHALLENGE_TEMPLATE expansion per source file ('foo.c' -> challenge 'foo').
        for f in self.sources:
            challenge_template += CHALLENGE_TEMPLATE.format(challenge_name=f.split('.c')[0],
                                                            architecture_challenges=architecture_challenges)
        dockerfile = DOCKERFILE_TEMPLATE.format(architecture_dependant=architecture_dependant,
                                                challenge_template=challenge_template,
                                                aslr='2' if self.aslr else '0')
        self.dockerfile = dockerfile

    def _docker_build_run(self):
        """Optionally wipe existing containers, then build the image and run it with SSH exposed.

        NOTE(review): commands are built by string interpolation and run via
        os.system — acceptable for a trusted local tool, but not safe for
        untrusted docker_name/port values.
        """
        interactive = raw_input(
            'execute `sudo docker stop $(sudo docker ps -a -q) ; sudo docker rm $(sudo docker ps -a -q)` y/n?')
        if interactive == 'y':
            os.system('sudo docker stop $(sudo docker ps -a -q) ; sudo docker rm $(sudo docker ps -a -q)')
        os.system('sudo docker build {no_cache} -t {docker_name} .'.format(no_cache=self.no_cache,
                                                                           docker_name=self.docker_name))
        os.system(
            'sudo docker run --privileged --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -d -p {port}:22 --name {docker_name} {docker_name}'.format(
                port=self.port, docker_name=self.docker_name))

    def generate(self):
        """Write the rendered Dockerfile to ./Dockerfile; build and run when requested."""
        self._format_template()
        with open('Dockerfile', 'w') as f:
            f.write(self.dockerfile)
        if self.build_run:
            self._docker_build_run()
def main(port, arch, sources, docker_name, aslr, no_cache=True, build_run=False):
    """Entry point: the DockerFile context manager does all the work in __enter__.

    NOTE: Python 2 (print statement below).
    """
    with DockerFile(sources=sources, docker_name=docker_name, port=port, no_cache=no_cache,
                    build_run=build_run, arch=arch, aslr=aslr) as handle:
        print handle.sources
if __name__ == '__main__':
    # CLI: argument names map 1:1 onto main()'s keyword parameters via **vars().
    parser = argparse.ArgumentParser(description='Scan directory and prepares challenges & Flags')
    parser.add_argument('-s', '--sources', help='challenges sources names', required=True, dest='sources')
    parser.add_argument('-d', '--docker-name', help='docker name', required=True, dest='docker_name')
    parser.add_argument('-ca', '--no-cache', help='use cache when building docker', required=False, dest='no_cache',
                        action='store_false')
    parser.add_argument('-r', '--build-run', help='build and run docker', required=False, dest='build_run',
                        action='store_true')
    parser.add_argument('-p', '--port', help='running docker port', required=True, dest='port')
    parser.add_argument('-a', '--architecture', help='i386/arm32v7', required=True, dest='arch')
    parser.add_argument('-as', '--aslr', help='yes/no', required=True, dest='aslr')
    main(**vars(parser.parse_args()))
|
from django.apps import AppConfig
class AssineFacilTVAppConfig(AppConfig):
    """Django app configuration for the AssineFacilTV application."""
    name = 'AssineFacilTV'
    verbose_name = 'Assine Fácil TV'
|
import numpy as np
import matplotlib.pyplot as plt
def sample_function(x=None, nfuncs=10, ndata=100):
    """Sample `nfuncs` random functions of the form c * |a + x| + b.

    When `x` is None, `ndata` sorted inputs are drawn uniformly from
    [-10, 10).  Returns an array of shape (nfuncs, number_of_inputs).
    """
    if x is None:
        x = np.sort(np.random.uniform(-10, 10, ndata)[:, None], 0)
    # Per-function random parameters (drawn in the same order as before).
    shift = np.random.uniform(-10, 10, size=(nfuncs, 1))
    offset = np.random.uniform(-2, 2, size=(nfuncs, 1))
    scale = np.random.uniform(1, 5, size=(nfuncs, 1))
    # Broadcast the (1, ndata) input row against the (nfuncs, 1) parameters.
    grid = x.reshape(1, -1)
    return scale * np.abs(shift + grid) + offset
def piecewise_function(inputs):
    """Unimplemented stub.  TODO: generate piecewise functions over `inputs`."""
    pass
def sample_inputs(nfuncs, ndata):
    """Draw `nfuncs` sets of `ndata` inputs, uniform on [-10, 10), each set
    sorted ascending.  Returns an array of shape (nfuncs, ndata, 1)."""
    draws = np.random.uniform(-10, 10, size=(nfuncs, ndata))
    ordered = np.sort(draws, axis=1)
    return ordered[..., np.newaxis]
def sample_gps(nfuncs, ndata, ker):
    """Sample `nfuncs` GP function draws of length `ndata` with kernel `ker`.

    Returns (xs, fs): xs of shape (nfuncs, ndata, 1) and fs concatenated
    along axis 0.  NOTE(review): `sample_gpp` is not defined in this module
    as visible here — presumably provided elsewhere; verify the import.
    """
    xs = sample_inputs(nfuncs, ndata)
    fgp = [sample_gpp(x, 1, kernel=ker) for x in xs]
    return xs, np.concatenate(fgp, axis=0)
def sample_data(nf, nd, ker):
    """Sample a GP dataset and also return inputs and outputs stacked side by side.

    Returns (xs, ys, flat) with shapes [nf, nd, 1], [nf, nd] and [nf, 2*nd].
    """
    xs, ys = sample_gps(nf, nd, ker)
    flat = np.concatenate((xs[:, :, 0], ys), axis=1)
    return xs, ys, flat
|
import functools
import sys
import time
import warnings
__author__ = 's.rozhin'
trace_on = True
def trace(func):
    """Print the function name and arguments of each call.

    The module-level ``trace_on`` flag is consulted once, at decoration
    time; when it is falsy the function is returned undecorated.
    """
    if not trace_on:
        return func

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print(func.__name__, args, kwargs)
        return func(*args, **kwargs)
    return wrapper
def timethis(func):
    """Decorator that prints the wall-clock duration of every call.

    Uses time.perf_counter(): time.clock() was deprecated since Python 3.3
    and removed in Python 3.8, so the original implementation raised
    AttributeError on modern interpreters.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        start = time.perf_counter()
        res = func(*args, **kwargs)
        print("Func time is %f" % (time.perf_counter() - start))
        return res
    return inner
def call_count(func):
    """Count invocations of the wrapped function, printing the running
    total on every call.  The counter is exposed as ``wrapper.calls_count``."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        wrapper.calls_count = wrapper.calls_count + 1
        print("%s calls count %d" % (func.__name__, wrapper.calls_count))
        return func(*args, **kwargs)

    wrapper.calls_count = 0
    return wrapper
def once(func):
    """Run the wrapped function at most once; every later call returns the
    cached result of the first call (with whatever arguments it was made)."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if wrapper.called:
            return wrapper.result
        wrapper.result = func(*args, **kwargs)
        wrapper.called = True
        return wrapper.result

    wrapper.called = False
    wrapper.result = None
    return wrapper
def obsolete(func):
    """Emit a DeprecationWarning (pointing at the function's definition
    site) once, at decoration time, then return the function unchanged."""
    location = func.__code__
    warnings.warn_explicit(
        func.__name__ + " is obsolete.",
        category=DeprecationWarning,
        filename=location.co_filename,
        lineno=location.co_firstlineno + 1,
    )
    return func
def handeled_trace(handle):
    """Decorator factory: write a per-call counter and arity summary to
    `handle` before each call.  Disabled (returns the raw function) when the
    module-level ``trace_on`` flag is falsy at decoration time."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            wrapper.calls += 1
            handle.write("Calls count {}\n".format(wrapper.calls))
            handle.write("%s args is %d %d\n" % (func.__name__, len(args), len(kwargs)))
            return func(*args, **kwargs)

        wrapper.calls = 0
        if trace_on:
            return wrapper
        return func
    return decorator
def pre_validate(cond, message):
    """Decorator factory: assert ``cond(*args, **kwargs)`` holds before the
    wrapped function runs; on failure the AssertionError carries `message`."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            assert cond(*args, **kwargs), message
            return func(*args, **kwargs)
        return wrapper
    return decorator
def post_validate(cond, message):
    """Decorator factory: assert ``cond(result)`` holds for the wrapped
    function's return value; on failure the AssertionError carries `message`."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            assert cond(result), message
            return result
        return wrapper
    return decorator
# Demo function: stacks the counting and stream-tracing decorators.
@call_count
@handeled_trace(sys.stderr)
def simple_func(x):
    """Return x + 2, printing a marker line as a side effect."""
    print("I'm a simple func")
    return x + 2
# Demo for @timethis: sleeps one second so the printed duration is visible.
@timethis
def sleeping_func():
    """Sleep for one second and return 1."""
    time.sleep(1)
    return 1
# Demo for @obsolete: importing this module emits a DeprecationWarning for it.
@obsolete
def obsolete_func():
    print("ololo")
# Demo for @pre_validate: rejects None before the body runs.
@pre_validate(lambda num: num is not None, "num should be digit")
def print_num(num):
    print(num)
def sum_two(one, two):
    """Return ``one + two``."""
    result = one + two
    return result


# Demo for functools.partial: pre-binds the first addend, so sum_one(x) == 42 + x.
sum_one = functools.partial(sum_two, 42)
# Demo for @post_validate: calling this ALWAYS raises AssertionError, since
# the post-condition forbids the None it returns (the call is commented out
# in __main__ for exactly that reason).
@post_validate(lambda x: x is not None, "Should not return None")
def return_none():
    return None
# Intended singledispatch demo, kept un-decorated for Python 2.7
# compatibility.  Without the decorators only the definitions below take
# effect, and the two `_` functions simply shadow each other — every call
# goes to this generic implementation.
# @functools.singledispatch # doesn't work in 2.7
def print_type(obj):
    type_name = type(obj).__name__
    print("Unknown type name" + type_name)

# @print_type.register(int) # doesn't work in 2.7
def _(obj):
    print("Type is int")

# @print_type.register(str) # doesn't work in 2.7
def _(obj):
    print("Type is string")
if __name__ == '__main__':
    # Smoke-test each decorator demo above.
    print(simple_func(22))
    print(simple_func(6))
    print(simple_func.__name__)  # functools.wraps preserved the name
    print(sleeping_func())
    obsolete_func()
    print_num(1)
    # print_num(None) # AssertionError: num should be digit
    # return_none() # AssertionError: Should not return None
    print(sum_one(33))
    print_type(22)
    print_type("aa")
    print_type(22.3)
    # reduce with initial value 0: 0 - 1 - 2 - 3 - 4 == -10
    print(functools.reduce(lambda acc, x: acc - x, [1, 2, 3, 4], 0))
|
#!/usr/bin/env python
import os
import sys
import itertools
import subprocess
from optparse import OptionParser
import boto.ec2.connection as ec2
def _get_instances():
    """Return a flat list of all EC2 instances across every reservation."""
    connection = ec2.EC2Connection()
    reservations = connection.get_all_instances()
    return [inst for res in reservations for inst in res.instances]
def _print_instances(instances, show_terminated=False):
    """Write a table of instances to stdout, one row per instance.

    Instances whose state is not "running" are skipped unless
    `show_terminated` is true.
    """
    row_fmt = ("{index:>4}: {name:<20} "
               "{instance.ip_address:<16} "
               "{launch_time:<12} {instance.id:<12} "
               "{instance.image_id:<13}\n")
    for index, instance in enumerate(instances):
        if instance.state != "running" and not show_terminated:
            continue
        sys.stdout.write(row_fmt.format(
            index=index,
            instance=instance,
            launch_time=instance.launch_time[:10],  # date part only
            name=instance.tags.get("Name", "no name"),
        ))
def _choose_host():
    """Interactively pick an instance and return its public DNS name.

    With exactly one instance it is chosen automatically; otherwise the
    list is printed and the user types an index.  Returns None for an
    invalid/empty choice.  NOTE: Python 2 (raw_input).
    """
    instances = _get_instances()
    if len(instances) == 1:
        return instances[0].public_dns_name
    _print_instances(instances)
    index = raw_input("\nChoose AMI instance (None): ")
    host = None
    if index.isdigit() and int(index) < len(instances):
        host = instances[int(index)].public_dns_name
    return host
def _parse_options(argv=None):
    """Parse command-line options and return (options, args).

    Args:
        argv: optional explicit argument list (defaults to sys.argv[1:]);
            exposed so the parser can be driven programmatically.

    Bug fix: -t/--tunnel previously used action="store_false" with
    default=False, so options.tunnel could never become True and the
    tunnel branch in __main__ was unreachable.  It now stores True.
    """
    parser = OptionParser()
    # Options for the daemonize module.
    parser.add_option("-t", "--tunnel", action="store_true", dest="tunnel",
                      default=False,
                      help="Open a tunnel on port 8000 of the remote.")
    return parser.parse_args(argv)
def connect(hostname, options):
    """Exec an interactive ssh to `hostname` using the EC2 keypair.

    `options` is a raw string of extra ssh flags (e.g. a -R tunnel spec).
    NOTE: Python 2 (print statement); the key path relies on the shell
    expanding $AWS_DIR / $EC2_KEYPAIR because shell=True.
    """
    key = "$AWS_DIR/key_pairs/$EC2_KEYPAIR.pem"
    cmd = "ssh %s -i %s ubuntu@%s" % (options, key, hostname)
    print "Executing ", cmd
    process = subprocess.Popen(cmd, shell=True)
    process.wait()
if __name__ == "__main__":
    # The keypair name is required to locate the .pem file used by connect().
    if "EC2_KEYPAIR" not in os.environ:
        print "EC2_KEYPAIR environment variable not defined."
        sys.exit(1)
    ssh_options = ""
    options, args = _parse_options()
    if options.tunnel:
        # Reverse-tunnel remote port 8000 back to this machine.
        localhost = os.environ["HOSTNAME"]
        ssh_options = "-R 8000:" + localhost + ":8000"
    hostname = _choose_host()
    if hostname:
        connect(hostname, ssh_options)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18/11/9 15:24
# @Author : lijian
# @Desc : https://leetcode.com/problems/maximum-depth-of-binary-tree/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """LeetCode 104: maximum depth of a binary tree."""

    def maxDepth(self, root):
        """
        :type root: TreeNode
        :rtype: int
        Depth of the deepest leaf (0 for an empty tree), computed with an
        explicit (node, depth) stack instead of recursion.
        """
        if root is None:
            return 0
        deepest = 0
        stack = [(root, 1)]
        while stack:
            node, depth = stack.pop()
            deepest = max(deepest, depth)
            if node.left is not None:
                stack.append((node.left, depth + 1))
            if node.right is not None:
                stack.append((node.right, depth + 1))
        return deepest
from flask import Flask,render_template,url_for,request,redirect,make_response,url_for,make_response
import os,string
import sqlite3
import time
import entry,pay,sale_j,shopinfo
app=Flask(__name__)
@app.route('/input/',methods=['GET','POST'])
def input():
    """Sale-entry view: GET shows the form, POST records one sale line.

    Identity comes from cookies set by login(); unauthenticated visitors
    are redirected to the login page.  NOTE(review): this view shadows the
    builtin input(); as a route handler that is harmless but worth knowing.
    """
    username=request.cookies.get('username')
    password=request.cookies.get('password')
    shopid=request.cookies.get('shopid')
    ip=request.cookies.get('ip')
    if not username:
        return redirect('/')
    posdb_name=str(request.cookies.get('posdb_name'))
    instance_name=str(request.cookies.get('instance_name'))
    dt=time.strftime('%Y%m%d')
    sale_sum=0
    pay_sum=0
    goodsinfo=shopinfo.showgoods(shopid.upper())
    if request.method=='POST':
        dt=time.strftime('%Y%m%d')
        time1=time.strftime('%H:%M:%S')
        reqtime=time.strftime('%H%M%S')
        # Fixed single-line/single-POS sale attributes.
        listno=1
        sublistno=1
        pos_id='P001'
        cashier_id=1001
        placeno=000000
        amount=1
        disc_value=0
        vipdisc_value=0
        item_type='a'
        v_type='8'
        disc_type='n'
        x=1
        flag1='0'
        flag2='0'
        flag3='0'
        trainflag='0'
        goodsid=request.form.get('goodsid')
        vgno=goodsid
        use_goodsno=goodsid
        # Category lookup: (group, dept, goods) numbers for this item.
        goodscata=shopinfo.showgoodscata(shopid,goodsid)
        groupno=str(goodscata[0])
        deptno=str(goodscata[1])
        goodsno=str(goodscata[2])
        sales_amount_input=request.form.get('sales_amount')
        # Returns False on non-numeric input, and implicitly None (falsy,
        # but != False) on success — so the `== False` check below only
        # fires for an actual ValueError.
        def isNum(value):
            try:
                float(value) + 1
            except ValueError:
                return False
        # Reject empty, non-numeric, zero or over-long amounts and re-render.
        if sales_amount_input=='' or isNum(sales_amount_input)==False or float(sales_amount_input)==0 or len(sales_amount_input)>9:
            sales_amount=0
            entries=entry.show(dt,shopid)
            inputmsg="The input value not qualified"
            sale_sum=pay.sum_sale(dt,shopid)
            pay_sum=pay.sum_pay(dt,shopid)
            return render_template("input.html",inputmsg=inputmsg,pay_sum=pay_sum,sale_sum=sale_sum,username=username,shopid=shopid,sales_amount=sales_amount,time1=time1,dt=dt,ip=ip,entries=entries,goodsinfo=goodsinfo)
        # Negative amount marks the line as a return/refund.
        if float(sales_amount_input)<0 :
            item_type='r'
        sales_amount=float(sales_amount_input)
        item_value=sales_amount
        price=abs(item_value)
        # Sheet id: shop + date + request time (second resolution).
        sheetid=str(shopid)+str(dt)+str(reqtime)
        sql_value=(shopid,1001,dt,time1,reqtime,listno,sublistno,pos_id,cashier_id,vgno,goodsno,placeno,groupno,deptno,amount,item_value,disc_value,vipdisc_value,item_type,v_type,disc_type,x,flag1,flag2,flag3,trainflag,price,use_goodsno,sheetid)
        sale_j.ins(sql_value)
        # Upload via shell scripts.  NOTE(review): arguments are interpolated
        # into a shell command — safe only because sheetid/posdb_name come
        # from server-side values; verify they can never carry user input.
        salemsg=os.popen('sh upload_sale.sh '+sheetid+' '+posdb_name+' '+instance_name).read()
        r_pay=pay.pay_ins(dt,shopid,item_value,sheetid)
        paymsg=os.popen('sh upload_pay.sh '+sheetid+' '+posdb_name+' '+instance_name).read()
        pay_sum=pay.sum_pay(dt,shopid)
        sale_sum=pay.sum_sale(dt,shopid)
        entries=entry.show(dt,shopid)
        return render_template("input.html",username=username,shopid=shopid,sales_amount=sales_amount,paymsg=paymsg,time1=time1,dt=dt,pay_sum=pay_sum,sale_sum=sale_sum,ip=ip,entries=entries,goodsinfo=goodsinfo)
    else:
        # GET: show current totals and today's entries.
        entries=entry.show(dt,shopid)
        # NOTE(review): pay_sum is assigned from sum_sale here (sum_pay on
        # the POST path) — looks like a copy/paste slip; confirm intent.
        pay_sum=pay.sum_sale(dt,shopid)
        sale_sum=pay.sum_sale(dt,shopid)
        return render_template("input.html",goodsinfo=goodsinfo,username=username,shopid=shopid,sale_sum=sale_sum,pay_sum=pay_sum,entries=entries,ip=ip)
@app.route('/payfor/',methods=['GET','POST'])
def payfor():
    """Payment-entry view: GET shows the form, POST records one payment."""
    username=request.cookies.get('username')
    password=request.cookies.get('password')
    shopid=request.cookies.get('shopid')
    ip=request.cookies.get('ip')
    if not username:
        return redirect('/')
    # NOTE(review): the four cookie reads below duplicate the ones above.
    username=request.cookies.get('username')
    password=request.cookies.get('password')
    shopid=request.cookies.get('shopid')
    ip=request.cookies.get('ip')
    posdb_name=str(request.cookies.get('posdb_name'))
    instance_name=str(request.cookies.get('instance_name'))
    dt=time.strftime('%Y%m%d')
    reqtime=time.strftime('%H%M%S')
    sheetid=str(shopid)+str(dt)+str(reqtime)
    if request.method=='POST':
        pay_amount=request.form.get('pay_amount')
        # Empty amount: re-render with an error message.
        if pay_amount=='':
            remsg='pay_amount is not null.'
            return render_template('pay_input.html',username=username,shopid=shopid,pay_amount=pay_amount,dt=dt,ip=ip,remsg=remsg)
        pay_amount=float(pay_amount)
        pay_value=pay.sum_pay(dt,shopid)
        entries=entry.show(dt,shopid)
        r_pay=pay.pay_ins(dt,shopid,pay_amount,sheetid)
        # Upload via shell script (see note in input() about interpolation).
        remsg=os.popen('sh upload_pay.sh '+sheetid+' '+posdb_name+' '+instance_name).read()
        return render_template("pay_input.html",username=username,shopid=shopid,pay_amount=pay_amount,dt=dt,pay_value=pay_value,ip=ip,entries=entries,r_pay=r_pay,remsg=remsg)
    else:
        # GET: show today's entries and current payment total.
        entries=entry.show(dt,shopid)
        pay_value=pay.sum_pay(dt,shopid)
        return render_template("pay_input.html",username=username,shopid=shopid,pay_value=pay_value,entries=entries)
@app.route('/',methods=['GET','POST'])
def login():
    """Login page backed by the local sqlite user table.

    Access is granted when username/password/shopid match AND either the
    first 7 characters of the client IP match the stored mask or the client
    is on a VIP network prefix.  On success, session data is written to
    plain cookies (10-minute lifetime) and the browser is sent to /input/.
    """
    myshop=shopinfo.showshop()
    if request.method=='POST':
        # Prefer the proxy-forwarded address when present.
        ip=request.headers.get('X-Real-Ip', request.remote_addr)
        ipmask=ip[0:7]
        # Internal network prefixes that bypass the per-user IP check.
        viplist=['10.228','158.207','158.143']
        for vip in viplist:
            ipcheck=ip.find(vip)
            if ipcheck==0:
                ifvip=(ipcheck==0)
                break
        else:
            # No VIP prefix matched the client address.
            ifvip=False
        username=request.form.get('username')
        password=request.form.get('password')
        shopid=request.form.get('shopid')
        conn=sqlite3.connect('mxwg.db')
        cur_login=conn.cursor()
        # SECURITY NOTE(review): passwords appear to be stored/compared in
        # plain text and later echoed into a cookie -- consider hashing and
        # a server-side session instead.
        sql_login='select username,password,shopid,ip,posdb_name,instance_name from user where username=? and password=? and shopid=? and (substr(ip,1,7)=? or ?)'
        login_value=(username,password,shopid,ipmask,ifvip)
        cur_login.execute(sql_login,login_value)
        row=cur_login.fetchone()
        cur_login.close()
        conn.close()
        # Fixed: compare to None with `is`, not `==` (PEP 8).
        if row is None:
            msg='Login Failed!'
            return render_template("login.html",username=username,myshop=myshop,msg=msg,ip=ip,ifvip=ifvip)
        else:
            posdb_name=str(row[4])
            instance_name=str(row[5])
            redirect_to_index = redirect('/input/')
            response = make_response(redirect_to_index)
            # 600-second cookie lifetime == 10-minute session.
            response.set_cookie('username',value=username,max_age=600)
            response.set_cookie('password',value=password,max_age=600)
            response.set_cookie('shopid',value=shopid,max_age=600)
            response.set_cookie('ip',value=ip,max_age=600)
            response.set_cookie('posdb_name',value=posdb_name,max_age=600)
            response.set_cookie('instance_name',value=instance_name,max_age=600)
            return response
    else:
        return render_template('login.html',myshop=myshop)
if __name__ == '__main__':
    # Development server: debug on, listening on all interfaces.
    # Not suitable for production use.
    app.debug=True
    app.run(host='0.0.0.0')
|
import os
import numpy as np
from genEM3.data.wkwdata import WkwData, DataSource
run_root = os.path.dirname(os.path.abspath(__file__))
datasources_json_path = os.path.join(run_root, '../../data/debris_clean_added_bboxes2_datasource.json')
datasources_json_path_out = os.path.join(run_root, '../../data/debris_clean_added_bboxes2_wiggle_datasource.json')
# XY offsets used to jitter ("wiggle") each qualifying bounding box.
wiggles = [
    [-35, 0],
    [35, 0],
    [0, -35],
    [0, 35]
]
data_sources = WkwData.datasources_from_json(datasources_json_path)
data_sources_out = []
for data_source in data_sources:
    # Always keep the original source in the output.
    data_sources_out.append(data_source)
    source_id = data_source.id  # renamed from `id` to avoid shadowing the builtin
    bbox = data_source.input_bbox
    # Only augment class-1 (debris) sources with the standard 140x140x1 box.
    # Fixed: use boolean `and` instead of bitwise `&` on comparison results.
    if data_source.target_class == 1 and bbox[3:] == [140, 140, 1]:
        for wiggle_idx, wiggle in enumerate(wiggles):
            # Derived ids live in their own 1E4 block per wiggle direction.
            id_out = '{:05.0f}'.format(int(source_id)+(wiggle_idx+1)*1E4)
            # Shift the box origin by the wiggle offset; size stays the same.
            bbox_out = [bbox[0] + wiggle[0], bbox[1] + wiggle[1], *bbox[2:]]
            data_source_out = DataSource(
                id=id_out,
                input_path=data_source.input_path,
                input_bbox=bbox_out,
                input_mean=data_source.input_mean,
                input_std=data_source.input_std,
                target_path=data_source.target_path,
                target_bbox=bbox_out,
                target_class=data_source.target_class,
                target_binary=data_source.target_binary
            )
            data_sources_out.append(data_source_out)
WkwData.datasources_to_json(data_sources_out, datasources_json_path_out)
|
import datetime
import logging
import sys
from typing import Any
__all__ = ["QuantrtLog"]
# Single shared logger; both handlers use the same verbose format.
logger = logging.getLogger('quantrtlog')
formatter = logging.Formatter("[%(name)s] @ %(asctime)s from %(funcName)s with level %(levelname)s: %(message)s")
# Mirror every record to stderr...
console_stream = logging.StreamHandler(stream = sys.stderr)
console_stream.setFormatter(formatter)
# ...and to a per-day log file.
# NOTE(review): FileHandler raises FileNotFoundError if the `logs/`
# directory does not already exist -- confirm it is created elsewhere.
file_stream = logging.FileHandler(f"logs/quantrt-log-{datetime.datetime.now().date()}")
file_stream.setFormatter(formatter)
logger.addHandler(console_stream)
logger.addHandler(file_stream)
class QuantrtLog:
    """Class-method facade over the module-level 'quantrtlog' logger."""

    @classmethod
    def info(cls, message: Any, *args: Any):
        logger.info(message, *args)

    @classmethod
    def debug(cls, message: Any, *args: Any):
        logger.debug(message, *args)

    @classmethod
    def warn(cls, message: Any, *args: Any):
        # Fixed: Logger.warn() is deprecated (removed in Python 3.13);
        # delegate to warning().  The method itself is kept for callers.
        logger.warning(message, *args)

    @classmethod
    def warning(cls, message: Any, *args: Any):
        logger.warning(message, *args)

    @classmethod
    def error(cls, message: Any, *args: Any):
        logger.error(message, *args)

    @classmethod
    def exception(cls, message: Any, *args: Any):
        """Log at ERROR level, including the active exception traceback."""
        logger.exception(message, *args)

    @classmethod
    def set_level(cls, level: int):
        logger.setLevel(level)

    @classmethod
    def get_level(cls) -> int:
        return logger.level
|
"""
This module is based on ideas from https://thetinkerpoint.com/2019/02/11/why-the-world-has-gone-crazy/ and code Alex Lamb shared with me: https://github.com/alexlamb/groupdecision via twitter dm.
I attempted to do this in a more functional style, as inspired by my recent watching of https://www.youtube.com/watch?v=vK1DazRK_a0... we'll see how it goes.
"""
from collections import Counter
import random
def add_influence(graph, node, influence_node, min_samples=3):
    """Push influence_node's opinion into node's sample pool, maybe flipping it.

    The influencer's current opinion is appended to the influenced node's
    samples.  When the node holds more than ``min_samples`` samples, the
    majority sample is a real opinion (> 0), and a random draw exceeds the
    node's confidence, the node adopts the majority opinion and resets its
    confidence to 0.7.

    Args:
        graph (nx.Graph): networkx graph object holding both nodes
        node (int): node receiving influence (opinion) from influence_node
        influence_node (int): node whose opinion is added to the first node
        min_samples (int): minimum sample count before a consensus change
    """
    target = graph.nodes[node]
    target["samples"].append(graph.nodes[influence_node]["opinion"])
    # most_common(1) -> [(opinion, count)]
    majority, _count = Counter(target["samples"]).most_common(1)[0]
    enough_samples = len(target["samples"]) > min_samples
    if enough_samples and majority > 0 and random.random() > target["confidence"]:
        target["opinion"] = majority
        target["confidence"] = 0.7
    # target["samples"] = []
def is_converged(network, states):
    """True once the latest state tally contains the full node count."""
    return len(network.nodes) in states[-1]
def get_opinions(network):
    """Tally how many nodes currently hold each of the three opinions."""
    tally = Counter(network.nodes[n]["opinion"] for n in network.nodes)
    return [tally[0], tally[1], tally[2]]
def all_nodes_update_state(network):
    """Update every node once per step (fast-converging variant).

    Each node exchanges influence with one randomly chosen neighbour, so
    the difference between successive states is large and simulations
    converge much faster than with random pairwise updates.
    """
    for current in network.nodes:
        neighbour = random.choice(list(network.neighbors(current)))
        add_influence(network, current, neighbour)
        add_influence(network, neighbour, current)
    return network
def random_update(network):
    """Single-pair step: one random node swaps influence with a neighbour."""
    first = random.choice(list(network.nodes))
    second = random.choice(list(network.neighbors(first)))
    add_influence(network, first, second, min_samples=6)
    add_influence(network, second, first, min_samples=6)
    return network
def run(network, update_network, extract_state, converged, num_steps):
    """Drive a simulation for at most num_steps update rounds.

    network (networkx.graph): graph of nodes to run sim on
    update_network (func): receives network, runs a single update step
    extract_state (func): receives network, returns the state snapshot to
        record after each step
    converged (func): receives (network, states); True ends the run early
    num_steps (int): maximum number of simulation steps to run
    """
    states = [extract_state(network)]
    for _ in range(num_steps):
        network = update_network(network)
        states.append(extract_state(network))
        if converged(network, states):
            break
    return states
|
import time
from app.game.input import Input, Command
from app.game.field import Field
from app.game.field_outputter import FieldOutputter
from app.slack import slacker
class BomberFactory:
    """Registry keeping at most one running Bomber game per Slack channel."""

    _bomber_store = {}

    @classmethod
    def create(cls, channel, users):
        """Return the channel's bomber, creating and starting one if absent."""
        existing = cls.instance(channel)
        if existing is not None:
            return existing
        game = Bomber(channel, users)
        cls._bomber_store[channel] = game
        # NOTE: start() enters the blocking game loop.
        game.start()
        return game

    @classmethod
    def remove(cls, channel):
        """Stop the channel's game loop and drop the instance."""
        cls._bomber_store[channel].running = False
        del cls._bomber_store[channel]

    @classmethod
    def instance(cls, channel):
        """Return the bomber registered for `channel`, or None."""
        return cls._bomber_store.get(channel)
class Bomber:
    """A single game session: owns the field, the input fetcher and the tick loop."""
    def __init__(self, channel, users):
        self.channel = channel
        self.users = users
        # 11 x 15 game field populated with the participating users.
        self.field = Field(11, 15, users)
        # Routes Slack reaction events into reaction_handler.
        self.fetcher = Input(channel, self.reaction_handler)
        # Timestamp of the previous tick; set when start() runs.
        self.prev_tick = None
        # Chat messages seen since the board was last posted as new.
        self.chat_count = 0
    def start(self):
        """Post the initial board and run the blocking ~2 Hz game loop."""
        self.running = True
        FieldOutputter.post_field(self.channel, self.field)
        self.prev_tick = time.time()
        # Loops until BomberFactory.remove() flips self.running to False.
        while self.running:
            self.tick()
            time.sleep(0.5)
    def tick(self):
        """Advance the game by the wall-clock time since the previous tick."""
        tick_time = time.time()
        sec = tick_time - self.prev_tick
        self.field.proceed_time(sec)
        if self.should_send_as_new_message:
            # Chat has scrolled the board away; post a fresh message.
            FieldOutputter.post_field(self.channel, self.field, new_message=True)
            self.chat_count = 0
        else:
            FieldOutputter.post_field(self.channel, self.field)
        self.prev_tick = tick_time
    def reaction_handler(self, user, command):
        """Translate a user's reaction command into an action on the field."""
        person = self.field.person_by_user(user)
        if person is None:
            # Reaction from someone who is not part of this game; ignore.
            return
        if command == Command.up:
            self.field.move_top(person)
        elif command == Command.right:
            self.field.move_right(person)
        elif command == Command.down:
            self.field.move_bottom(person)
        elif command == Command.left:
            self.field.move_left(person)
        elif command == Command.a:
            self.field.put_bomb(person)
    def add_chat_count(self):
        """Record one chat message in the channel since the last repost."""
        self.chat_count += 1
    @property
    def should_send_as_new_message(self):
        """True when chat activity warrants reposting the board as a new message."""
        return self.chat_count >= 8
|
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
# Reuse the README as the long description shown on package indexes.
with open(os.path.join(here, 'README.md')) as f:
    README = f.read()
# NOTE(review): the same list serves install, test and lint requirements,
# so runtime installs pull in dev-only tools (flake8, mock, pytest) --
# consider splitting these into extras.
requires = [
    'flake8',
    'flake8-print',
    'mock',
    'opbeat',
    'opbeat_pyramid',
    'pyramid',
    'pyramid_chameleon',
    'pyramid_debugtoolbar',
    'pyscss',
    'pytest',
    'pytest-cov',
    'pytest-watch',
    'uwsgi',
    'waitress',
    'zope.interface',
]
setup(
    name='mk',
    version='0.10.2',
    description='Exposing my website, one hacker at a time.',
    long_description=README,
    classifiers=[
        "Programming Language :: Python",
        "Framework :: Pyramid",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
    ],
    author='Bailey Stoner',
    author_email='monokrome@limpidtech.com',
    url='https://monokro.me/',
    keywords='web pyramid pylons',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=requires,
    tests_require=requires,
    test_suite="mk",
    # Paste app factory entry point used by `pserve`/WSGI servers.
    entry_points="""\
    [paste.app_factory]
    main = mk:main
    """,
)
|
import functools
# Common function decorator
def eggs_decorator(function):
    """Identity decorator that preserves the wrapped function's metadata."""
    @functools.wraps(function)
    def _wrapped(*args, **kwargs):
        # Delegate straight through; wraps() keeps __name__/__doc__ intact.
        return function(*args, **kwargs)
    return _wrapped
def spam():
    """
    This is spam function
    :return: string
    """
    message = "spam function called"
    print(message)
    return "success!"
@eggs_decorator
def spam_decorated():
    """
    This is spam function
    :return: string
    """
    result = "success!"
    print("spam function called")
    return result
print()
print(">>>>>>> Function decorator <<<<<<<")
print()
# help() prints the docstring; functools.wraps preserves it on the wrapper.
print("help spam", help(spam))
print("spam.__name__", spam.__name__)
print()
print("##############################")
print()
print("help spam_decorated", help(spam_decorated))
print("spam_decorated.__name__", spam_decorated.__name__)
print("calling spam", spam())
# Rebinds the name `spam`: calls below hit this version, not the one above.
def spam(eggs):
    # 'spam' repeated (eggs % 5) times
    return 'spam' * (eggs % 5)
output = spam(3)
print(output)
|
from gym.envs.registration import register
from gym_algorithmic.copy_ import CopyEnv
from gym_algorithmic.duplicated_input import DuplicatedInputEnv
from gym_algorithmic.repeat_copy import RepeatCopyEnv
from gym_algorithmic.reverse import ReverseEnv
from gym_algorithmic.reversed_addition import ReversedAdditionEnv
# Register each algorithmic task with Gym.  The entry_point strings resolve
# against the `gym_algorithmic` package namespace, which is why the env
# classes are imported above even though they look unused here.  All tasks
# share a 200-step episode cap; reward_threshold marks the "solved" score.
register(
    id="Copy-v0",
    entry_point="gym_algorithmic:CopyEnv",
    max_episode_steps=200,
    reward_threshold=25.0,
)
register(
    id="RepeatCopy-v0",
    entry_point="gym_algorithmic:RepeatCopyEnv",
    max_episode_steps=200,
    reward_threshold=75.0,
)
# Addition tasks share one env class; `rows` selects the number of addends.
register(
    id="ReversedAddition-v0",
    entry_point="gym_algorithmic:ReversedAdditionEnv",
    kwargs={"rows": 2},
    max_episode_steps=200,
    reward_threshold=25.0,
)
register(
    id="ReversedAddition3-v0",
    entry_point="gym_algorithmic:ReversedAdditionEnv",
    kwargs={"rows": 3},
    max_episode_steps=200,
    reward_threshold=25.0,
)
register(
    id="DuplicatedInput-v0",
    entry_point="gym_algorithmic:DuplicatedInputEnv",
    max_episode_steps=200,
    reward_threshold=9.0,
)
register(
    id="Reverse-v0",
    entry_point="gym_algorithmic:ReverseEnv",
    max_episode_steps=200,
    reward_threshold=25.0,
)
|
import pygame
# Board
# Board: 20 wide by 40 tall; the visible part of the screen is roughly
# 15-19 x 10-12 cells (depending on the screen resolution).
# NOTE(review): the comment above says 20x40 but the constants below are
# 40x25 -- confirm which is intended.
# TODO: a camera still needs to be implemented (the character should
# probably sit slightly below the centre of the view).
board_width = 40
board_height = 25
cell_size = 40
fps = 60
# Clock used to cap the frame rate at `fps`.
clock = pygame.time.Clock()
# Create the window (no size argument = desktop resolution).
screen = pygame.display.set_mode()
# screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
# pygame.display.set_caption("")  # window title
|
# A navbar with an optional focused element. Uses materializecss.
# All links are also used as mobile links with a slideout menu.
"""
Desktop:
________________________________________________________
| |
| Bidbyte Link1 Link 2 |
| |
________________________________________________________
Mobile:
________________________________________________________
| ____ |
| ____ Bidbyte |
| ____ |
________________________________________________________
"""
from django import template
from core.models import Link
register = template.Library()
@register.inclusion_tag('core/navbar.html', takes_context=True)
def navbar(context, *args, **kwargs):
    """ Provides context as a python dictionary into the template.
    Kwargs Options:
    @param focus: the li element to focus on
    @param title: The main text to be used as a logo
    @param title_ref: The url to go to after clicking on the title
    @param links: A python list of `Link` elements
    @param fixed: True if the navbar should be fixed to the top of the screen. Default: False
    @param color: The background color. Default: "green lighten-1"
    """
    defaults = {
        "focus": "",
        "title": "Bidbyte",
        "title_ref": "/",
        "fixed": False,
        "color": "green lighten-1",
        "links": [],
    }
    for option, fallback in defaults.items():
        kwargs.setdefault(option, fallback)
    result = {option: kwargs[option] for option in defaults}
    # is_active distinguishes real, enabled accounts from anonymous users.
    result["logged_in"] = context['request'].user.is_active
    return result
|
'''
2. 还记得求回文字符串那道题吗?现在让你使用递归的方式来求解,亲还能骄傲的说我可以吗?
'''
def is_huiwen(str1):
    """Return True when str1 is a palindrome (two-pointer scan)."""
    left, right = 0, len(str1) - 1
    while left < right:
        if str1[left] != str1[right]:
            return False
        left += 1
        right -= 1
    # Empty and single-character strings fall through as palindromes.
    return True
# Demo: the first string reads the same forwards and backwards (True),
# the second does not (False).
print(is_huiwen('上海自来水来自海上'))
print(is_huiwen('双手插口袋'))
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
import std_msgs.msg
import geometry_msgs.msg
import nav_msgs.msg
import sensor_msgs.msg
odom = [0,0]  # latest [linear.x, angular.z] seen on /odom
vel = [0,0]  # latest [linear.x, angular.z] seen on /cmd_vel
THRUSTER_COB = 300 #Distance between thrusters and cob
def listener():
    """Node entry point: subscribe to odometry and velocity commands.

    Both subscriptions share the `pwm` callback; callback_args tells it
    which topic a message came from (0 = /odom, 1 = /cmd_vel).
    """
    rospy.init_node("accel_to_pwm")
    rospy.Subscriber("/odom", nav_msgs.msg.Odometry, callback=pwm, callback_args=0)
    rospy.Subscriber("/cmd_vel", geometry_msgs.msg.Twist, callback=pwm, callback_args=1)
    # talker() blocks in its own publish loop until shutdown, so spin()
    # is only reached afterwards.
    talker()
    rospy.spin()
def pwm(data, current):
    """Cache the most recent odometry / velocity-command message.

    Args:
        data: Odometry (current == 0) or Twist (current == 1) message.
        current: 0 for the /odom subscriber, 1 for /cmd_vel.
    """
    global odom, vel
    if current == 0:
        twist = data.twist.twist
        odom[0] = twist.linear.x
        odom[1] = twist.angular.z
    elif current == 1:
        vel = [data.linear.x, data.angular.z]
    #rospy.loginfo(data)
def talker():
    """Republish the cached odometry and command values at 100 Hz."""
    global odom, vel
    # Split the cached pairs across four scalar topics.
    lin_odom = rospy.Publisher("/lin/odom", std_msgs.msg.Float64, queue_size=10)
    ang_odom= rospy.Publisher("/ang/odom", std_msgs.msg.Float64, queue_size=10)
    lin_cmd = rospy.Publisher("/lin/cmd", std_msgs.msg.Float64, queue_size=10)
    ang_cmd = rospy.Publisher("/ang/cmd", std_msgs.msg.Float64, queue_size=10)
    rate = rospy.Rate(100)  # Hz
    while not rospy.is_shutdown():
        #rospy.loginfo(str(odom)+" "+str(vel))
        lin_odom.publish(odom[0])
        ang_odom.publish(odom[1])
        lin_cmd.publish(vel[0])
        ang_cmd.publish(vel[1])
        rate.sleep()
# Start the node as soon as the script is executed.
listener()
# coding = utf8
import flask,json
import pymysql
"""POST parameter handling backed by MySQL."""
server=flask.Flask(__name__)# __name__ names the current Python file; that file is run as the service
@server.route('/index',methods=['post'])# first arg is the path; second lists allowed methods (default is GET)
def index():
    """Check a user name against MySQL, registering it if unknown.

    Reads `username`/`passwd` from the POST form, looks the user up, inserts
    a new row when absent, and returns a JSON status body.
    """
    username=flask.request.values.get('username')
    passwd=flask.request.values.get('passwd')
    db = pymysql.connect(host='192.168.254.130', user='root',password='ycc962464', db='pp_list')
    try:
        cursor = db.cursor()
        # Fixed: use parameterized queries -- the previous string-formatted
        # SQL was vulnerable to SQL injection via the form fields.
        cursor.execute("select * from user where name=%s", (username,))
        shujuku_name = cursor.fetchall()
        if not shujuku_name:
            res = {'msg':'当前用户不存在','msg_code':0}
            # NOTE(review): column name 'passowd' looks like a schema typo;
            # kept as-is to match the existing table.
            cursor.execute("INSERT INTO user(name,passowd) VALUES (%s,%s)", (username, passwd))
            # commit() persists the INSERT; without it the row would be lost
            # (or rolled back if a later statement failed).
            db.commit()
        else:
            # NOTE(review): keys here are 'mag'/'mag_code' while the other
            # branch uses 'msg'/'msg_code' -- likely a typo, kept for
            # backward compatibility with existing clients.
            res ={'mag':'登录成功','mag_code':1}
    finally:
        # Robustness: close the connection even if a query raises.
        db.close()
    return json.dumps(res,ensure_ascii=False)
# Dev server on all interfaces; debug=True must not be used in production.
server.run(port=7777,debug=True,host='0.0.0.0')
# -- encoding:utf-8 --
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn import datasets
from keras.layers.core import Dense
from keras.models import Sequential
from keras.optimizers import SGD
import numpy as np
import argparse
import matplotlib.pyplot as plt
ap=argparse.ArgumentParser()
ap.add_argument('-o','--output',required=True,
    help='path to the output loss/accuracy plot')
args=vars(ap.parse_args())
print('[INFO] loading MNIST (full) dataset...')
# dataset=datasets.fetch_mldata('MNIST Original')
dataset=datasets.load_digits()
# NOTE(review): load_digits pixels range 0-16, not 0-255 -- dividing by 255
# still works but compresses the input range; confirm this is intended.
data=dataset.data.astype('float')/255
(trainx,testx,trainy,testy)=train_test_split(data,
    dataset.target,test_size=0.25)
# One-hot encode the integer class labels.
lb=LabelBinarizer()
trainy=lb.fit_transform(trainy)
testy=lb.transform(testy)
# 64-16-16-10 fully connected network.
model=Sequential()
model.add(Dense(16,input_shape=(64,),activation='sigmoid'))
model.add(Dense(16,activation='sigmoid'))
model.add(Dense(10,activation='softmax'))
print('[INFO] training network...')
sgd=SGD(0.01)
model.compile(loss='categorical_crossentropy',optimizer=sgd,
    metrics=['accuracy'])
H=model.fit(trainx,trainy,validation_data=(testx,testy),
    epochs=40000,batch_size=128)
print('[INFO] evaluating network...')
predictions=model.predict(testx,batch_size=128)
print(classification_report(testy.argmax(axis=1),predictions.argmax(axis=1),
    target_names=[str(x) for x in lb.classes_]))
# Fixed: the x-axis previously used np.arange(0,100) while training ran for
# a different number of epochs, raising a shape-mismatch ValueError in
# plt.plot.  Derive the axis -- and the accuracy key names, which differ
# across Keras versions ('acc' vs 'accuracy') -- from the history itself.
epoch_axis=np.arange(0,len(H.history['loss']))
acc_key='acc' if 'acc' in H.history else 'accuracy'
plt.style.use('ggplot')
plt.figure()
plt.plot(epoch_axis,H.history['loss'],label='train_loss')
plt.plot(epoch_axis,H.history['val_loss'],label='val_loss')
plt.plot(epoch_axis,H.history[acc_key],label='train_acc')
plt.plot(epoch_axis,H.history['val_'+acc_key],label='val_acc')
plt.title('Training LOSS and accuracy')
plt.xlabel('Epoch')
plt.ylabel('loss/accuracy')
plt.legend()
plt.savefig(args['output'])
|
from django.db import models
# Create your models here.
from apps.sistema.models import EntidadBase
from utils.choices import TIPO_DOC_CHOICES
class Persona(EntidadBase):
    """A person (natural or legal entity) identified by a document number."""
    FISICA = 'F'
    JURIDICA = 'J'
    TIPO_PERSONA_CHOICES = (
        (FISICA, 'Física'),
        (JURIDICA, 'Jurídica'),
    )
    # NOTE(review): null=False with blank=True lets forms submit an empty
    # string while the DB forbids NULL -- confirm this is intended.
    nombres = models.CharField(max_length=255, null=False, blank=True)
    apellidos = models.CharField(max_length=255, null=True, blank=True)
    # Document type defaults to RUC (tax id); choices come from shared utils.
    tipo_doc = models.CharField(max_length=3, choices=TIPO_DOC_CHOICES, null=False, default='RUC')
    nro_doc = models.CharField(max_length=30, null=False, blank=False)
    tipo_persona = models.CharField(max_length=1, choices=TIPO_PERSONA_CHOICES, default=JURIDICA)
    class Meta:
        # Alphabetical by surname/name, with id as a stable tiebreaker.
        ordering = ['apellidos', 'nombres', 'id']
|
# Outline Data Structures:
# Point
class Point(object):
    """A 2-D point with public X/Y coordinates."""

    def __init__(self, x, y):
        self.X = x
        self.Y = y

    def __str__(self):
        return f"Point({self.X},{self.Y})"
# Parcel
class Parcel:
    """Axis-aligned rectangular parcel of land.

    TODO
    Add error-checking for when the lowerleft and upperright are not in fact lower and upper
    Add check to make sure area isn't zero or negative
    """

    def __init__(self, lowerleft: Point, upperright: Point, land_type):
        self.lowerleft = lowerleft
        self.upperright = upperright
        # Rectangle area = width * height.
        width = upperright.X - lowerleft.X
        height = upperright.Y - lowerleft.Y
        self.area = width * height
        self.land_type = land_type
        # (Optional Optimization)
        # Store land_type as an integer instead of string
        # -1 means "not yet assigned to any LandCollection".
        self.belongs_to_collection = -1

    def __str__(self):
        return "Parcel(%s,%s,%s)"%(self.lowerleft, self.upperright, self.land_type)
# LandCollection
class LandCollection:
    """A group of parcels sharing one land type, accumulated incrementally."""

    def __init__(self, id, land_type):
        # Caller-supplied identifier (note: parameter shadows builtin `id`).
        self.id = id
        self.land_type = land_type
        # Parcels and their running total area start empty.
        self.list_of_parcels = []
        self.total_land_area = 0
# Helper Functions:
# A function that takes two parcels of land and checks if they have any overlap between them
def ParcelsHaveOverlap(parcel_A, parcel_B):
    """True when the two rectangles share a region of non-zero area.

    Edge-to-edge contact (<=/>=) does NOT count as overlap.
    """
    a_lo, a_hi = parcel_A.lowerleft, parcel_A.upperright
    b_lo, b_hi = parcel_B.lowerleft, parcel_B.upperright
    # Separated horizontally: A entirely left of, or entirely right of, B.
    if a_hi.X <= b_lo.X or a_lo.X >= b_hi.X:
        return False
    # Separated vertically: A entirely above, or entirely below, B.
    if a_lo.Y >= b_hi.Y or a_hi.Y <= b_lo.Y:
        return False
    return True
# A function that takes two parcels of land and checks if they have any overlap between them
def ParcelsHaveOverlapDebug(parcel_A, parcel_B):
    """Same logic as ParcelsHaveOverlap, but prints which test decided.

    Reasons A-D identify the separating direction; Reason E means overlap.
    """
    # Return false if:
    # If A is too far left of B
    if (parcel_A.upperright.X <= parcel_B.lowerleft.X):
        print("Reason A:")
        return False
    # Or if A is too far right of B
    elif (parcel_A.lowerleft.X >= parcel_B.upperright.X):
        print("Reason B:")
        return False
    # Or if A is too far above B
    elif (parcel_A.lowerleft.Y >= parcel_B.upperright.Y):
        print("Reason C:")
        return False
    # Or if A is too far below B
    elif (parcel_A.upperright.Y <= parcel_B.lowerleft.Y):
        print("Reason D:")
        return False
    print("Reason E:")
    return True
# A function that checks if a given parcel contains any barren land areas and returns a specific barren land parcel if true, else it returns None
def ParcelContainsBarren(parcel, barren_lands):
    """Return the first barren parcel overlapping `parcel`, or None."""
    return next(
        (candidate for candidate in barren_lands if ParcelsHaveOverlap(parcel, candidate)),
        None,
    )
# Checks if two number ranges contain a non-zero overlap with each other
def RangesHaveOverlap(start1, end1, start2, end2):
    """True when [start1, end1) and [start2, end2) overlap with non-zero length."""
    if end1 <= start2:
        return False
    if end2 <= start1:
        return False
    return True
# This function takes in two parcels and determines if they share a non-zero length border with each other regardless of if they share any area overlap.
def ParcelsShareBorder(parcel_A, parcel_B):
    """True when A and B share an edge segment of non-zero length.

    The gap tests use strict inequalities so touching parcels survive
    them; then each pair of aligned horizontal edges is checked for
    X-range overlap and each pair of aligned vertical edges for Y-range
    overlap.  Corner-only contact fails the range checks and returns False.
    """
    # Return False if there is a clear gap between them:
    # If A is too far left of B
    if (parcel_A.upperright.X < parcel_B.lowerleft.X):
        return False
    # Or if A is too far right of B
    elif (parcel_A.lowerleft.X > parcel_B.upperright.X):
        return False
    # Or if A is too far above B
    elif (parcel_A.lowerleft.Y > parcel_B.upperright.Y):
        return False
    # Or if A is too far below B
    elif (parcel_A.upperright.Y < parcel_B.lowerleft.Y):
        return False
    # Check if A's bottom edge lines up with either of B's top or bottom edges
    if (parcel_A.lowerleft.Y == parcel_B.lowerleft.Y) or (parcel_A.lowerleft.Y == parcel_B.upperright.Y):
        # Then we need to check if the edge's ranges also have any overlap:
        if RangesHaveOverlap( parcel_A.lowerleft.X, parcel_A.upperright.X, parcel_B.lowerleft.X, parcel_B.upperright.X):
            return True
    # Check if A's top edge lines up with either of B's top or bottom edges
    if (parcel_A.upperright.Y == parcel_B.lowerleft.Y) or (parcel_A.upperright.Y == parcel_B.upperright.Y):
        # Then we need to check if the edge's ranges also have any overlap:
        if RangesHaveOverlap( parcel_A.lowerleft.X, parcel_A.upperright.X, parcel_B.lowerleft.X, parcel_B.upperright.X):
            return True
    # Check if A's left edge lines up with either of B's left or right edges
    if (parcel_A.lowerleft.X == parcel_B.lowerleft.X) or (parcel_A.lowerleft.X == parcel_B.upperright.X):
        # Then we need to check if the edge's ranges also have any overlap:
        if RangesHaveOverlap( parcel_A.lowerleft.Y, parcel_A.upperright.Y, parcel_B.lowerleft.Y, parcel_B.upperright.Y):
            return True
    # Check if A's right edge lines up with either of B's left or right edges
    if (parcel_A.upperright.X == parcel_B.lowerleft.X) or (parcel_A.upperright.X == parcel_B.upperright.X):
        # Then we need to check if the edge's ranges also have any overlap:
        if RangesHaveOverlap( parcel_A.lowerleft.Y, parcel_A.upperright.Y, parcel_B.lowerleft.Y, parcel_B.upperright.Y):
            return True
    return False
# This function takes in two parcels and determines if they share a non-zero length border with each other regardless of if they share any area overlap or not.
def ParcelsShareBorderDebug(parcel_A, parcel_B):
    """Same logic as ParcelsShareBorder, but prints which test decided.

    Reasons A-D are gap rejections; E-H identify the matching edge pair;
    Reason I means no shared border was found.
    """
    # Return false if there is a clear gap between them:
    # If A is too far left of B
    if (parcel_A.upperright.X < parcel_B.lowerleft.X):
        print("Reason A:")
        return False
    # Or if A is too far right of B
    elif (parcel_A.lowerleft.X > parcel_B.upperright.X):
        print("Reason B:")
        return False
    # Or if A is too far above B
    elif (parcel_A.lowerleft.Y > parcel_B.upperright.Y):
        print("Reason C:")
        return False
    # Or if A is too far below B
    elif (parcel_A.upperright.Y < parcel_B.lowerleft.Y):
        print("Reason D:")
        return False
    # Check if A's bottom edge lines up with either of B's top or bottom edges
    if (parcel_A.lowerleft.Y == parcel_B.lowerleft.Y) or (parcel_A.lowerleft.Y == parcel_B.upperright.Y):
        # Then we need to check if the edge's ranges also have any overlap:
        if RangesHaveOverlap( parcel_A.lowerleft.X, parcel_A.upperright.X, parcel_B.lowerleft.X, parcel_B.upperright.X):
            print("Reason E:")
            return True
    # Check if A's top edge lines up with either of B's top or bottom edges
    if (parcel_A.upperright.Y == parcel_B.lowerleft.Y) or (parcel_A.upperright.Y == parcel_B.upperright.Y):
        # Then we need to check if the edge's ranges also have any overlap:
        if RangesHaveOverlap( parcel_A.lowerleft.X, parcel_A.upperright.X, parcel_B.lowerleft.X, parcel_B.upperright.X):
            print("Reason F:")
            return True
    # Check if A's left edge lines up with either of B's left or right edges
    if (parcel_A.lowerleft.X == parcel_B.lowerleft.X) or (parcel_A.lowerleft.X == parcel_B.upperright.X):
        # Then we need to check if the edge's ranges also have any overlap:
        if RangesHaveOverlap( parcel_A.lowerleft.Y, parcel_A.upperright.Y, parcel_B.lowerleft.Y, parcel_B.upperright.Y):
            print("Reason G:")
            return True
    # Check if A's right edge lines up with either of B's left or right edges
    if (parcel_A.upperright.X == parcel_B.lowerleft.X) or (parcel_A.upperright.X == parcel_B.upperright.X):
        # Then we need to check if the edge's ranges also have any overlap:
        if RangesHaveOverlap( parcel_A.lowerleft.Y, parcel_A.upperright.Y, parcel_B.lowerleft.Y, parcel_B.upperright.Y):
            print("Reason H:")
            return True
    print("Reason I:")
    return False
# This function is simple enough. It checks if a point would be inside of a parcel by its description
def PointIsInsideParcel(X, Y, parcel):
    """True when (X, Y) lies inside the parcel.

    Left/bottom edges are inclusive; right/top edges are exclusive.
    """
    inside_x = parcel.lowerleft.X <= X < parcel.upperright.X
    inside_y = parcel.lowerleft.Y <= Y < parcel.upperright.Y
    return inside_x and inside_y
|
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import ZcoolspiderItem
class ZcoolSpider(CrawlSpider):
    """Crawl zcool.com.cn list pages and scrape image URLs from work pages."""
    name = 'zcool'
    allowed_domains = ['zcool.com.cn']
    start_urls = ['https://www.zcool.com.cn/?p=1#tab_anchor']
    rules = (
        # Follow pagination links.
        # NOTE(review): '/?' here is an optional-slash quantifier, not a
        # literal '?p=' -- the pattern effectively matches any URL containing
        # 'p=<digits>#tab_anchor'; confirm it is intentionally this loose.
        Rule(LinkExtractor(allow=r'/?p=\d+#tab_anchor'), follow=True),
        # Work detail pages: parse_item extracts the images.
        # NOTE(review): the '.' before 'html' is an unescaped wildcard --
        # confirm the intended pattern (likely r'/work/.+\.html' or similar).
        Rule(LinkExtractor(allow=r'/work/.+=.html'), callback='parse_item', follow=False),
    )
    def parse_item(self, response):
        """Yield one item with all photo URLs and the joined page title."""
        img_urls=response.xpath('//div[@class="photo-information-content"]/img/@src').getall()
        title=response.xpath('//h2/text()').getall()
        # Collapse the title fragments into a single trimmed string.
        title=''.join(title).strip()
        item=ZcoolspiderItem(image_urls=img_urls,title=title)
        yield item
        # item = {}
        #item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
        #item['name'] = response.xpath('//div[@id="name"]').get()
        #item['description'] = response.xpath('//div[@id="description"]').get()
        # return item
|
'''
URL patterns includes for Figures devsite
'''
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
    # Site landing page, served from a static template.
    url(r'^$', TemplateView.as_view(template_name='homepage.html'), name='homepage'),
    url(r'^admin/', include(admin.site.urls)),
    # Stock Django auth views (login/logout/password management).
    url(r'^accounts/', include('django.contrib.auth.urls')),
    # Figures app, namespaced so templates can reverse 'figures:<name>'.
    url(r'^figures/', include('figures.urls', namespace='figures')),
]
|
#!/usr/bin/python3
"""Module creates method add_item which adds all arguments to a\
Python list, and saves the list to a file"""
# import sys to handle command line args (sys.argv[])
import sys
# import json to handle json syntax in imported functions
import json
# save_to_json_file takes python object and dumps
# the object to json string via write()
save_to_json_file = __import__('5-save_to_json_file').save_to_json_file
# load_from_json_file creates a python object
# from a file with json representation (str)
load_from_json_file = __import__('6-load_from_json_file').load_from_json_file
# Load the existing list when the file exists and parses; otherwise start
# fresh.  Fixed: `except Exception` instead of a bare `except:` so
# KeyboardInterrupt/SystemExit still propagate.
try:
    theList = load_from_json_file("add_item.json")
except Exception:
    theList = []
# Append every command-line argument (skipping the script name) and persist.
theList.extend(sys.argv[1:])
save_to_json_file(theList, "add_item.json")
|
import tornado.ioloop
import tornado.web
import os, sys
import subprocess
import json
import time
import sys
# Python 2 only: force the default string encoding to UTF-8.
# NOTE(review): reload()/sys.setdefaultencoding do not exist on Python 3,
# yet the handler below calls bytes.decode() in Python 3 style -- confirm
# which interpreter version this service actually targets.
reload(sys)
sys.setdefaultencoding('utf-8')
# Example of the request-argument dict this service receives:
'''
{'tiaozhuan': [b'2'],
'domain_name': [b'zhoutao990.51xidu.com'],
'page': [b'3'],
'site_id': [b'123'],
'comp': [b'\xe6\x9f\x90\xe6\x9f\x90\xe5\x85\xac\xe5\x8f\xb8'],
'jdomain': [b'1.zhoutao.name1'],
'page1': [b'1'],
'jsite_id': [b'3333'],
'jcomp': [b'\xe6\x9f\x90\xe6\x9f\x902\xe5\x85\xac\xe5\x8f\xb8']}
'''
# Mapping of page id -> source directory, reloaded again inside each request.
dirdict = json.load(open('.temp.json','r'))
# pip3 install tornado
class MainHandler(tornado.web.RequestHandler):
    """Deployment control endpoint (GET /index).

    The 'tiaozhuan' request argument selects the task:
      '3' -> approve pages: copy index.sh over index.html (now, or later via cron)
      '4' -> redirect pages: copy index.tz over index.html (now, or later via cron)
      '5' -> swap an existing domain's page template, preserving its old codes
      '1' -> deploy one new domain via ansible
      '2' -> deploy a domain plus a companion ("j") domain via ansible
    The 'uniqcode' argument is compared against .uniqcode.txt to skip duplicate
    submissions (e.g. a browser re-sending the same form).
    """
    def get(self):
        print('task begin --------------------------------------------------------------------------------------------------------------------------------')
        # Reload the page-name -> source-dir mapping on every request so edits
        # to .temp.json take effect without restarting the server.
        with open('.temp.json', 'r') as jf:
            dirdict = json.load(jf)
        print(dirdict)
        asdf = self.request.arguments
        self.a = asdf['uniqcode'][0].decode()
        print(asdf)
        with open('.uniqcode.txt', 'r') as f1:
            line = f1.readlines()
            if not line:
                line.append('123')
            if self.a == line[0].strip():
                # Same uniqcode as last processed request: ignore duplicate.
                print('found old uniqcode will finished <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                self.finish()
            else:
                self.T = asdf['tiaozhuan'][0].decode()
                print('self T is ', self.T)
                if self.T == '3':  # shenghe (approve)
                    self._copy_pages(asdf, 'index.sh')
                elif self.T == '4':  # tiaozhuang (redirect)
                    self._copy_pages(asdf, 'index.tz')
                elif self.T == '5':
                    # Re-template an existing domain, carrying over the old
                    # wx/cnzz codes scraped from its deployed index.html.
                    self.cd = asdf['cdomain_name'][0].decode()
                    self.cp = asdf['page11'][0].decode()
                    olddict = self.getoldcode(self.cd)
                    print(olddict)
                    olddict['cdomain_name'] = self.cd
                    olddict['src_dir'] = dirdict[self.cp]
                    self.writejson('/data/web/adtemp/oldcode.json', olddict)
                    self.change_dir(self.cd, '/data/web/adtemp/changepage.yml')
                else:
                    # T == '1' or '2': deploy new domain(s) via ansible.
                    self.u = asdf['domain_name'][0].decode()
                    self.p = asdf['page'][0].decode()
                    self.sid = asdf['site_id'][0].decode()
                    self.pburl = asdf['pburl'][0].decode()
                    self.compgongso = asdf['comp'][0].decode()
                    self.b_city = asdf['b_city'][0].decode()
                    dict1 = {'domain_name': self.u, 'page': self.p, 'site_id': self.sid, 'comp': self.compgongso, 'b_city': self.b_city, 'pburl': self.pburl}
                    # ---- companion ("j") domain parameters
                    self.ju = asdf['jdomain'][0].decode()
                    self.jp = asdf['page1'][0].decode()
                    self.jsid = asdf['jsite_id'][0].decode()
                    self.jcimpgongso = asdf['jcomp'][0].decode()
                    self.jb_city = asdf['jb_city'][0].decode()
                    dict2 = {'domain_name': self.ju, 'page': self.jp, 'site_id': self.jsid, 'comp': self.jcimpgongso, 'b_city': self.jb_city, }
                    print(self.T)
                    print('=====================================')
                    if self.T == '1':
                        print('TTTTTT 11')
                        dict1['src_dir'] = dirdict[self.p]
                        json1 = '/data/web/adtemp/temp.json'
                        self.writejson(json1, dict1)
                        self.configall('.add_nginx.yml')
                    elif self.T == '2':
                        print('TTTTTTT 22')
                        dict1['src_dir'] = dirdict[self.p]
                        json1 = '/data/web/adtemp/temp.json'
                        self.writejson(json1, dict1)
                        self.configall('.add_nginx.yml')
                        dict2['src_dir'] = dirdict[self.jp]
                        dict2['llldomain_name'] = dict1['domain_name']
                        print('dict2 is ', dict2)
                        json2 = '/data/web/adtemp/temp1.json'
                        self.writejson(json2, dict2)
                        self.configall('.add_nginxj.yml')
                # Task done: remember this uniqcode and bounce the browser back.
                self.huixie()
        print('task end +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        self.finish()
    def _copy_pages(self, asdf, index_file):
        """Copy `index_file` over index.html for each requested site dir.

        Writes the comma-separated 'dirname' list to a working file. If
        'user_time' is given, the copy loop is appended to root's crontab to
        run at that time; otherwise it runs immediately.
        (Factored out of the formerly duplicated T=='3' / T=='4' branches.)
        """
        user_time = asdf['user_time'][0].decode()
        if user_time:
            time_file = '.' + user_time
        else:
            time_file = '.tempurl.txt'
        # get url list, write into txt file
        with open(time_file, 'w') as f1:
            for i in asdf['dirname'][0].decode().split(','):
                f1.write(i + '\n')
        if user_time:
            time = user_time.replace('-', ' ')
            cvb = 'echo "%s while read a;do cd /data/web/adsite/.\$a && \cp %s index.html -Rf;done < /root/%s "' % (time, index_file, time_file)
            ddd = '>> /var/spool/cron/root'
            print(cvb, ddd)
            subprocess.Popen('%s %s' % (cvb, ddd), shell=True).communicate()
        else:
            subprocess.Popen('while read a;do cd /data/web/adsite/.$a && \cp %s index.html -Rf;done < /root/%s' % (index_file, time_file), shell=True).communicate()
    def configall(self, file):
        """Run an ansible playbook from /data/web/adtemp and wait for it."""
        subprocess.Popen('ansible-playbook %s' % (file), shell=True, cwd='/data/web/adtemp').communicate()
    def writejson(self, file, dic):
        """Dump `dic` as pretty-printed JSON to `file`."""
        with open(file, 'w') as f1:
            f1.write(json.dumps(dic, indent=4))
    def getoldcode(self, dname):
        """Scrape wx code, company name and cnzz site id from a domain's
        deployed index.html, so they can be re-injected after re-templating."""
        oldfile = '/data/web/adsite/.%s/index.html' % (dname)
        with open(oldfile, 'r') as f1:
            templist = f1.readlines()
        aa = {'wxcode': '', 'comp': '', 'site_id': ''}
        a = 0
        for i in templist:
            if i.startswith('//start'):
                # The wx code sits on the line right after the '//start' marker.
                a = templist.index(i) + 1
                aa['wxcode'] = (templist[a].strip())
                continue
            if 'compgongsi' in i:
                aa['comp'] = i.split('>')[1].split('<')[0]
                continue
            # BUG FIX: the original `if 'cnzz' and 'web_id' in i:` only tested
            # `'web_id' in i` ('cnzz' is an always-truthy literal).
            if 'cnzz' in i and 'web_id' in i:
                aa['site_id'] = i.split('web_id=')[1].split('"')[0]
        return aa
    def change_dir(self, dname, file):
        """Re-run the change-page playbook for `dname`, then re-insert the
        saved wx code on the line after the '//start' marker."""
        htmlfile = '/data/web/adsite/.%s/index.html' % (dname)
        with open('/data/web/adtemp/oldcode.json', 'r') as jf:
            bb = json.load(jf)['wxcode']
        print('bb is', bb)
        print('html file is ', htmlfile)
        subprocess.Popen('ansible-playbook %s' % (file), shell=True, cwd='/data/web/adtemp').communicate()
        with open(htmlfile, 'r') as f1:
            lines = f1.readlines()
        with open(htmlfile, 'w') as f2:
            for line in lines:
                if line.startswith('//start'):
                    print(line)
                    # Replace the line following the marker with the saved code.
                    lines[lines.index(line) + 1] = bb + '\n'
                    f2.write(line)
                    continue
                f2.write(line)
    def huixie(self):
        """Persist the handled uniqcode and redirect the browser back."""
        with open('.uniqcode.txt', 'w') as f1:
            f1.write(self.a)
        #self.write("<script>alert('SUCCESSFUL');window.history.back();</script>") #
        self.write("<script>alert('SUCCESSFUL');window.location.href=document.referrer</script>")  #
# Route table: the single handler is mounted at /index.
application = tornado.web.Application([
    (r"/index", MainHandler),
])
if __name__ == "__main__":
    # Serve on port 2345 until the IOLoop is interrupted.
    application.listen(2345)
    tornado.ioloop.IOLoop.instance().start()
|
# -*- coding: utf-8 -*-
from celery import current_app
from celery.bin import worker
from flask_script import Command
class CeleryWorker(Command):
    """Flask-Script command that starts a celery worker for the current app."""
    def __call__(self, app=None, *args, **kwargs):
        celery_app = current_app._get_current_object()
        runner = worker.worker(app=celery_app)
        # Defaults; anything passed on the command line (kwargs) wins.
        options = dict(loglevel='INFO', concurrency=2)
        options['without-gossip'] = True
        options.update(kwargs)
        runner.run(*args, **options)
|
###
# Tree
# Time Complexity: O(n)
# Space Complexity: O(n)
###
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def findMode(self, root):
        """Return all mode values (most frequent) of a BST.

        Uses an in-order walk (O(n) time); `result` holds current candidates.
        :type root: TreeNode
        :rtype: List[int]
        """
        self.result = []
        self.curcnt = 0
        self.maxcnt = 0
        self.pre = None
        self.dfs(root)
        return self.result
    def dfs(self, cur):
        # In-order traversal: in a BST, equal values appear consecutively,
        # so a running count of the current value's streak suffices.
        if cur is None:
            return
        self.dfs(cur.left)
        self.curcnt = self.curcnt + 1 if (self.pre and cur.val == self.pre.val) else 1
        if self.curcnt > self.maxcnt:
            # New best frequency: restart the candidate list.
            self.maxcnt = self.curcnt
            self.result = [cur.val]
        elif self.curcnt == self.maxcnt:
            self.result.append(cur.val)
        self.pre = cur
        self.dfs(cur.right)
#####
# Time Complexity: O(n)
# Space Complexity: O(1)
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def findMode(self, root):
        """Return the mode(s) of a BST using two in-order passes.

        Pass 1 measures the maximum run length; pass 2 collects every value
        whose run length equals it. O(1) extra space besides recursion.
        :type root: TreeNode
        :rtype: List[int]
        """
        # Pass 1: determine max_cnt.
        self.max_cnt = 0
        self.cur_cnt = 0
        self.prev = None
        self.fill = False
        self.dfs(root)
        # Pass 2: gather values occurring max_cnt times.
        self.re = []
        self.prev = None
        self.cur_cnt = 0
        self.fill = True
        self.dfs(root)
        return self.re
    def dfs(self, root):
        if root is None:
            return
        self.dfs(root.left)
        if self.prev is not None and self.prev.val == root.val:
            self.cur_cnt += 1
        else:
            self.cur_cnt = 1
        if self.fill:
            if self.cur_cnt == self.max_cnt:
                self.re.append(root.val)
        else:
            self.max_cnt = max(self.max_cnt, self.cur_cnt)
        self.prev = root
        self.dfs(root.right)
|
"""Statistics for the assessment of DA methods.
`Stats` is a data container for ([mostly] time series of) statistics.
It comes with a battery of methods to compute the default statistics.
`Avrgs` is a data container *for the same statistics*,
but after they have been averaged in time (after the assimilation has finished).
Instances of these objects are created by `dapper.da_methods.da_method`
(i.e. "`xp`") objects and written to their `.stats` and `.avrgs` attributes.
.. include:: ../docs/stats_etc.md
"""
import warnings
import numpy as np
import scipy.linalg as sla
import struct_tools
from matplotlib import pyplot as plt
from patlib.std import do_once
from tabulate import tabulate
import dapper.tools.liveplotting as liveplotting
import dapper.tools.series as series
from dapper.dpr_config import rc
from dapper.tools.matrices import CovMat
from dapper.tools.progressbar import progbar
class Stats(series.StatPrint):
    """Contains and computes statistics of the DA methods."""

    def __init__(self, xp, HMM, xx, yy, liveplots=False, store_u=rc.store_u):
        """Init the default statistics."""
        ######################################
        # Preamble
        ######################################
        self.xp = xp
        self.HMM = HMM
        self.xx = xx
        self.yy = yy
        self.liveplots = liveplots
        self.store_u = store_u
        self.store_s = any(key in xp.__dict__ for key in
                           ["Lag", "DeCorr"])  # prms used by smoothers

        # Shapes
        K = xx.shape[0] - 1
        Nx = xx.shape[1]
        Ko = yy.shape[0] - 1
        self.K, self.Ko, self.Nx = K, Ko, Nx

        # Methods for summarizing multivariate stats ("fields") as scalars
        # Don't use nanmean here; nan's should get propagated!
        en_mean = lambda x: np.mean(x, axis=0)  # noqa
        self.field_summaries = dict(
            m   = lambda x: en_mean(x),                  # mean-field
            ms  = lambda x: en_mean(x**2),               # mean-square
            rms = lambda x: np.sqrt(en_mean(x**2)),      # root-mean-square
            ma  = lambda x: en_mean(np.abs(x)),          # mean-absolute
            gm  = lambda x: np.exp(en_mean(np.log(x))),  # geometric mean
        )
        # Only keep the methods listed in rc
        self.field_summaries = struct_tools.intersect(self.field_summaries,
                                                      rc.field_summaries)

        # Define similar methods, but restricted to sectors
        self.sector_summaries = {}
        def restrict(fun, inds): return (lambda x: fun(x[inds]))
        for suffix, formula in self.field_summaries.items():
            for sector, inds in HMM.sectors.items():
                f = restrict(formula, inds)
                self.sector_summaries['%s.%s' % (suffix, sector)] = f

        ######################################
        # Allocate time series of various stats
        ######################################
        self.new_series('mu'    , Nx, field_mean='sectors')  # Mean
        self.new_series('spread', Nx, field_mean='sectors')  # Std. dev. ("spread")
        self.new_series('err'   , Nx, field_mean='sectors')  # Error (mu - truth)
        self.new_series('gscore', Nx, field_mean='sectors')  # Gaussian (log) score

        # To save memory, we only store these field means:
        self.new_series('mad' , 1)  # Mean abs deviations
        self.new_series('skew', 1)  # Skewness
        self.new_series('kurt', 1)  # Kurtosis

        if hasattr(xp, 'N'):
            N = xp.N
            self.new_series('w', N, field_mean=True)  # Importance weights
            self.new_series('rh', Nx, dtype=int)      # Rank histogram
            self._is_ens = True
            minN = min(Nx, N)
            self.do_spectral = np.sqrt(Nx*N) <= rc.comps["max_spectral"]
        else:
            self._is_ens = False
            minN = Nx
            self.do_spectral = Nx <= rc.comps["max_spectral"]

        if self.do_spectral:
            # Note: the mean-field and RMS time-series of
            # (i) svals and (ii) umisf should match the corresponding series of
            # (i) spread and (ii) err.
            self.new_series('svals', minN)  # Principal component (SVD) scores
            self.new_series('umisf', minN)  # Error in component directions

        ######################################
        # Allocate a few series for outside use
        ######################################
        self.new_series('trHK' , 1, Ko+1)
        self.new_series('infl' , 1, Ko+1)
        self.new_series('iters', 1, Ko+1)

        # Weight-related
        self.new_series('N_eff' , 1, Ko+1)
        self.new_series('wroot' , 1, Ko+1)
        self.new_series('resmpl', 1, Ko+1)

    def new_series(self, name, shape, length='FAUSt', field_mean=False, **kws):
        """Create (and register) a statistics time series, initialized with `nan`s.

        If `length` is an integer, a `DataSeries` (a trivial subclass of
        `numpy.ndarray`) is made. By default, though, a `series.FAUSt` is created.

        NB: The `sliding_diagnostics` liveplotting relies on detecting `nan`'s
        to avoid plotting stats that are not being used.
        Thus, you cannot use `dtype=bool` or `int` for stats that get plotted.
        """
        # Convert int shape to tuple
        if not hasattr(shape, '__len__'):
            if shape == 1:
                shape = ()
            else:
                shape = (shape,)

        def make_series(parent, name, shape):
            if length == 'FAUSt':
                total_shape = self.K, self.Ko, shape
                store_opts = self.store_u, self.store_s
                tseries = series.FAUSt(*total_shape, *store_opts, **kws)
            else:
                total_shape = (length,)+shape
                # BUG FIX: was `*kws`, which unpacks the dict's *keys* as
                # positional args; keyword options require `**kws` (cf. FAUSt).
                tseries = series.DataSeries(total_shape, **kws)
            register_stat(parent, name, tseries)

        # Principal series
        make_series(self, name, shape)

        # Summary (scalar) series:
        if shape != ():
            if field_mean:
                for suffix in self.field_summaries:
                    make_series(getattr(self, name), suffix, ())
            # Make a nested level for sectors
            if field_mean == 'sectors':
                for ss in self.sector_summaries:
                    suffix, sector = ss.split('.')
                    make_series(struct_tools.deep_getattr(
                        self, f"{name}.{suffix}"), sector, ())

    @property
    def data_series(self):
        """Names of all attributes that hold a `series.DataSeries`."""
        return [k for k in vars(self)
                if isinstance(getattr(self, k), series.DataSeries)]

    def assess(self, k, ko=None, faus=None,
               E=None, w=None, mu=None, Cov=None):
        """Common interface for both `Stats.assess_ens` and `Stats.assess_ext`.

        The `_ens` assessment function gets called if `E is not None`,
        and `_ext` if `mu is not None`.

        faus: One or more of `['f',' a', 'u', 's']`, indicating
              that the result should be stored in (respectively)
              the forecast/analysis/universal attribute.
              Default: `'u' if ko is None else 'au' ('a' and 'u')`.
        """
        # Initial consistency checks.
        if k == 0:
            if ko is not None:
                raise KeyError("DAPPER convention: no obs at t=0. Helps avoid bugs.")
            if self._is_ens:  # (idiom fix: was `== True`)
                if E is None:
                    raise TypeError("Expected ensemble input but E is None")
                if mu is not None:
                    raise TypeError("Expected ensemble input but mu/Cov is not None")
            else:
                if E is not None:
                    raise TypeError("Expected mu/Cov input but E is not None")
                if mu is None:
                    raise TypeError("Expected mu/Cov input but mu is None")

        # Default. Don't add more defaults. It just gets confusing.
        if faus is None:
            faus = 'u' if ko is None else 'au'

        # TODO 4: for faus="au" (e.g.) we don't need to re-**compute** stats,
        # merely re-write them?
        for sub in faus:
            # Skip assessment if ('u' and stats not stored or plotted)
            if k != 0 and ko is None:  # (idiom fix: was `== None`)
                if not (self.store_u or self.LP_instance.any_figs):
                    continue

            # Silence repeat warnings caused by zero variance
            with np.errstate(divide='call', invalid='call'):
                np.seterrcall(warn_zero_variance)

                # Assess
                stats_now = Avrgs()
                if self._is_ens:
                    self.assess_ens(stats_now, self.xx[k], E, w)
                else:
                    self.assess_ext(stats_now, self.xx[k], mu, Cov)
                self.derivative_stats(stats_now)
                self.summarize_marginals(stats_now)

            self.write(stats_now, k, ko, sub)

            # LivePlot -- Both init and update must come after the assessment.
            try:
                self.LP_instance.update((k, ko, sub), E, Cov)
            except AttributeError:
                self.LP_instance = liveplotting.LivePlot(
                    self, self.liveplots, (k, ko, sub), E, Cov)

    def write(self, stat_dict, k, ko, sub):
        """Write `stat_dict` to series at `(k, ko, sub)`."""
        for name, val in stat_dict.items():
            stat = struct_tools.deep_getattr(self, name)
            isFaust = isinstance(stat, series.FAUSt)
            stat[(k, ko, sub) if isFaust else ko] = val

    def summarize_marginals(self, now):
        """Compute Mean-field and RMS values."""
        formulae = {**self.field_summaries, **self.sector_summaries}
        with np.errstate(divide='ignore', invalid='ignore'):
            for stat in list(now):
                field = now[stat]
                for suffix, formula in formulae.items():
                    statpath = stat+'.'+suffix
                    if struct_tools.deep_hasattr(self, statpath):
                        now[statpath] = formula(field)

    def derivative_stats(self, now):
        """Stats that derive from others, and are not specific for `_ens` or `_ext`)."""
        try:
            now.gscore = 2*np.log(now.spread) + (now.err/now.spread)**2
        except AttributeError:
            # happens in case rc.comps['error_only']
            pass

    def assess_ens(self, now, x, E, w):
        """Ensemble and Particle filter (weighted/importance) assessment."""
        N, Nx = E.shape

        # weights
        if w is None:
            w = np.ones(N)/N  # All equal. Also, rm attr from stats:
            if hasattr(self, 'w'):
                delattr(self, 'w')
            # Use non-weight formula (since w=None) for mu computations.
            # The savings are noticeable when rc.comps['error_only'] is noticeable.
            now.mu = E.mean(0)
        else:
            now.w = w
            if abs(w.sum()-1) > 1e-5:
                raise RuntimeError("Weights did not sum to one.")
            now.mu = w @ E

        # Crash checks
        if not np.all(np.isfinite(E)):
            raise RuntimeError("Ensemble not finite.")
        if not np.all(np.isreal(E)):
            raise RuntimeError("Ensemble not Real.")

        # Compute errors
        now.err = now.mu - x
        if rc.comps['error_only']:
            return

        A = E - now.mu

        # While A**2 is approx as fast as A*A,
        # A**3 is 10x slower than A**2 (or A**2.0).
        # => Use A2 = A**2, A3 = A*A2, A4=A*A3.
        # But, to save memory, only use A_pow.
        A_pow = A**2

        # Compute variances
        var = w @ A_pow
        ub = unbias_var(w, avoid_pathological=True)
        var *= ub

        # Compute standard deviation ("Spread")
        s = np.sqrt(var)  # NB: biased (even though var is unbiased)
        now.spread = s

        # For simplicity, use naive (biased) formulae, derived
        # from "empirical measure". See doc/unbiased_skew_kurt.jpg.
        # Normalize by var. Compute "excess" kurt, which is 0 for Gaussians.
        A_pow *= A
        now.skew = np.nanmean(w @ A_pow / (s*s*s))
        A_pow *= A
        now.kurt = np.nanmean(w @ A_pow / var**2 - 3)

        now.mad = np.nanmean(w @ abs(A))

        if self.do_spectral:
            if N <= Nx:
                _, s, UT = sla.svd((np.sqrt(w)*A.T).T, full_matrices=False)
                s *= np.sqrt(ub)  # Makes s^2 unbiased
                now.svals = s
                now.umisf = UT @ now.err
            else:
                P = (A.T * w) @ A
                s2, U = sla.eigh(P)
                s2 *= ub
                now.svals = np.sqrt(s2.clip(0))[::-1]
                now.umisf = U.T[::-1] @ now.err

        # For each state dim [i], compute rank of truth (x) among the ensemble (E)
        E_x = np.sort(np.vstack((E, x)), axis=0, kind='heapsort')
        now.rh = np.asarray(
            [np.where(E_x[:, i] == x[i])[0][0] for i in range(Nx)])

    def assess_ext(self, now, x, mu, P):
        """Kalman filter (Gaussian) assessment."""
        if not np.all(np.isfinite(mu)):
            raise RuntimeError("Estimates not finite.")
        if not np.all(np.isreal(mu)):
            raise RuntimeError("Estimates not Real.")
        # Don't check the cov (might not be explicitly availble)

        # Compute errors
        now.mu = mu
        now.err = now.mu - x
        if rc.comps['error_only']:
            return

        # Get diag(P)
        if P is None:
            var = np.zeros_like(mu)
        elif np.isscalar(P):
            var = np.ones_like(mu) * P
        else:
            if isinstance(P, CovMat):
                var = P.diag
                P = P.full
            else:
                var = np.diag(P)

            if self.do_spectral:
                s2, U = sla.eigh(P)
                now.svals = np.sqrt(np.maximum(s2, 0.0))[::-1]
                now.umisf = (U.T @ now.err)[::-1]

        # Compute stddev
        now.spread = np.sqrt(var)
        # Here, sqrt(2/pi) is the ratio, of MAD/Spread for Gaussians
        now.mad = np.nanmean(now.spread) * np.sqrt(2/np.pi)

    def average_in_time(self, kk=None, kko=None, free=False):
        """Avarage all univariate (scalar) time series.

        - `kk` time inds for averaging
        - `kko` time inds for averaging obs
        """
        tseq = self.HMM.tseq
        if kk is None:
            kk = tseq.mask
        if kko is None:
            kko = tseq.masko

        def average1(tseries):
            avrgs = Avrgs()

            def average_multivariate(): return avrgs
            # Plain averages of nd-series are rarely interesting.
            # => Shortcircuit => Leave for manual computations

            if isinstance(tseries, series.FAUSt):
                # Average series for each subscript
                if tseries.item_shape != ():
                    return average_multivariate()
                for sub in [ch for ch in 'fas' if hasattr(tseries, ch)]:
                    avrgs[sub] = series.mean_with_conf(tseries[kko, sub])
                if tseries.store_u:
                    avrgs['u'] = series.mean_with_conf(tseries[kk, 'u'])

            elif isinstance(tseries, series.DataSeries):
                if tseries.array.shape[1:] != ():
                    return average_multivariate()
                elif len(tseries.array) == self.Ko+1:
                    avrgs = series.mean_with_conf(tseries[kko])
                elif len(tseries.array) == self.K+1:
                    avrgs = series.mean_with_conf(tseries[kk])
                else:
                    raise ValueError

            elif np.isscalar(tseries):
                avrgs = tseries  # Eg. just copy over "duration" from stats

            else:
                raise TypeError(f"Don't know how to average {tseries}")

            return avrgs

        def recurse_average(stat_parent, avrgs_parent):
            for key in getattr(stat_parent, "stat_register", []):
                try:
                    tseries = getattr(stat_parent, key)
                except AttributeError:
                    continue  # Eg assess_ens() deletes .weights if None
                avrgs = average1(tseries)
                recurse_average(tseries, avrgs)
                avrgs_parent[key] = avrgs

        avrgs = Avrgs()
        recurse_average(self, avrgs)
        self.xp.avrgs = avrgs
        if free:
            delattr(self.xp, 'stats')

    def replay(self, figlist="default", speed=np.inf, t1=0, t2=None, **kwargs):
        """Replay LivePlot with what's been stored in 'self'.

        - t1, t2: time window to plot.
        - 'figlist' and 'speed': See LivePlot's doc.

        .. note:: `store_u` (whether to store non-obs-time stats) must
                  have been `True` to have smooth graphs as in the actual LivePlot.

        .. note:: Ensembles are generally not stored in the stats
                  and so cannot be replayed.
        """
        # Time settings
        tseq = self.HMM.tseq
        if t2 is None:
            t2 = t1 + tseq.Tplot

        # Ens does not get stored in stats, so we cannot replay that.
        # If the LPs are initialized with P0!=None, then they will avoid ens plotting.
        # TODO 4: This system for switching from Ens to stats must be replaced.
        #         It breaks down when M is very large.
        try:
            P0 = np.full_like(self.HMM.X0.C.full, np.nan)
        except AttributeError:  # e.g. if X0 is defined via sampling func
            P0 = np.eye(self.HMM.Nx)

        LP = liveplotting.LivePlot(self, figlist, P=P0, speed=speed,
                                   Tplot=t2-t1, replay=True, **kwargs)

        # Remember: must use progbar to unblock read1.
        # Let's also make a proper description.
        desc = self.xp.da_method + " (replay)"

        # Play through assimilation cycles
        for k, ko, t, _dt in progbar(tseq.ticker, desc):
            if t1 <= t <= t2:
                if ko is not None:
                    LP.update((k, ko, 'f'), None, None)
                    LP.update((k, ko, 'a'), None, None)
                LP.update((k, ko, 'u'), None, None)

        # Pause required when speed=inf.
        # On Mac, it was also necessary to do it for each fig.
        if LP.any_figs:
            for _name, updater in LP.figures.items():
                if plt.fignum_exists(_name) and getattr(updater, 'is_active', 1):
                    plt.figure(_name)
                    plt.pause(0.01)
def register_stat(self, name, value):
    """Do `self.name = value` and register `name` in self's `stat_register`.

    Note: `self` is not always a `Stats` object, but could be a "child" of it.
    """
    setattr(self, name, value)
    registry = getattr(self, "stat_register", None)
    if registry is None:
        # First stat on this object: create the registry list.
        registry = self.stat_register = []
    registry.append(name)
class Avrgs(series.StatPrint, struct_tools.DotDict):
    """A `dict` specialized for the averages of statistics.
    Embellishments:
    - `dapper.tools.StatPrint`
    - `Avrgs.tabulate`
    - `getattr` that supports abbreviations.
    """
    def tabulate(self, statkeys=(), decimals=None):
        # Render the selected stats as an aligned text table; '␣' is the
        # internal pad character used by align_col, swapped for spaces here.
        columns = tabulate_avrgs([self], statkeys, decimals=decimals)
        return tabulate(columns, headers="keys").replace('␣', ' ')
    # Abbreviation -> canonical dotted stat path (per path segment).
    abbrevs = {'rmse': 'err.rms', 'rmss': 'spread.rms', 'rmv': 'spread.rms'}
    # Use getattribute coz it gets called before getattr.
    def __getattribute__(self, key):
        """Support deep and abbreviated lookup."""
        # key = abbrevs[key] # Instead of this, also support rmse.a:
        key = '.'.join(Avrgs.abbrevs.get(seg, seg) for seg in key.split('.'))
        if "." in key:
            # Dotted path (possibly produced by abbreviation expansion):
            # delegate to recursive attribute lookup.
            return struct_tools.deep_getattr(self, key)
        else:
            return super().__getattribute__(key)
# In case of degeneracy, variance might be 0, causing warnings
# in computing skew/kurt/MGLS (which all normalize by variance).
# This should and will yield nan's, but we don't want mere diagnostics
# computations to cause repetitive warnings, so we only warn once.
#
# I would have expected this (more elegant solution?) to work,
# but it just makes it worse.
# with np.errstate(divide='warn',invalid='warn'), warnings.catch_warnings():
# warnings.simplefilter("once",category=RuntimeWarning)
# ...
@do_once
def warn_zero_variance(err, flag):
    # np.seterrcall handler (see Stats.assess): warn — once, thanks to
    # @do_once — about numerical errors caused by zero sample variance.
    msg = "\n".join(["Numerical error in stat comps.",
                     "Probably caused by a sample variance of 0."])
    warnings.warn(msg, stacklevel=2)
# Why not do all columns at once using the tabulate module? Coz
# - Want subcolumns, including fancy formatting (e.g. +/-)
# - Want separation (using '|') of attr and stats
# - ...
def align_col(col, pad='␣', missingval='', just=">"):
    r"""Align column.
    Treats `int`s and fixed-point `float`/`str` especially, aligning on the point.
    Example:
    >>> xx = [1, 1., 1.234, 12.34, 123.4, "1.2e-3", None, np.nan, "inf", (1, 2)]
    >>> print(*align_col(xx), sep="\n")
    ␣␣1␣␣␣␣
    ␣␣1.0␣␣
    ␣␣1.234
    ␣12.34␣
    123.4␣␣
    ␣1.2e-3
    ␣␣␣␣␣␣␣
    ␣␣␣␣nan
    ␣␣␣␣inf
    ␣(1, 2)
    """
    def split_decimal(x):
        # Return (int_part, dec_part) for finite fixed-point numbers,
        # (str(x), "int") for integers, and (str(x), False) for anything else
        # (non-numbers, nan/inf, scientific notation).
        text = str(x)
        try:
            value = float(text)
        except ValueError:
            return text, False
        if not np.isfinite(value) or "e" in text.lower():
            return text, False
        whole, dot, frac = text.partition(".")
        return (whole, frac) if dot else (whole, "int")
    # Widest integer and decimal parts among the point-alignable entries.
    n_int = n_dec = -1
    for entry in col:
        whole, frac = split_decimal(entry)
        if frac:
            n_int = max(n_int, len(whole))
            if frac != "int":
                n_dec = max(n_dec, len(frac))
    def frmt(x):
        # Format one entry; numbers are aligned on the decimal point.
        if x is None:
            return missingval
        whole, frac = split_decimal(x)
        out = whole.rjust(n_int, pad)
        if frac == "int":
            if n_dec >= 0:
                out += pad * (1 + n_dec)  # pad where "." and decimals would go
        elif frac:
            out += "." + frac.ljust(n_dec, pad)
        else:
            out = whole  # non-numeric: leave as-is (justified below)
        return out
    formatted = [frmt(entry) for entry in col]
    # Justify everything to the widest cell.
    width = max(len(cell) for cell in formatted)
    justify = str.rjust if just == ">" else str.ljust
    return [justify(cell, width, pad) for cell in formatted]
def unpack_uqs(uq_list, decimals=None):
    """Convert list of `uq`s into dict of lists (of equal-length) of attributes.
    The attributes are obtained by `vars(uq)`,
    and may get formatted somehow (e.g. cast to strings) in the output.
    If `uq` is `None`, then `None` is inserted in each list.
    Else, `uq` must be an instance of `dapper.tools.rounding.UncertainQtty`.
    Parameters
    ----------
    uq_list: list
        List of `uq`s.
    decimals: int
        Desired number of decimals.
        Used for (only) the columns "val" and "prec".
        Default: `None`. In this case, the formatting is left to the `uq`s.
    """
    def as_row(uq):
        # Turn one uq into a dict of printable attributes.
        if not isinstance(uq, series.UncertainQtty):
            # Presumably uq is just a number
            uq = series.UncertainQtty(uq)
        attrs = vars(uq).copy()
        # val/prec: round
        if decimals is None:
            val, prec = str(uq).split(" ±")
        else:
            spec = "%%.%df" % decimals
            val, prec = spec % uq.val, spec % uq.prec
        attrs["val"], attrs["prec"] = val, prec
        # tuned_coord: convert to tuple
        try:
            attrs["tuned_coord"] = tuple(c for c in uq.tuned_coord)
        except AttributeError:
            pass
        return attrs
    cols = {}
    for i, uq in enumerate(uq_list):
        if uq is None:
            continue
        # Insert the formatted attrs as a "row" in `cols`,
        # creating each column (pre-filled with None) on first sight.
        for key, value in as_row(uq).items():
            cols.setdefault(key, [None] * len(uq_list))[i] = value
    return cols
def tabulate_avrgs(avrgs_list, statkeys=(), decimals=None):
    """Tabulate avrgs (val±prec)."""
    if not statkeys:
        statkeys = ['rmse.a', 'rmv.a', 'rmse.f']
    columns = {}
    for stat in statkeys:
        uqs = unpack_uqs([getattr(a, stat, None) for a in avrgs_list], decimals)
        if not uqs:
            raise ValueError(f"The stat. key '{stat}' was not"
                             " found among any of the averages.")
        # Align values and precisions separately, then pair them up.
        vals = align_col([stat] + uqs["val"])
        precs = align_col(['1σ'] + uqs["prec"], just="<")
        header = vals[0] + ' ' + precs[0]
        columns[header] = [f"{v} ±{p}" for v, p in zip(vals[1:], precs[1:])]
    return columns
def center(E, axis=0, rescale=False):
    r"""Center ensemble.
    Makes use of `np` features: keepdims and broadcasting.
    Parameters
    ----------
    E: ndarray
        Ensemble which going to be inflated
    axis: int, optional
        The axis to be centered. Default: 0
    rescale: bool, optional
        If True, inflate to compensate for reduction in the expected variance.
        The inflation factor is \(\sqrt{\frac{N}{N - 1}}\)
        where N is the ensemble size. Default: False
    Returns
    -------
    X: ndarray
        Ensemble anomaly
    x: ndarray
        Mean of the ensemble
    """
    mean = np.mean(E, axis=axis, keepdims=True)
    anomalies = E - mean
    if rescale:
        # Compensate for the variance reduction caused by subtracting the
        # estimated (rather than true) mean.
        N = E.shape[axis]
        anomalies *= np.sqrt(N / (N - 1))
    return anomalies, mean.squeeze(axis=axis)
def mean0(E, axis=0, rescale=True):
    """Like `center`, but only return the anomalies (not the mean).
    Uses `rescale=True` by default, which is beneficial
    when used to center observation perturbations.
    """
    anomalies, _ = center(E, axis=axis, rescale=rescale)
    return anomalies
def inflate_ens(E, factor):
    """Inflate the ensemble (center, inflate, re-combine).
    Parameters
    ----------
    E : ndarray
        Ensemble which going to be inflated
    factor: `float`
        Inflation factor
    Returns
    -------
    ndarray
        Inflated ensemble
    """
    if factor == 1:
        # No-op: avoid the centering round-trip (and return E itself).
        return E
    anomalies, mean = center(E)
    return mean + anomalies * factor
def weight_degeneracy(w, prec=1e-10):
    """Check if the weights are degenerate.
    If it is degenerate, the maximum weight
    should be nearly one since sum(w) = 1
    Parameters
    ----------
    w: ndarray
        Importance weights. Must sum to 1.
    prec: float, optional
        Tolerance of the distance between w and one. Default:1e-10
    Returns
    -------
    bool
        If weight is degenerate True, else False
    """
    largest = w.max()
    return (1 - largest) < prec
def unbias_var(w=None, N_eff=None, avoid_pathological=False):
    """Compute unbias-ing factor for variance estimation.
    Parameters
    ----------
    w: ndarray, optional
        Importance weights. Must sum to 1.
        Only one of `w` and `N_eff` can be `None`. Default: `None`
    N_eff: float, optional
        The "effective" size of the weighted ensemble.
        If not provided, it is computed from the weights.
        The unbiasing factor is $$ N_{eff} / (N_{eff} - 1) $$.
    avoid_pathological: bool, optional
        Avoid weight collapse. Default: `False`
    Returns
    -------
    ub: float
        factor used to unbiasing variance
    Reference
    --------
    [Wikipedia](https://wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights)
    """
    if N_eff is None:
        N_eff = 1/(w@w)
    if avoid_pathological and weight_degeneracy(w):
        # Weights have collapsed: unbiasing would blow up. Skip it.
        return 1
    return 1/(1 - 1/N_eff)  # = N/(N-1) if w == ones(N)/N.
|
# Copyright (c) 2016 Fabian Kochem
from libtree import Node, ReadWriteTransaction
from libtree.core.database import make_dsn_from_env
from libtree.core.query import get_node
from libtree.core.tree import insert_node
import os
import pytest
try:
from psycopg2cffi import compat
except ImportError:
pass
else:
compat.register()
import psycopg2
"""
Create this structure:
/
- nd1
- nd2
- nd2-1
- nd2-1-1
- nd2-leaf
- nd3
"""
# Cache of inserted node ids keyed by the 'type' property; filled by
# get_or_create_nd and reset by the trans fixture.
node_ids = {}
def get_or_create_nd(cur, parent, properties, *args, **kwargs):
    """Return the node whose 'type' matches, inserting it on first use.

    Created ids are memoized in the module-level `node_ids` dict so each
    fixture node is only inserted once per installed tree.
    """
    xtype = properties.get('type')
    cached_id = node_ids.get(xtype)
    if cached_id is not None:
        return get_node(cur, cached_id)
    node = insert_node(cur, parent, properties=properties, *args, **kwargs)
    node_ids[xtype] = node.id
    return node
@pytest.fixture(scope='module')
def trans(request):
    """Module-scoped ReadWriteTransaction on a freshly (re)installed tree."""
    dsn = make_dsn_from_env(os.environ)
    connection = psycopg2.connect(dsn)
    transaction = ReadWriteTransaction(connection, Node)
    # Start clean: drop any schema left over from a previous run, and forget
    # cached node ids (they belong to the dropped tree).
    if transaction.is_installed():
        transaction.uninstall()
    node_ids.clear()
    transaction.install()
    transaction.commit()
    def fin():
        # Remove the schema once the module's tests have finished.
        transaction.uninstall()
        transaction.commit()
    request.addfinalizer(fin)
    return transaction
@pytest.fixture(scope='module')
def cur(trans):
    """Shortcut fixture for the module transaction's database cursor."""
    cursor = trans.cursor
    return cursor
@pytest.fixture
def root(cur):
    """Root node ('/') carrying one property of each supported value type."""
    return get_or_create_nd(cur, None, auto_position=False, properties={
        'type': 'root',
        'boolean': False,
        'integer': 1,
        'dict': {'key': 'value'},
        'list': [{'abc': 2}]
    })
@pytest.fixture
def nd1(cur, root):
    """First child of root."""
    return get_or_create_nd(
        cur, root, position=4, auto_position=False,
        properties={'type': 'nd1', 'title': 'Node 1'})
@pytest.fixture
def nd2(cur, root):
    """Second child of root; overrides some of root's property keys."""
    return get_or_create_nd(
        cur, root, position=5, auto_position=False,
        properties={
            'type': 'nd2',
            'title': 'Node 2',
            'boolean': True,
            'foo': 'bar',
            'dict': {'another key': 'another value'},
        })
@pytest.fixture
def nd3(cur, root):
    """Third child of root."""
    return get_or_create_nd(
        cur, root, position=6, auto_position=False,
        properties={'type': 'nd3', 'title': 'Node 3'})
@pytest.fixture
def nd2_1(cur, nd2):
    """Child of nd2; shadows root's 'dict' key with its own value."""
    return get_or_create_nd(
        cur, nd2, auto_position=False,
        properties={
            'type': 'nd2_1',
            'title': 'Node 2-1',
            'dict': {'key': 'yet another value'},
        })
@pytest.fixture
def nd2_1_1(cur, nd2_1):
    """Child of nd2_1."""
    return get_or_create_nd(
        cur, nd2_1, auto_position=False,
        properties={
            'type': 'nd2_1_1',
            'title': 'Node 2-1-1',
            'boolean': False,
            'list': [{'def': 4}],
        })
@pytest.fixture
def nd2_leaf(cur, nd2_1_1):
    """Deepest node of the nd2 branch."""
    return get_or_create_nd(
        cur, nd2_1_1, auto_position=False,
        properties={'type': 'nd2_leaf', 'title': 'Node 2-leaf'})
|
"""
Base API class including methods shared between all APIs
"""
from datetime import datetime, timedelta
from json.decoder import JSONDecodeError
from typing import Any, Dict, List, Optional
from json.decoder import JSONDecodeError
import requests
from visiology_py.authorization_token import AuthorizationToken
from visiology_py.connection import Connection
class TokenEmissionError(Exception):
    """Raised when the identity server does not return a usable token."""
    def __init__(self, status_code: int, response_text: str) -> None:
        super().__init__(
            f'server returned "bad" status code ({status_code}) '
            f'with response text "{response_text}"'
        )
class BaseApi:
    """Shared plumbing for the API clients: URL construction, token
    emission/caching, and authorized JSON requests."""
    def __init__(
        self,
        api_prefix: str,
        api_version: str,
        authorization_scopes: List[str],
        authorization_headers: Dict[str, str],
        connection: Connection,
    ) -> None:
        self._api_prefix = api_prefix
        self._api_version = api_version
        self._authorization_scopes = authorization_scopes
        self._authorization_headers = authorization_headers
        self._connection = connection
        # Cached token; (re)emitted lazily by _ensure_token when absent/expired.
        self._token: Optional[AuthorizationToken] = None
    def _url(self, path: str) -> str:
        """Absolute URL for `path` on the connection's host."""
        schema = self._connection.schema
        host = self._connection.host
        return f"{schema}://{host}{path}"
    def _prefixed_url(self, path: str) -> str:
        """Absolute URL for `path` under this API's prefix."""
        return self._url(f"{self._api_prefix}{path}")
    def _headers(self, token: AuthorizationToken) -> Dict[str, str]:
        """Request headers: authorization, JSON content type, API version."""
        return {
            **token.to_authorization_header(),
            "Content-Type": "application/json",
            "X-API-VERSION": self._api_version,
        }
    def emit_token(
        self,
    ) -> AuthorizationToken:
        """Obtain a fresh access token from the identity server.

        Raises `TokenEmissionError` on a non-200 status, an unparsable JSON
        body, or a body missing any of the expected token fields.
        """
        response = requests.request(
            "POST",
            self._url("/idsrv/connect/token"),
            headers=self._authorization_headers,
            data={
                "grant_type": "password",
                "scope": " ".join(self._authorization_scopes),
                "response_type": "id_token token",
                "username": self._connection.username,
                "password": self._connection.password,
            },
        )
        if response.status_code != 200:
            raise TokenEmissionError(response.status_code, response.text)
        try:
            token = response.json()
        except Exception:
            # Body was not valid JSON; surface it via TokenEmissionError.
            raise TokenEmissionError(
                response.status_code,
                response.text
            ) from None
        if any(
            [
                "expires_in" not in token,
                "token_type" not in token,
                "access_token" not in token,
            ]
        ):
            raise TokenEmissionError(response.status_code, response.text)
        expires_in = token["expires_in"]
        expires_at = datetime.now() + timedelta(seconds=expires_in)
        return AuthorizationToken(
            type=token["token_type"],
            secret=token["access_token"],
            expires_at=expires_at,
        )
    def _ensure_token(
        self,
    ) -> AuthorizationToken:
        """Return the cached token, emitting a new one if absent or expired."""
        if self._token is None or self._token.is_expired():
            self._token = self.emit_token()
        return self._token
    def _authorized_request(
        self,
        method: str,
        path: str,
        json: Any,
        token: Optional[AuthorizationToken] = None,
    ) -> Any:
        """Perform an authorized request; return the decoded JSON body,
        or None when the (200) response body is empty."""
        if token is None:
            token = self._ensure_token()
        # TODO: specify timeout and use scaling timeout
        response = requests.request(
            method,
            self._prefixed_url(path),
            headers=self._headers(token),
            json=json,
        )
        assert response.status_code == 200, response.text
        try:
            return response.json()
        except JSONDecodeError:
            # Empty body is acceptable (implicitly returns None).
            assert response.text == "", response.text
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: GuoCheng
#
# Created: 19/06/2015
# Copyright: (c) GuoCheng 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import fileinput
if __name__ == '__main__':
num = 4
filename = "data/features_numeric.txt"
file_data = open(filename).readlines()
count_all = 0
count_correct = 0
count_detected = 0
for line in file_data:
linelist = line.split()
tag = linelist[0]
if tag == '1':
count_all += 1
if int(linelist[2]) >= 20 and int(linelist[1]) == 1 and (linelist[18] == '1' or linelist[19] == '1') and linelist[20] == '1':
count_detected += 1
if tag == '1' and int(linelist[2]) >= 20 and int(linelist[1]) == 1 and (linelist[18] == '1' or linelist[19] == '1') and linelist[20] == '1':
count_correct += 1
print count_all
print count_detected
print count_correct
|
import numpy as np
from math import cos
from math import sin
from math import pi
import os
from dolfin import *
'''
This is a helper file. It contains routines
that are somewhat peripheral to the actual
math done in a run.
'''
# Make sure the directory structure where we save our data exists.
dir_list = [ "data",
             "data/square",
             "data/parallelogram",
             "data/antarctica",
             "data/cube" ]
for path in dir_list:
    try:
        os.makedirs(path)
    except OSError:
        # makedirs raises OSError when the directory already exists;
        # only re-raise for genuine failures (permissions, bad path).
        if not os.path.isdir(path):
            raise

# Identity scaling, used when we do not scale the input
# (i.e. no variance normalization).
no_scaling = lambda x: 1.0
def apply_sources(container, b, scaling=no_scaling):
    """Add mesh-specific delta-function source term(s) to the assembled
    right-hand side ``b``.

    The default scaling is none; supplying a scaling callable is how the
    caller ensures the variance equals 1.
    """
    name = container.mesh_name

    # Antarctica gets two sources: one in the bulk and one in the
    # western part, where the boundary is tight.
    if "antarctica" in name:
        ant = dic["antarctica"]
        for location in (ant.source, ant.center_source):
            PointSource(container.V,
                        Point(location),
                        scaling(location)).apply(b)
        return

    # Every other mesh has a single source point.
    if "square" in name:
        source = dic["square"].source
    elif "parallelogram" in name:
        source = dic["parallelogram"].source
    elif "cube" in name:
        source = dic["cube"].source

    PointSource(container.V,
                Point(source),
                scaling(source)).apply(b)
def get_mesh(mesh_name, dims):
    """Construct (or load) the mesh identified by ``mesh_name``.

    For the 2D meshes the domain's corner vertices are also written to a
    text file so plotting scripts can draw the outline. ``dims`` is the
    per-axis resolution (unused for the antarctica mesh).
    """
    # Unit-square corners, listed twice to mirror the original vertex file.
    corners = 2 * [np.array([0.0, 1.0]),
                   np.array([1.0, 1.0]),
                   np.array([1.0, 0.0]),
                   np.array([0.0, 0.0])]

    if "square" in mesh_name:
        vertex_file = "data/square/vertices.txt"
        empty_file(vertex_file)
        for corner in corners:
            add_point(vertex_file, corner[0], corner[1])
        return UnitSquareMesh(dims, dims)

    if "parallelogram" in mesh_name:
        paral = dic["parallelogram"]
        mesh_obj = UnitSquareMesh(dims, dims)
        # The matrix that sends the unit square to the parallelogram.
        A = paral.transformation
        # Apply A to every vertex coordinate in place.
        mesh_obj.coordinates()[:] = np.einsum("ij, kj -> ki", A, mesh_obj.coordinates())
        vertex_file = "data/parallelogram/vertices.txt"
        empty_file(vertex_file)
        # Save the transformed corners of the parallelogram - for
        # plotting purposes.
        for corner in corners:
            mapped = np.dot(A, corner)
            add_point(vertex_file, mapped[0], mapped[1])
        return mesh_obj

    if "antarctica" in mesh_name:
        return Mesh("meshes/antarctica3.xml")

    if "cube" in mesh_name:
        return UnitCubeMesh(dims, dims, dims)
def save_plots( data,
                desc,
                cot ):
    '''
    Save plots and plot data for the FE function ``data``, based on the
    description list ``desc``; ``cot`` is the container holding mesh
    metadata (at least ``mesh_name``).
    '''
    # Directory where we save the data.
    location = "data/" + cot.mesh_name

    # In square and parallelogram we show a cross section, defined by
    # the equation y = slope * x + intercept.
    if "square" in cot.mesh_name or "parallelogram" in cot.mesh_name:
        # Create (truncate) all the files that will hold the data.
        line_file = location + "/line.txt"
        source_file = location + "/source.txt"
        plot_file = location + "/" + add_desc( desc ) + ".txt"
        empty_file( line_file, source_file, plot_file )

        # Save the source location to its designated file.
        source = dic[cot.mesh_name].source
        add_point( source_file, source[0], source[1] )

        # Parametrizes the cross section: dense sampling near the
        # source, coarser further away.
        x_range = np.hstack( ( np.arange( -0.1 , 0.05, 0.001 ),
                               np.arange( 0.05, 0.5 , 0.01 ) ) )
        y_data = []
        x_real = []
        if "square" in cot.mesh_name:
            slope = 0.0
        else:
            slope = .6
        # Choose the intercept so the cross section passes through the source.
        intercept = source[1] - slope * source[0]
        line = lambda x: (x, slope * x + intercept )

        # For every point of the parametrization, evaluate the FE
        # function if the point lies inside the domain.
        for pt in x_range:
            try:
                # Evaluating outside the domain raises; that exception
                # is deliberately swallowed below (best-effort sampling).
                y = data( line(pt) )
                add_point( plot_file, pt, y )
                add_point( line_file, pt, line(pt)[1] )
                y_data.append( y )
                x_real.append( pt )
            # NOTE(review): bare except also hides unrelated errors;
            # presumably only out-of-domain evaluation is expected here.
            except:
                pass

    # Without a cross section, just dump the function to a .pvd file.
    else:
        loc_file = File( location + "/" + add_desc( desc ) + ".pvd" )
        loc_file << data
def add_desc(str_list):
    """Join descriptor strings into one underscore-separated,
    Title_Cased tag (e.g. ["a b", "c"] -> "A_B_C")."""
    return "_".join(part.title().replace(" ", "_") for part in str_list)
def make_tit(desc):
    """Build a display title: each entry Title-Cased, each preceded by a
    single space (so the result keeps a leading space)."""
    return "".join(" " + part.title() for part in desc)
def empty_file( *args ):
for file_name in args:
open(file_name, 'w+').close()
def add_point(plot_file, *args):
    """Append one data point to ``plot_file`` as a single line.

    Each coordinate is written followed by one space (preserving the
    original file format), then a newline.
    """
    line = "".join(str(coordinate) + " " for coordinate in args) + "\n"
    # Use a context manager so the handle is flushed and closed even on
    # error; the original left the append handle to be closed (or not)
    # by the garbage collector.
    with open(plot_file, "a") as handle:
        handle.write(line)
# Each dic entry is a lambda (chosen because a function object has a
# writable __dict__, so we can hang mesh parameters on it as plain
# attributes) that builds the corresponding mesh when called.
dic = {}

dic["square"] = lambda: get_mesh( "square", 256 )
dic["square"].alpha = 121.0
# Location of the delta source inside the unit square.
dic["square"].source = np.array( [ 0.05 , 0.5 ] )

dic["parallelogram"] = lambda: get_mesh( "parallelogram", 128 )
dic["parallelogram"].alpha = 121.
# The matrix that maps the unit square onto the parallelogram.
dic["parallelogram"].transformation = np.array( [
    [ cos(pi/4-pi/8) , cos(pi/4+pi/8) ],
    [ sin(pi/4-pi/8) , sin(pi/4+pi/8) ] ] )
dic["parallelogram"].source = np.array( [ 0.025 , 0.025 ] )

dic["antarctica"] = lambda: get_mesh( "antarctica", 0 )
dic["antarctica"].source = np.array( [ 7e2 , 5e2 ] )
# Second source, used by apply_sources for the antarctica mesh.
dic["antarctica"].center_source = np.array( [ -1.5e3 , 600.0 ] )
dic["antarctica"].alpha = 1e-5
dic["antarctica"].gamma = 1.

dic["cube"] = lambda: get_mesh( "cube", 64 )
dic["cube"].alpha = 25.
dic["cube"].source = np.array( [ 0.05 , 0.5, 0.5] )
|
# Generated by Django 2.2.7 on 2019-11-26 13:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes Doctor.image optional and gives
    it a default path.

    NOTE(review): the default is an absolute path on a developer's
    machine ('/Users/darkhan/...'); it will not resolve on other
    environments — consider a project-relative default.
    """

    dependencies = [
        ('main', '0008_auto_20191126_1243'),
    ]

    operations = [
        migrations.AlterField(
            model_name='doctor',
            name='image',
            field=models.ImageField(blank=True, default='/Users/darkhan/Desktop/Clinic/Images/Снимок экрана 2019-11-25 в 18.42.23.png', upload_to=''),
        ),
    ]
|
import socket  # UDP client

# Server endpoint.
IP_destino = "192.168.0.13"    # server IP address
PORTA_destino = 5005           # server port number

# Create a UDP socket (datagram; no connection handshake).
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

while True:
    MENSAGEM = input("enviar: ")
    print("Mensagem enviada:", MENSAGEM)
    # Send the message to the server over UDP.
    sock.sendto(MENSAGEM.encode('UTF-8'), (IP_destino, PORTA_destino))
    # Block until the server replies (up to 1024 bytes).
    data, addr = sock.recvfrom(1024)
    # Decode for display: the original printed the raw bytes object,
    # which renders as b'...' under Python 3.
    print("Mensagem recebida:", data.decode('UTF-8'))
|
"""controle_gastos URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from contas.views import index, cadastro, contato, sobre, novasenha, base
# URL routing table; Django matches patterns top-to-bottom.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', index),
    path('cadastro/', cadastro, name='cadastrar'),
    path('contato/', contato, name='contato'),
    path('sobre/', sobre, name='sobre'),
    path('novasenha/', novasenha, name='novasenha'),
    # NOTE(review): this '' pattern duplicates the index route above, so
    # the `base` view is unreachable by URL (reverse('base') still
    # resolves to ''). Confirm whether `base` needs its own path.
    path('', base, name='base')
]
|
# Amazon Web Services keyword-search demo using SOAPpy (Python 2).
# NOTE(review): the WSDL endpoint and the hard-coded 'devtag' developer
# token belong to the long-retired AWS ECS v3 SOAP API; both are
# presumably defunct, and credentials should not be committed in source.
import SOAPpy

url = 'http://soap.amazon.com/schemas3/AmazonWebServices.wsdl'
proxy = SOAPpy.WSDL.Proxy(url)

# Show the method names the proxy discovered in the WSDL.
print '%d methods in WSDL:' % len(proxy.methods) + '\n'
for key in proxy.methods.keys():
    print key
print

# Build a 'lite' keyword search request for books.
_query = 'spotted owl'
request = { 'keyword': _query,
            'page': '1',
            'mode': 'books',
            'tag': '',
            'type': 'lite',
            'devtag': '0J356Z09CN88KB743582' }
results = proxy.KeywordSearchRequest(request)

# Display a summary of the results.
print 'Amazon.com search for " ' + _query + ' "\n'
print 'total pages of results (max 10 per page): ' + str(results.TotalPages)
print 'total results: ' + str(results.TotalResults) + '\n'

# Only show the first result here.
if (results.TotalResults > 0):
    print 'displaying first result (of %s):\n' %results.TotalResults
    details = results.Details[0]
    # We must use the _keys() method of SOAPpy Types.py for arrayType.
    for key in details._keys():
        print key + ': ' + details[key]
    print
|
"""This module contains classes to represent image resources."""
import os
from flask import request, current_app
from flask_restful import Resource
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from marshmallow import ValidationError
from app.models import Image, Artist
from app.extensions import db
from http import HTTPStatus
from app.api.helpers import allowed_file_extension, create_directory, create_filepath
class ArtistImageListAPI(Resource):
    """Class to represent a collection of image resources
    for an artist.
    """

    def __init__(self, **kwargs):
        # Marshmallow schema injected by the caller that registers this
        # resource (passed in via kwargs["schema"]).
        self._schema = kwargs["schema"]

    def get(self, artist_id):
        """Return all image resources for a specific artist.

        Responds 404 when the artist does not exist.
        """
        artist = Artist.query.get(artist_id)
        if artist is None:
            return {"message": "Artist could not be found."}, HTTPStatus.NOT_FOUND
        # NOTE(review): an artist with no image yields [None] here, which
        # the schema will serialize as [null] — confirm whether an empty
        # list is intended.
        images = [artist.image]
        return self._schema.dumps(images, many=True), HTTPStatus.OK

    def put(self, artist_id):
        """Create or replace an image resource for a specific artist.

        Expects the upload in the multipart field "artist_image".
        Responds 404 for an unknown artist, 400 for a missing/unnamed
        file or disallowed extension, 204 on success (and for a
        duplicate upload, which is a no-op).
        """
        artist = Artist.query.get(artist_id)
        if artist is None:
            return {"message": "Artist could not be found."}, HTTPStatus.NOT_FOUND
        file = request.files.get("artist_image")
        if file is None:
            return (
                {"message": "Could not find an image file in the request."},
                HTTPStatus.BAD_REQUEST,
            )
        if file.filename == "":
            return {"message": "No selected file."}, HTTPStatus.BAD_REQUEST
        if not allowed_file_extension(file.filename):
            return (
                {
                    "message": f"File extension not allowed. Valid extensions include: {list(current_app.config['ALLOWED_FILE_EXTENSIONS'])}"
                },
                HTTPStatus.BAD_REQUEST,
            )
        # Sanitize the user-supplied filename before it touches the filesystem.
        filename = secure_filename(file.filename)
        create_directory(current_app.config["UPLOAD_DIRECTORY"])
        artist_image = artist.image
        # Creating a new image for the artist.
        if artist_image is None:
            existing_image = Image.query.filter_by(original_filename=filename).first()
            if existing_image is None:
                version = 1
            # Another artist already has this image; increment the
            # version number so the saved file gets a distinct path.
            else:
                version = existing_image.version + 1
        # Replacing an existing image.
        else:
            # NOTE(review): version resets to 1 here even if another
            # artist's image shares this filename — the new filepath
            # could collide with theirs; confirm intended behavior.
            version = 1
            # Duplicate image, no action is needed.
            if artist_image.original_filename == filename:
                return "", HTTPStatus.NO_CONTENT
            else:
                # Not a duplicate: delete the current image file from
                # the filesystem to make room.
                # NOTE(review): the replaced Image DB row is never
                # deleted — verify orphaned rows are cleaned up elsewhere.
                if os.path.exists(artist.image.path):
                    os.remove(artist.image.path)
        destination = create_filepath(filename, version=version)
        file.save(destination)
        image = Image(original_filename=filename, path=destination, version=version)
        artist.image = image
        db.session.commit()
        return {}, HTTPStatus.NO_CONTENT
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.