Dataset schema:

| Column | Type | Values / range |
|---|---|---|
| blob_id | string | lengths 40-40 |
| directory_id | string | lengths 40-40 |
| path | string | lengths 5-261 |
| content_id | string | lengths 40-40 |
| detected_licenses | sequence | lengths 0-45 |
| license_type | string | 2 classes |
| repo_name | string | lengths 8-111 |
| snapshot_id | string | lengths 40-40 |
| revision_id | string | lengths 40-40 |
| branch_name | string | 72 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 530k-616M, with nulls (⌀) |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-24.6k |
| gha_license_id | string | 9 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 40 classes |
| src_encoding | string | 10 classes |
| language | string | 1 class |
| is_vendor | bool | 1 class |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 11-4.05M |
| extension | string | 25 classes |
| content | string | lengths 10-4.04M |
| authors | sequence | lengths 1-1 |
| author_id | string | 578 classes |
6b59d53ff5dca12c2cf49ecda84be12a1c60a12c | a3644ed207867df4d78a04af39ac3e26f86f9012 | /ibvp/language/symbolic/util.py | cf587104d319938fea973aba507443ccc906a896 | [
"MIT"
] | permissive | ibvp/ibvp | 006887be85a37ac4da51664d5fec9244c446cacd | c758b150cbd822bd17444499bea29c53b0606327 | refs/heads/master | 2022-05-07T02:17:46.232332 | 2022-03-20T19:34:13 | 2022-03-20T19:34:13 | 21,990,116 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | from __future__ import division
from __future__ import absolute_import
from six.moves import range
__copyright__ = "Copyright (C) 2010-2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
def pretty(expr):
from ibvp.language.symbolic.mappers import PrettyStringifyMapper
stringify_mapper = PrettyStringifyMapper()
from pymbolic.mapper.stringifier import PREC_NONE
result = stringify_mapper(expr, PREC_NONE)
splitter = "="*75 + "\n"
cse_strs = stringify_mapper.get_cse_strings()
if cse_strs:
result = "\n".join(cse_strs)+"\n"+splitter+result
return result
def join_fields(*args):
from pytools.obj_array import make_obj_array, log_shape
from pymbolic.geometric_algebra import MultiVector, bit_count
res_list = []
for arg in args:
if isinstance(arg, list):
res_list.extend(arg)
elif isinstance(arg, MultiVector):
for grade in arg.all_grades():
for bits in range(2**arg.space.dimensions):
if bit_count(bits) == grade:
res_list.append(arg.data.get(bits, 0))
elif isinstance(arg, np.ndarray):
if log_shape(arg) == ():
res_list.append(arg)
else:
res_list.extend(arg.flat)
else:
res_list.append(arg)
return make_obj_array(res_list)
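# Usage sketch (added; not part of the original module). join_fields flattens
# a mix of scalars, lists, and numpy arrays into a single object array;
# MultiVector inputs are omitted here for brevity.
def _join_fields_example():
    fields = join_fields(7, [1, 2], np.array([3.0, 4.0]))
    # fields is an object array holding [7, 1, 2, 3.0, 4.0]
    return fields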
| [
"inform@tiker.net"
] | inform@tiker.net |
06683c64c9c082713d0b286d60bf3d006bef3569 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/NicolasHug_Surprise/Surprise-master/examples/grid_search_usage.py | f915af8c2eff0478eb4c7a991024a2a4e4aa1ff3 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,150 | py | """
This module describes how to use the GridSearch() class to find the best
parameter combination for a prediction algorithm via cross-validation.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from surprise import GridSearch
from surprise import SVD
from surprise import Dataset
param_grid = {'n_epochs': [5, 10], 'lr_all': [0.002, 0.005],
'reg_all': [0.4, 0.6]}
grid_search = GridSearch(SVD, param_grid, measures=['RMSE', 'FCP'])
# Prepare Data
data = Dataset.load_builtin('ml-100k')
data.split(n_folds=3)
grid_search.evaluate(data)
# best RMSE score
print(grid_search.best_score['RMSE'])
# >>> 0.96117566386
# combination of parameters that gave the best RMSE score
print(grid_search.best_params['RMSE'])
# >>> {'reg_all': 0.4, 'lr_all': 0.005, 'n_epochs': 10}
# best FCP score
print(grid_search.best_score['FCP'])
# >>> 0.702279736531
# combination of parameters that gave the best FCP score
print(grid_search.best_params['FCP'])
# >>> {'reg_all': 0.6, 'lr_all': 0.005, 'n_epochs': 10}
import pandas as pd # noqa
results_df = pd.DataFrame.from_dict(grid_search.cv_results)
print(results_df)
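# Migration note (added; not part of the original example): in later Surprise
# releases the GridSearch class used above was replaced by
# surprise.model_selection.GridSearchCV. A roughly equivalent sketch, assuming
# a recent Surprise version, would be:
#
#   from surprise.model_selection import GridSearchCV
#   gs = GridSearchCV(SVD, param_grid, measures=['rmse', 'fcp'], cv=3)
#   gs.fit(data)
#   print(gs.best_score['rmse'], gs.best_params['rmse'])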
| [
"659338505@qq.com"
] | 659338505@qq.com |
e854ed4a3386c854b4fb23ef278a885098c04eaf | 2b49bf0b7b9a62eb665cb0da9a86d7c65433f8a2 | /Additional/206.Reverse Linked List.py | 06f7ef7c083a993eeb7cd52a2f6ada4422dd50d7 | [] | no_license | samuel871211/My-python-code | f7472fff671437d6181b91d36a77e24eb04678c6 | 3120cfb6ccaeade969dd0ea0ff335b4a5789ba74 | refs/heads/master | 2023-03-04T13:48:37.658549 | 2023-02-28T06:16:52 | 2023-02-28T06:16:52 | 210,172,178 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
a = []
while head != None:
a.append(head.val)
head = head.next
if len(a) == 0:
return None
else:
a.reverse()
newhead = ListNode(a[0])
cur = newhead
for i in range(1,len(a)):
cur.next = ListNode(a[i])
cur = cur.next
return newhead
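# Alternative sketch (added; not part of the original submission): the usual
# O(1)-space reversal re-points each node's next pointer instead of copying
# the values into a list first.
class SolutionIterative:
    def reverseList(self, head: ListNode) -> ListNode:
        prev = None
        cur = head
        while cur != None:
            nxt = cur.next    # remember the remainder of the list
            cur.next = prev   # point the current node backwards
            prev = cur
            cur = nxt
        return prev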
| [
"noreply@github.com"
] | noreply@github.com |
904ddc6a110c928eecd9ed053afa3bf80f4931a3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/25/usersdata/98/11884/submittedfiles/av1_3.py | e38e0f0784c64456ff7dcadb762460593411b8a4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a=int(input('Enter the value of a: '))
b=int(input('Enter the value of b: '))
i=1
cont=0
c=0
while True:
if a%i==0 and b%i==0:
cont=cont+1
c=i
i=i+1
    if i>a or i>b:  # stop once every candidate up to min(a, b) has been tested
break | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e50e19db7754f252118d5e3c69541abe67d0fdab | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/42/usersdata/69/21660/submittedfiles/jain.py | 34c02d431af79001b4eb9414ce0115cad59ff0fc | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # -*- coding: utf-8 -*-
from __future__ import division
import funcoes
'''
TEST INPUT
f = 0.2
dH = 5
L = 3250
Q = 0.005
g = 9.81
v = 0.000001
e = 0.00006
k = 10
The output for this input is approximately: 0.1247 (D) and 0.0224 (f)
'''
f = 0.2
dH = input('Enter the head loss: ')
L = input('Enter the pipe length: ')
Q = input('Enter the flow rate: ')
g = input('Enter the gravitational acceleration: ')
v = input('Enter the kinematic viscosity: ')
e = input('Enter the absolute roughness: ')
k = 10
#comece aqui
import math
def diametro(fn,L,Q,dH):
Diam=((8*fn*L*Q*Q)/(math.pi*math.pi*dH*g))**(1/5)
return Diam
def Reynalds(Q,D,v):
R=4*Q/(math.pi*D*v)
return R
def atrito(Rey,E,D):
s=(E/(3.7*D))+(5.74/(Rey**0.9))
t=(2500/Rey)**6
f=(((64/Rey)**8)+9.5*((math.log(s)-t)**(-16)))**0.125
return f
fn=f  # start the iteration from the initial friction factor guess
for i in range(0,k,1):
    D=diametro(fn,L,Q,dH)
Rey=Reynalds(Q,D,v)
fn=atrito(Rey,e,D)
if 0.000001<=(e/D)<=0.01 and 5000<=Rey<=100000000:
if fn==f:
break
else:
f=fn
print('%.10f'%f)
print('%.10f'%D) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d84e02f5f2815a7f82b35a8daa0cb83b201fc09c | 6e1508ebdaf63b3afee10926bdf74ce9478f3508 | /kadanesalgorithm.py | 4ebc51ef1200e349f04a36b701abf97ccdb58046 | [] | no_license | dopeprogr4mmer/DSA | 5f2741a924bec9b6add7b4d92d207ec553576439 | 18f4bd93b264acfd4cfd91b9aa318bdf502d0339 | refs/heads/main | 2023-07-17T22:33:40.347653 | 2021-08-25T05:28:04 | 2021-08-25T05:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | def maxSubArraySum(a,size):
    # Kadane's algorithm, running-window form: restart the window at a[i]
    # whenever extending the previous window would be worse than starting over.
    # With max_so_far initialised to 0, an all-negative array yields 0
    # (the empty subarray is allowed).
    max_so_far = 0
    max_ending_here = 0
    for i in range(size):
        if max_ending_here + a[i]<a[i]:
            max_ending_here = a[i]
        else:
            max_ending_here+=a[i]
        if(max_so_far<max_ending_here):
            max_so_far = max_ending_here
    print(max_so_far)
    return max_so_far
def max_SubArray_Sum(a,size): #Kadane algo, dynamic-programming form
    # output_arr[i] holds the best sum of a subarray ending at index i
    output_arr = [0]*size
    output_arr[0]=a[0]
    max_sum = a[0]
    for i in range(1,size):
        output_arr[i] = max(a[i], output_arr[i-1]+a[i])
        max_sum = max(max_sum, output_arr[i])
    print(output_arr, max_sum)
    return max_sum
maxSubArraySum([2,3,-6,3,3,-6,1,-5], 5)
| [
"noreply@github.com"
] | noreply@github.com |
c4de4f95686f6d39c4a347e4462b601fbc2bd6d2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03828/s176803120.py | 3c09dd5cfe45d562d5aee2961335ac10dec7d7b7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | from collections import Counter
MOD = 10 ** 9 + 7
def factorize(n):
""" Simple factorize
:param n: number to factorize
:return: list of factors
    time complexity : O(n) per call (trial division up to n)
    space complexity : O(log n) for the list of factors
"""
factors = []
for i in range(2, n+1):
while n % i == 0:
n = n // i
factors.append(i)
return factors
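# Alternative sketch (added; not in the original submission): instead of
# factorizing every i <= N as main() does below, Legendre's formula gives the
# exponent of a prime p in N! directly as sum_{k>=1} floor(N / p**k).
def count_divisors_of_factorial(n, mod=MOD):
    is_prime = [True] * (n + 1)
    ans = 1
    for p in range(2, n + 1):
        if not is_prime[p]:
            continue
        for q in range(p * p, n + 1, p):
            is_prime[q] = False
        e = 0                      # exponent of p in n!
        pk = p
        while pk <= n:
            e += n // pk
            pk *= p
        ans = ans * (e + 1) % mod
    return ans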
def main():
N = int(input())
factors = []
for i in range(1, N+1):
factors += factorize(i)
factor_counts = list(Counter(factors).values())
ans = 1
for v in factor_counts:
ans = ans * (v+1) % MOD
print(ans)
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f970d26407f174a743bcb989cdae5fd18f1cf862 | 969ae96c883fa8aee938a03af40be54dad60f0ca | /query_scripts/intersect_base.py | 13528a869bc3f58b6e98c732106540b26f4a338d | [] | no_license | fuxmanlab/altered_TFBS | 1cd695c734cbbfd23b72c683ff9a531306144337 | 2cc4a3c95836b3f980764619597b37cd967091dc | refs/heads/master | 2022-11-19T06:42:35.100582 | 2020-07-28T14:58:02 | 2020-07-28T14:58:02 | 264,718,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,149 | py | # Base class for loading, saving, and querying the .bed and .vcf
# files
import useful
import tempfile
import os
import glob
import shutil
class IntersectBase(object):
''' Base class for intersecting .bed and .vcf files
with the alterome datafiles using Spark-SQL.'''
def __init__(self, filename, hadoop=False):
if not hadoop:
if not os.path.exists(filename):
raise OSError("File not found: %s" % filename)
self.filename = filename
# The loaded dataframe from the file
self.df = None
#The result of the query
self.intersect_df = None
# Are we on hadoop?
self.hadoop = hadoop
def to_df(self, spark):
''' Convert the file to a Spark DataFrame, stored
internally in self.df. The df is registered as
a Spark SQL temp table. '''
pass
def intersection_query(self,spark):
''' Intersection query for the .bed and .vcf files with the
alterome in the tfbs_df and tf_info_df dataframes. Stores
the result internally in self.intersect_df. '''
pass
def write_df(self, output_csv, npartitions=None):
''' Write in parallel to a set of output CSV files
        and then consolidate them into one.'''
tmp_name = self.df_to_csv(output_csv, npartitions)
if not self.hadoop:
self.consolidate_csv(tmp_name,output_csv)
@useful.timeit
def df_to_csv(self,output_csv, npartitions=None):
# Repartition if asked
if npartitions:
            self.intersect_df = self.intersect_df.repartition(npartitions)  # repartition returns a new DataFrame
# Get a unique temporary filename using the process id
if not self.hadoop:
tmp_name = str(os.getpid())+'_tmp'
tmp_path = os.path.join(os.environ['TMPDIR'],tmp_name+'.csv')
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
self.intersect_df.write.option('header','true').csv(tmp_path)
return tmp_path
else:
self.intersect_df.write.option('header','true').csv(output_csv)
@useful.timeit
def consolidate_csv(self, input_dir,output_csv, delete_input=True):
print("Consolidating parallel CSV files.")
if os.path.exists(output_csv):
os.unlink(output_csv)
# Then write a loop to read them in one-by-one and append to the requested output_csv
csv_files = glob.glob(os.path.join(input_dir,'*.csv'))
shutil.copyfile(csv_files.pop(0),output_csv)
# Now open the output file for appending and add all the
# others to it.
with open(output_csv, 'ab') as outfile:
for fname in csv_files :
with open(fname, 'rb') as infile:
# Throw away the header line
infile.readline()
# Block copy rest of file from input to output without parsing
shutil.copyfileobj(infile, outfile)
# Finally delete the whole temp directory if requested.
if delete_input:
shutil.rmtree(input_dir)
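# Minimal subclass sketch (added; not part of the original module) showing the
# contract described in the docstrings above. The column names, the 'tfbs'
# table, and the three-column BED layout are illustrative assumptions, not the
# project's real schema.
class BedIntersect(IntersectBase):
    def to_df(self, spark):
        # Load the .bed file as a tab-separated DataFrame and register it so
        # that intersection_query can refer to it from Spark SQL.
        self.df = (spark.read.option('sep', '\t').csv(self.filename)
                   .toDF('chrom', 'start', 'end'))
        self.df.createOrReplaceTempView('bed')
    def intersection_query(self, spark):
        # Hypothetical join against a pre-registered 'tfbs' table.
        self.intersect_df = spark.sql(
            'SELECT b.chrom, b.start, b.end, t.* '
            'FROM bed b JOIN tfbs t '
            'ON t.chrom = b.chrom AND t.pos BETWEEN b.start AND b.end')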
| [
"noreply@github.com"
] | noreply@github.com |
403fac664e7532d39cf7a726cf9165c6b7e21555 | fa461310d67a51dc0f473e54bd02c90c12c7f7dc | /Query understanding/demo1.py | a9f1754dc771f5deedea583220bd1d8b0d3f305b | [] | no_license | yangeryang/Ads-ranking- | 624cf215eda0837e0df738a7ec96d2811d053916 | 216c10fa49c52e0fbb913ef2a7d53cd92700d576 | refs/heads/master | 2020-05-22T05:23:33.137029 | 2019-05-12T16:36:27 | 2019-05-12T16:36:27 | 186,234,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | import sys
from pyspark import SparkContext
if __name__ == "__main__":
file = sys.argv[1] #raw train file
sc = SparkContext(appName="demo1")
data_uc = sc.textFile(file).map(lambda line: line.upper())
data_filt = data_uc.filter(lambda line: line.startswith("T"))
#data_uc...
data_filt.saveAsTextFile("demo_T_output6")
sc.stop()
| [
"noreply@github.com"
] | noreply@github.com |
76a29edd0e8bbc220e530784749c7239e7e13007 | 650772c1de39412ed293bdd9f28518d3e50b2ef0 | /transformations/color_demo.py | 4f726733b2310e4fb9232d6f0ae9d75b6b914973 | [] | no_license | tuftsceeo/Onshape-PLUS-Team | 2ecb62d40ba5349cad3ebd39368b771d95d88649 | 40bcd952ca7b84660615d8812c0e3ec3ce0211e6 | refs/heads/master | 2022-12-03T07:22:49.854357 | 2020-08-22T00:47:29 | 2020-08-22T00:47:29 | 285,607,231 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,350 | py | ###############################################################################
# Project name: Color Demo
# File name: color_demo.py
# Author: Therese (Teo) Patrosio @imnotartsy
# Date: 7/21/20
# Description: Connects spike bluetooth to onshape api for 7/23 demo
# History:
# Last modified by Teo 7/24/20
# (C) Tufts Center for Engineering Education and Outreach (CEEO)
###############################################################################
import serial #pip3 install pyserial
import utils.transform_utils as transform
import utils.onshape_utils as onshape
import argparse
from datetime import datetime
### Connect to Serial
ser = serial.Serial('/dev/tty.LEGOHubOwen-SerialPortP') # serial.Serial(port_args.port) #
### Gets Spike starter message
for i in range(0,2):
line = ser.readline()
print(line.decode(), end="")
### Catch case for if spike goes into data spewing mode (untested) (WIP)
# Cancels any Data Sending
ser.write('\x03'.encode())
ser.write('\x03'.encode())
ser.write('\x03'.encode())
ser.write('\x03'.encode())
### Message to send to serial
## This program gets the gesture of the spike
message = """
import hub,utime\r\n
from spike.control import wait_for_seconds\r\n
def setMotor(large, small):\r\n\b\b
hub.port.C.motor.run_to_position(large, 50)\r\n\b
hub.port.D.motor.run_to_position(small, 50)\r\n\b
\r\n\r\n\r\n\r\n
"""
print(message)
ser.write('\x03'.encode())
ser.write(message.encode())
last = 0
assembly = onshape.getAssemblyInfo(False)
# print(assembly["MvFKyhclA9pW5axe3"]["fullPath"])
### Read Data and call API
for i in range(0,1000):
line = ser.readline()
## Prints serial line
print(line.decode(), end="")
try:
curr = int(line.decode())
except:
print("position not updated")
curr = last
## If state changes, call a transform
if(abs(curr - last) > 5):
## Sets transformation
args = [0, 0, 0, 0, 0, 1, curr]
## Transforms set up (get matrix and part id from assembly info)
M = transform.getTranslationMatrix(args, False)
partsToTransform = [assembly["MvFKyhclA9pW5axe3"]["fullPath"]] # selects motor axle
state = onshape.postTransform(M, False, partsToTransform, False)
print("\tTransformation status:", state, datetime.now())
last = curr
ser.close() | [
"noreply@github.com"
] | noreply@github.com |
736a6dd319cdb36e01d57e42fdf371c5db550c22 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/ghwatson_faststyle/faststyle-master/losses.py | 7a4cc6b60cea27257d8a4820a88ca8fb5d7f1574 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 3,526 | py | """
This file contains the different loss functions.
File author: Grant Watson
Date: Feb 2017
"""
import tensorflow as tf
import numpy as np
def content_loss(content_layers, target_content_layers,
content_weights):
"""Defines the content loss function.
:param content_layers
List of tensors for layers derived from training graph.
:param target_content_layers
List of placeholders to be filled with content layer data.
:param content_weights
List of floats to be used as weights for content layers.
"""
assert(len(target_content_layers) == len(content_layers))
num_content_layers = len(target_content_layers)
# Content loss
content_losses = []
for i in xrange(num_content_layers):
content_layer = content_layers[i]
target_content_layer = target_content_layers[i]
content_weight = content_weights[i]
loss = tf.reduce_sum(tf.squared_difference(content_layer,
target_content_layer))
loss = content_weight * loss
_, h, w, c = content_layer.get_shape().as_list()
num_elements = h * w * c
loss = loss / tf.cast(num_elements, tf.float32)
content_losses.append(loss)
content_loss = tf.add_n(content_losses, name='content_loss')
return content_loss
def style_loss(grams, target_grams, style_weights):
"""Defines the style loss function.
:param grams
List of tensors for Gram matrices derived from training graph.
:param target_grams
List of numpy arrays for Gram matrices precomputed from style image.
:param style_weights
List of floats to be used as weights for style layers.
"""
assert(len(grams) == len(target_grams))
num_style_layers = len(target_grams)
# Style loss
style_losses = []
for i in xrange(num_style_layers):
gram, target_gram = grams[i], target_grams[i]
style_weight = style_weights[i]
_, c1, c2 = gram.get_shape().as_list()
size = c1*c2
loss = tf.reduce_sum(tf.square(gram - tf.constant(target_gram)))
loss = style_weight * loss / size
style_losses.append(loss)
style_loss = tf.add_n(style_losses, name='style_loss')
return style_loss
def tv_loss(X):
"""Creates 2d TV loss using X as the input tensor. Acts on different colour
channels individually, and uses convolution as a means of calculating the
differences.
:param X:
4D Tensor
"""
# These filters for the convolution will take the differences across the
# spatial dimensions. Constructing these on paper has to be done carefully,
# but can be easily understood when one realizes that the sub-3x3 arrays
# should have no mixing terms as the RGB channels should not interact
# within this convolution. Thus, the 2 3x3 subarrays are identity and
# -1*identity. The filters should look like:
# v_filter = [ [(3x3)], [(3x3)] ]
# h_filter = [ [(3x3), (3x3)] ]
ident = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
v_array = np.array([[ident], [-1*ident]])
h_array = np.array([[ident, -1*ident]])
v_filter = tf.constant(v_array, tf.float32)
h_filter = tf.constant(h_array, tf.float32)
vdiff = tf.nn.conv2d(X, v_filter, strides=[1, 1, 1, 1], padding='VALID')
hdiff = tf.nn.conv2d(X, h_filter, strides=[1, 1, 1, 1], padding='VALID')
loss = tf.reduce_sum(tf.square(hdiff)) + tf.reduce_sum(tf.square(vdiff))
return loss
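def gram_matrix_example(feature_maps):
    """Sketch (added; not part of the original file): build the batched Gram
    matrices that style_loss above consumes, starting from an NHWC feature-map
    tensor. The 1/(h*w) normalization is an assumption and may differ from the
    convention used elsewhere in this repository.
    """
    _, h, w, c = feature_maps.get_shape().as_list()
    flat = tf.reshape(feature_maps, [-1, h * w, c])
    # Batched matmul giving shape [batch, c, c]: F^T F over spatial positions.
    grams = tf.matmul(flat, flat, transpose_a=True)
    return grams / (h * w)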
| [
"659338505@qq.com"
] | 659338505@qq.com |
40a5badf20a8815924f3d9ea4e245dba81149a88 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03588/s910432178.py | 314d7a583d1067ee67cd31e93342774353c07a3a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import sys
def solve():
readline = sys.stdin.buffer.readline
mod = 10 ** 9 + 7
n = int(readline())
ab = [list(map(int, readline().split())) for _ in range(n)]
ab.sort()
print((ab[-1][0] - ab[0][0] + 1) + (ab[0][0] - 1) + (ab[-1][1]))
if __name__ == '__main__':
solve()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
702e397972e162ab5ddf2af196684a76f393bd61 | 71673d845952b50986d1c21dc5bbbcab2a2a2651 | /introduction_to_lxml.py | 0783fcf78d6a6982eff93f7b0558518976c20d60 | [] | no_license | afcarl/introductionToWebScraping | 77a44bfb7655e44231bed216d37b015e3cf52a5c | d1039aeee87365f2807dd198e53bd1bb6224a550 | refs/heads/master | 2020-03-26T04:23:54.052825 | 2015-06-18T14:23:40 | 2015-06-18T14:23:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | import requests
import lxml.html
base_url = "https://www.google.com"
def scrape(url,base_url,depth):
if depth == 0:
return True
r = requests.get(url)
html = lxml.html.fromstring(r.text)
links = html.xpath("//a/@href")
for ind,link in enumerate(links):
if "http" in link:
print link
else:
print base_url+link
links[ind] = base_url+link
for link in links:
scrape(link,base_url,depth-1)
scrape(base_url,base_url,5)
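# Variant sketch (added; not part of the original script): the same recursive
# crawl, but with a visited set so pages are not fetched twice; the timeout
# value is an arbitrary assumption. Defined only, not called.
def scrape_once(url, base_url, depth, visited=None):
    if visited is None:
        visited = set()
    if depth == 0 or url in visited:
        return visited
    visited.add(url)
    r = requests.get(url, timeout=10)
    html = lxml.html.fromstring(r.text)
    for link in html.xpath("//a/@href"):
        full = link if "http" in link else base_url + link
        scrape_once(full, base_url, depth - 1, visited)
    return visited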
| [
"ericschles@gmail.com"
] | ericschles@gmail.com |
611ca1b0710e080956b3f0259d5042c17ada5814 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/signalr/azure-mgmt-signalr/azure/mgmt/signalr/aio/operations/_usages_operations.py | aa1860efef37dbf2413c285639f2957501b5bfdb | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 5,150 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._usages_operations import build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""UsagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.signalr.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
location: str,
**kwargs: Any
) -> AsyncIterable["_models.SignalRUsageList"]:
"""List resource usage quotas by location.
:param location: the location like "eastus".
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SignalRUsageList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.signalr.models.SignalRUsageList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SignalRUsageList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SignalRUsageList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/{location}/usages'} # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
df4017d719a457eb43a5aa6c9d289e9e674a9b84 | e61802befd592a18d535999277e3d4767042a441 | /problem_11.py | 8c4304f4ad7e6f7973982931f25f9f46d2d2458a | [] | no_license | subenakhatun/pythonbasic | 5962804d4aaee18c9bc5e8f1d178ae846efabd85 | 36066df0a9355c6d451e80e06fba2fb712759f3d | refs/heads/master | 2021-07-20T23:21:49.113162 | 2020-05-15T04:11:19 | 2020-05-15T04:11:19 | 163,249,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | str = 'subena'
print(len(str)) | [
"noreply@github.com"
] | noreply@github.com |
45158fd73f856d10753fdab1158bbd52cbc902c4 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /es_maml/policies.py | f901bf44a33836629722349dd7c0953bd0a94da7 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 9,160 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains policies used in MAML."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
class Policy(object):
r"""Abstract class for different policies \Pi: S -> A.
Class is responsible for creating different policies and provides an interface
for computing actions recommended by policies in different input states.
In particular, this class provides an interface that accepts compressed
vectorized form of the policy and decompresses it.
Standard procedure for improving the parameters of the policy with an
interface given by the class:
policy = policies.ParticularClassThatInheritsFromBaseClass(...)
vectorized_network = policy.get_initial()
while(...):
new_vectorized_network = SomeTransformationOf(vectorized_network)
policy.update(new_vectorized_network)
and SomeTransformationOf is a single step of some optimization procedure such
as gradient descent that sees the policy in the vectorized form.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def update(self, vectorized_parameters):
"""Updates the policy using new parameters from <vectorized_parameters>.
Updates the parameters of the policy using new parameters encoded by
<vectorized_parameters>. The size of the vector <vectorized_parameters>
should be the number of all biases and weights of the neural network.
We use the convention where parameters encoding matrices of connections of
the neural network come in <vectorized_parameters> before parameters
encoding biases and furthermore the order in <vectorized_parameters> of
parameters encoding weights for different matrices/biases-vectors is
inherited from the order of these matrices/biases-vectors in the
decompressed neural network. Details regarding compression depend on
different neural network architectures used (such as: structured and
unstructured) and are given in the implementations of that abstract method
in specific classes that inherit from Policy.
Args:
vectorized_parameters: parameters of the neural network in the vectorized
form.
Returns:
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_action(self, state):
"""Returns the action proposed by a policy in a given state.
Returns an action proposed by the policy in <state>.
Args:
state: input state
Returns:
Action proposed by the policy represented by an object of the class in a
given state.
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_initial(self):
"""Returns the default parameters of the policy in the vectorized form.
Initial parameters of the policy are output in the vectorized form.
Args:
Returns:
Numpy array encoding in the vectorized form initial parameters of the
policy.
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_total_num_parameters(self):
"""Outputs total number of parameters of the policy.
Args:
Returns:
Total number of parameters used by the policy.
"""
raise NotImplementedError('Abstract method')
class BasicTFPolicy(Policy):
"""Basic Policy implemented in Tensorflow."""
def __init__(self, state_dimensionality, action_dimensionality, hidden_layers,
scope):
self.state_dimensionality = state_dimensionality
self.action_dimensionality = action_dimensionality
self.input_ph = tf.placeholder(
dtype=tf.float32, shape=[None, self.state_dimensionality])
self.output_ph = tf.placeholder(
dtype=tf.float32, shape=[None, self.action_dimensionality])
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
self.out = self.input_ph
for i, layer_size in enumerate(hidden_layers):
self.out = tf.layers.dense(
self.out, layer_size, activation=tf.nn.relu, name='h' + str(i))
self.main_out = tf.layers.dense(
self.out, self.action_dimensionality, name='main_out')
self.secondary_out = tf.layers.dense(
self.out, self.action_dimensionality, name='secondary_out')
self.action = tfp.distributions.Normal(
loc=self.main_out, scale=self.secondary_out).sample()
self.loss = tf.losses.mean_squared_error(self.main_out, self.output_ph)
self.obj_tensor = -1.0 * self.loss
self.tf_params = tf.trainable_variables(scope)
self.shapes = [v.shape.as_list() for v in self.tf_params]
self.sizes = [int(np.prod(s)) for s in self.shapes]
self.total_nb_parameters = sum(self.sizes)
self.assign_ph_dict = {
v: tf.placeholder(dtype=tf.float32, shape=v.shape.as_list())
for v in self.tf_params
}
self.assign_ops = []
for v in self.tf_params:
self.assign_ops.append(v.assign(self.assign_ph_dict[v]))
with tf.control_dependencies(self.assign_ops):
# This is needed to input Numpy Params into network temporarily
self.action = tf.identity(self.action)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.np_params = np.concatenate([
self.sess.run(tf.reshape(tf_param, [-1])) for tf_param in self.tf_params
])
def update(self, flattened_weights):
self.np_params = flattened_weights
def get_action(self, state):
ph_dict = {}
for ind, v in enumerate(self.tf_params):
numpy_flat_val = self.np_params[sum(self.sizes[:ind]
):sum(self.sizes[:ind + 1])]
numpy_reshaped = np.reshape(numpy_flat_val, self.shapes[ind])
v_ph = self.assign_ph_dict[v]
ph_dict[v_ph] = numpy_reshaped
ph_dict[self.input_ph] = state.reshape(-1, self.state_dimensionality)
action_numpy = self.sess.run(self.action, feed_dict=ph_dict)
return action_numpy.flatten()
def get_initial(self):
return self.np_params
def get_total_num_parameters(self):
return self.total_nb_parameters
class DeterministicNumpyPolicy(Policy):
"""Deterministic Policy implemented in Numpy."""
def __init__(self,
state_dimensionality,
action_dimensionality,
hidden_layers,
init_sd=None):
self.state_dimensionality = state_dimensionality
self.action_dimensionality = action_dimensionality
self.layers = hidden_layers + [action_dimensionality]
self.layers.insert(0, state_dimensionality)
self.weights = []
self.biases = []
self.weight_positions = []
self.bias_positions = []
self.init_params = []
flat_pos = 0
for dims in zip(self.layers[:-1], self.layers[1:]):
in_size = dims[0]
out_size = dims[1]
if init_sd is None:
init_sd = np.sqrt(2.0 / (in_size))
init_weights = init_sd * np.random.normal(0, 1, size=(out_size * in_size))
self.init_params.extend(init_weights.tolist())
self.weights.append(np.reshape(init_weights, (out_size, in_size)))
self.weight_positions.append(flat_pos)
flat_pos += out_size * in_size
init_biases = np.zeros(out_size)
self.init_params.extend(init_biases.tolist())
self.biases.append(init_biases)
self.bias_positions.append(flat_pos)
flat_pos += out_size
self.weight_positions.append(flat_pos)
def update(self, flat_weights):
for i, dims in enumerate(zip(self.layers[:-1], self.layers[1:])):
in_size = dims[0]
out_size = dims[1]
start_pos = self.weight_positions[i]
end_pos = start_pos + (out_size * in_size)
self.weights[i] = np.reshape(
np.array(flat_weights[start_pos:end_pos]), (out_size, in_size))
start_pos = self.bias_positions[i]
end_pos = start_pos + out_size
self.biases[i] = np.reshape(
np.array(flat_weights[start_pos:end_pos]), (out_size))
def get_action(self, state):
neuron_values = np.reshape(np.array(state), (self.state_dimensionality))
for i in range(len(self.weights)):
neuron_values = np.matmul(self.weights[i], neuron_values)
neuron_values += self.biases[i]
if i < len(self.weights) - 1:
np.maximum(neuron_values, 0, neuron_values)
np.tanh(neuron_values, neuron_values) # this is sometimes not needed
return neuron_values
def get_initial(self):
return np.array(self.init_params)
def get_total_num_parameters(self):
return self.weight_positions[-1]
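# Usage sketch (added; not part of the original module), following the
# update/get_initial loop described in the Policy docstring above. The
# random-search update rule is purely illustrative and is not the ES-MAML
# algorithm; rollout_reward is a user-supplied evaluation function.
def _policy_usage_example(rollout_reward, num_steps=10, sigma=0.01):
    policy = DeterministicNumpyPolicy(
        state_dimensionality=4, action_dimensionality=2, hidden_layers=[8])
    params = policy.get_initial()
    best_reward = -np.inf
    for _ in range(num_steps):
        candidate = params + sigma * np.random.normal(size=params.shape)
        policy.update(candidate)
        reward = rollout_reward(policy)
        if reward > best_reward:
            best_reward, params = reward, candidate
    policy.update(params)
    return policy, best_reward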
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
3b8746a1cdd4600634297132c55f8cb3205475c4 | d8349b7c3ca5289ea4627719699ae88b536fa24e | /uhr.py | bb9d4cef9887219b82c8773ba0814e754bdfe453 | [] | no_license | Mighty-Yth/Affinity | 8277ae59785f5663b1458e579f9f49e7719b4871 | a4f92421f014c0b296596234b0727bb2b0f526f1 | refs/heads/master | 2020-03-28T20:29:42.009120 | 2018-09-17T06:00:18 | 2018-09-17T06:00:18 | 149,075,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | import discord
from discord.ext import commands
class Uhr:
def __init__(self, identity,user,EXP):
self.identity = identity
self.user= user
self.EXP = EXP
def __str__(self):
return self.identity + ':' + self.user+':' + str(self.EXP)
def deposit(self,amount):
if amount >= 0:
self.EXP += amount
def remove(self,amount):
if amount >= 0 and amount<= self.EXP:
self.EXP -= amount | [
"noreply@github.com"
] | noreply@github.com |
c92b4463310cabc5b593f28b34d7d29802149be3 | 8f4e9d24de3dfbd2efae58877ab0043a7da57831 | /Learn_PhythonEx/ex6.py | a9b2054e8db34cbb6de3068fbfe0bc208451d780 | [] | no_license | dersonnex/Python_learning | 1cbcfe428a4765adabdca65d275b63c37acb0ea8 | 7827962c5f208b36c6511a20d220cba609494853 | refs/heads/master | 2021-01-12T06:00:52.179117 | 2017-11-14T09:43:54 | 2017-11-14T09:43:54 | 77,274,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | x= "There are %d types of people." % 10 # defines veriable X
binary = "binary" # defines veriable binary
do_not = "don't" # defines veriable do_not
y = "Those who know %s and those who %s." % (binary, do_not) # defines veriable y
print x
print y
print "I said: %r." % x #I said :there are 10 types of people.
print "I also said: '%s'." % y
hilarious = False
joke_evaluation = "Isn't that joke so fanny?! %r"
print joke_evaluation % hilarious
w = "This is the left side of ..."
e = "a string with a right side."
print w + e | [
"noreply@github.com"
] | noreply@github.com |
ca80285ee2929ac20cf43ad7fff92fb60b9efdea | f81c8e4d702d5c88af92c691d35b6f9c0d2f4390 | /backend/dark_waterfall_26026/wsgi.py | e5039146e98431c055564aea9a661c25a52173fd | [] | no_license | crowdbotics-apps/dark-waterfall-26026 | bdfd44240dae3c1ad20ed8b7a8da701308db5958 | 95f9eda959b6d21778ff59db2c5c9a585d6a670c | refs/heads/master | 2023-04-12T17:31:25.091727 | 2021-04-29T19:14:56 | 2021-04-29T19:14:56 | 362,922,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | """
WSGI config for dark_waterfall_26026 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dark_waterfall_26026.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ae7a1e257d3423cfd604b1e6c27ffe19ee1012f5 | 6b3e8b4291c67195ad51e356ba46602a15d5fe38 | /rastervision2/examples/utils.py | d521e74560b2de4494f0d0ff4344208ee3e221b0 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | csaybar/raster-vision | 4f5bb1125d4fb3ae5c455db603d8fb749221dd74 | 617ca15f64e3b8a391432306a743f7d0dfff352f | refs/heads/master | 2021-02-26T19:02:53.752971 | 2020-02-27T17:25:31 | 2020-02-27T17:25:31 | 245,547,406 | 2 | 1 | NOASSERTION | 2020-03-07T01:24:09 | 2020-03-07T01:24:08 | null | UTF-8 | Python | false | false | 4,864 | py | import csv
from io import StringIO
import tempfile
import os
import rasterio
from shapely.strtree import STRtree
from shapely.geometry import shape, mapping
import shapely
from rastervision.core import Box
from rastervision.data import RasterioCRSTransformer, GeoJSONVectorSource
from rastervision.utils.files import (file_to_str, file_exists, get_local_path,
upload_or_copy, make_dir, json_to_file)
from rastervision.filesystem import S3FileSystem
def str_to_bool(x):
if type(x) == str:
if x.lower() == 'true':
return True
elif x.lower() == 'false':
return False
else:
raise ValueError('{} is expected to be true or false'.format(x))
return x
def get_scene_info(csv_uri):
csv_str = file_to_str(csv_uri)
reader = csv.reader(StringIO(csv_str), delimiter=',')
return list(reader)
def crop_image(image_uri, window, crop_uri):
im_dataset = rasterio.open(image_uri)
rasterio_window = window.rasterio_format()
im = im_dataset.read(window=rasterio_window)
with tempfile.TemporaryDirectory() as tmp_dir:
crop_path = get_local_path(crop_uri, tmp_dir)
make_dir(crop_path, use_dirname=True)
meta = im_dataset.meta
meta['width'], meta['height'] = window.get_width(), window.get_height()
meta['transform'] = rasterio.windows.transform(
rasterio_window, im_dataset.transform)
with rasterio.open(crop_path, 'w', **meta) as dst:
dst.colorinterp = im_dataset.colorinterp
dst.write(im)
upload_or_copy(crop_path, crop_uri)
def save_image_crop(image_uri,
image_crop_uri,
label_uri=None,
label_crop_uri=None,
size=600,
min_features=10,
vector_labels=True):
"""Save a crop of an image to use for testing.
If label_uri is set, the crop needs to cover >= min_features.
Args:
image_uri: URI of original image
image_crop_uri: URI of cropped image to save
label_uri: optional URI of label file
label_crop_uri: optional URI of cropped labels to save
size: height and width of crop
Raises:
ValueError if cannot find a crop satisfying min_features constraint.
"""
if not file_exists(image_crop_uri):
print('Saving test crop to {}...'.format(image_crop_uri))
old_environ = os.environ.copy()
try:
request_payer = S3FileSystem.get_request_payer()
if request_payer == 'requester':
os.environ['AWS_REQUEST_PAYER'] = request_payer
im_dataset = rasterio.open(image_uri)
h, w = im_dataset.height, im_dataset.width
extent = Box(0, 0, h, w)
windows = extent.get_windows(size, size)
if label_uri and vector_labels:
crs_transformer = RasterioCRSTransformer.from_dataset(
im_dataset)
vs = GeoJSONVectorSource(label_uri, crs_transformer)
geojson = vs.get_geojson()
geoms = []
for f in geojson['features']:
g = shape(f['geometry'])
geoms.append(g)
tree = STRtree(geoms)
def p2m(x, y, z=None):
return crs_transformer.pixel_to_map((x, y))
for w in windows:
use_window = True
if label_uri and vector_labels:
w_polys = tree.query(w.to_shapely())
use_window = len(w_polys) >= min_features
if use_window and label_crop_uri is not None:
print('Saving test crop labels to {}...'.format(
label_crop_uri))
label_crop_features = [
mapping(shapely.ops.transform(p2m, wp))
for wp in w_polys
]
label_crop_json = {
'type':
'FeatureCollection',
'features': [{
'geometry': f
} for f in label_crop_features]
}
json_to_file(label_crop_json, label_crop_uri)
if use_window:
crop_image(image_uri, w, image_crop_uri)
if not vector_labels and label_uri and label_crop_uri:
crop_image(label_uri, w, label_crop_uri)
break
if not use_window:
raise ValueError('Could not find a good crop.')
finally:
os.environ.clear()
os.environ.update(old_environ)
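# Usage sketch (added; not part of the original module). All URIs below are
# hypothetical placeholders rather than real project data.
def _save_crop_example():
    save_image_crop('s3://example-bucket/scene.tif',
                    's3://example-bucket/scene-crop.tif',
                    label_uri='s3://example-bucket/labels.geojson',
                    label_crop_uri='s3://example-bucket/labels-crop.geojson',
                    size=600,
                    min_features=10)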
| [
"lewfish@gmail.com"
] | lewfish@gmail.com |
811fd686f5129b674ddd6d46c77719477b3fb263 | b2eb8af13e5532fc5c613bbd68af97fa5938b758 | /beginner level/count digits.py | 084171349b4c07bb3c473b3a1c85a5dcdc51e228 | [] | no_license | rahasudha2910/python-programming | 81964ffd61c6a814e22543a9315b05eca028fd59 | f3cfbb9a3d368cd17fbd59c6ce4affa83fe36585 | refs/heads/master | 2021-04-06T00:24:45.160387 | 2018-05-03T06:16:38 | 2018-05-03T06:16:38 | 125,213,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | count=0
number=int(input())
while(number>0):
    number=number//10  # integer division so the loop works on whole numbers
    count=count+1
print("number of digits:",count)
| [
"noreply@github.com"
] | noreply@github.com |
c2835b1f8a3632284eca779d2dc1f17bfaf30295 | 6d501ea43b1a52bf4af44ae5677eba8b928ffec3 | /directory/signals.py | e1d22e0a309d7321f2db634715374ef5fabc6e4f | [] | no_license | mozilla/hive-django | 78d5e7bf687e2311a41d2b6d555b9671c4270b4d | bf95dce0af0148ecacde2256d235788fd79c7d5e | refs/heads/master | 2023-08-27T12:47:36.977377 | 2016-05-04T21:12:47 | 2016-05-04T21:12:47 | 55,106,672 | 0 | 2 | null | 2016-05-04T21:12:47 | 2016-03-31T00:12:58 | Python | UTF-8 | Python | false | false | 1,684 | py | from django.dispatch import receiver
from django.contrib.sites.models import Site
from django.db.models.signals import post_save
from django.contrib.auth.signals import user_logged_in
from django.contrib import messages
from registration.signals import user_activated
from .models import City, User, Organization, Membership, is_user_vouched_for
@receiver(post_save, sender=City)
def clear_site_cache_when_city_changes(**kwargs):
# It's possible that the site may be associated with a different
# city now, so clear the site cache.
Site.objects.clear_cache()
@receiver(post_save, sender=User)
def create_membership_for_user(sender, raw, instance, **kwargs):
if raw: return
if not len(Membership.objects.filter(user=instance)):
membership = Membership(user=instance)
membership.save()
@receiver(user_activated)
def auto_register_user_with_organization(sender, user, request, **kwargs):
if user.membership.organization: return
orgs = Organization.objects.possible_affiliations_for(user)
if orgs.count() != 1: return
org = orgs[0]
user.membership.organization = org
user.membership.save()
@receiver(user_logged_in)
def tell_user_to_update_their_profile(sender, user, request, **kwargs):
if not is_user_vouched_for(user): return
if not user.membership.bio:
messages.info(request,
'You don\'t have a bio! You should write one '
'so community members can learn more about you. '
'Just visit your user profile by accessing the '
'user menu at the top-right corner of this page.',
fail_silently=True)
| [
"varmaa@gmail.com"
] | varmaa@gmail.com |
f264cbe12ec190255d0fe7fb1219395eaff22bc8 | 743c3b0cd875fe294fc15b96de678c93ecd8ab27 | /foruser/myuser/urls.py | 838fc3a9e5c5fdfd03f5f634b2ec6fe3d8967638 | [] | no_license | yudian03/LOGIN | f3cc760ee25a34ce7b939de5475fc7f7097b59a3 | 3db6278bc15be6244187d9744f3bdf562c7d409f | refs/heads/master | 2020-05-01T04:30:17.146513 | 2019-03-23T10:51:11 | 2019-03-23T10:51:11 | 177,276,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from django.urls import path
from . import views
urlpatterns = [
path('register/',views.register),
path('login/',views.login),
path('home/',views.home),
path('logout/',views.logout)
] | [
"noreply@github.com"
] | noreply@github.com |
a955aa2adcf7d72b65e3af9165bf022c5a057ec0 | 9d29b302cca89a4ad816f99f1d3c708862dd4c0b | /client.py | 73c58c8f77fdac730b4bde122ffd76801a4ac751 | [] | no_license | Manoj-M-97/Flight-Booking-System | a28c57c770ea06cc4c8704dbddc2740ec3d86fcd | 649d74c63d73a24a3fd97406008903f806ffa34b | refs/heads/master | 2020-03-22T04:02:38.788029 | 2018-07-02T16:48:21 | 2018-07-02T16:48:21 | 139,468,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | # Python program to implement client side of chat room.
import socket
import select
import sys
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if len(sys.argv) != 3:
print "Correct usage: script, IP address, port number"
exit()
IP_address = str(sys.argv[1])
Port = int(sys.argv[2])
server.connect((IP_address, Port))
cl="CLOSE"
while True:
# maintains a list of possible input streams
sockets_list = [sys.stdin, server]
""" There are two possible input situations. Either the
user wants to give manual input to send to other people,
or the server is sending a message to be printed on the
screen. Select returns from sockets_list, the stream that
is reader for input. So for example, if the server wants
to send a message, then the if condition will hold true
below.If the user wants to send a message, the else
condition will evaluate as true"""
read_sockets,write_socket, error_socket = select.select(sockets_list,[],[])
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
if (message.endswith(cl)):
print "Connection Terminated"
exit()
print message
else:
message = sys.stdin.readline()
server.send(message)
sys.stdout.flush()
server.close()
| [
"noreply@github.com"
] | noreply@github.com |
c3b2ccf3279e3d6c131b50d1a8a089fc8ee00b32 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/BizListDataInfo.py | 5f874dfae528b4b6592ad1306c025ec59eb0239e | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,206 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BizListDataInfo(object):
def __init__(self):
self._code = None
self._name = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BizListDataInfo()
if 'code' in d:
o.code = d['code']
if 'name' in d:
o.name = d['name']
return o
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
bd171b67cb9363e6bad907a04d5ab5e0bc909104 | c16d80fa4837ca849056dc1e66191825037969ed | /gptneo_piqa.py | d804ccdcc15115883cf1b8dceb7408a4520b8371 | [] | no_license | vivekvkashyap/gpt2-commonsens | c289819e440b52dfb7390c614494cd85437cd1c3 | f5d884bcf27c2bd2cb3cf8fa55f6151d12e17b9d | refs/heads/main | 2023-06-25T17:37:08.203910 | 2021-07-27T05:06:33 | 2021-07-27T05:06:33 | 389,845,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,707 | py | import jax
print(jax.local_device_count())
import jax.numpy as jnp
import flax
import flax.linen as nn
from flax.training.common_utils import get_metrics,onehot,shard,shard_prng_key
from flax.training import train_state
from flax.metrics.tensorboard import SummaryWriter
from flax.training import checkpoints
from datasets import load_dataset,load_metric
from transformers import GPT2Tokenizer
from tqdm import tqdm
import logging
import optax
import math
from pathlib import Path
from typing import Callable
from itertools import chain
from flax.metrics import tensorboard
from datasets import load_dataset,load_metric
from transformers import GPTNeoConfig,GPT2Tokenizer
from model_file import FlaxGPTNeoForMultipleChoice
logger = logging.getLogger()
logger.setLevel(logging.INFO)
tokenizer=GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B',pad_token='<|endoftext|>')
dataset=load_dataset('piqa')
num_choices=2
def preprocess(example):
example['first_sentence']=[example['goal']]*num_choices
example['second_sentence']=[example[f'sol{i}'] for i in [1,2]]
return example
train_dataset=dataset['train'].map(preprocess)
validation_dataset=dataset['validation'].map(preprocess)
test_dataset=dataset['test'].map(preprocess)
len_train_dataset=16113
len_validation_dataset=1838
len_test_dataset=3084
train_dataset=train_dataset.select(range(len_train_dataset))
test_dataset=test_dataset.select(range(len_test_dataset))
validation_dataset=validation_dataset.select(range(len_validation_dataset))
remove_col=train_dataset.column_names
def tokenize(examples):
tokenized_examples=tokenizer(examples['first_sentence'],examples['second_sentence'],padding='max_length',truncation=True,max_length=256,return_tensors='jax')
tokenized_examples['labels']=int(examples['label'])
return tokenized_examples
train_dataset=train_dataset.map(tokenize)
validation_dataset=validation_dataset.map(tokenize)
train_dataset=train_dataset.remove_columns(remove_col)
validation_dataset=validation_dataset.remove_columns(remove_col)
test_dataset=test_dataset.remove_columns(remove_col)
per_device_batch_size=4
seed=0
num_train_epochs=3
learning_rate=2e-5
model = FlaxGPTNeoForMultipleChoice.from_pretrained('EleutherAI/gpt-neo-1.3B',input_shape=(1,num_choices,1))
total_batch_size = per_device_batch_size * jax.local_device_count()
print('The overall batch size (both for training and eval) is', total_batch_size)
num_train_steps = len(train_dataset) // total_batch_size * num_train_epochs
num_validation_steps=len(validation_dataset)//total_batch_size*num_train_epochs
learning_rate_function = optax.linear_schedule(init_value=learning_rate, end_value=0, transition_steps=num_train_steps)
class TrainState(train_state.TrainState):
logits_function:Callable=flax.struct.field(pytree_node=False)
loss_function:Callable=flax.struct.field(pytree_node=False)
def adamw(weight_decay):
    # Note: despite the name, this returns an Adafactor optimizer and the
    # weight_decay argument is unused.
    return optax.adafactor(learning_rate=learning_rate_function)
decay_path=lambda p:not any(x in p for x in ['bias','LayerNorm.weight'])
def traverse(function):
def mask(data):
flat=flax.traverse_util.flatten_dict(data)
return flax.traverse_util.unflatten_dict({k:function(k,v) for k,v in flat.items()})
return mask
gradient_transformation=optax.chain(
optax.masked(adamw(0.0),mask=traverse(lambda path,_:decay_path(path))),
optax.masked(adamw(0.01),mask=traverse(lambda path,_:not decay_path(path))))
def loss_function(logits,labels):
logits=flax.linen.log_softmax(logits)
xentropy=optax.softmax_cross_entropy(logits,onehot(labels,num_classes=num_choices))
return jnp.mean(xentropy)
def eval_function(logits):
return logits.argmax(-1)
state=TrainState.create(apply_fn=model.__call__,
params=model.params,
tx=gradient_transformation,
logits_function=eval_function,
loss_function=loss_function)
def train_step(state,batch,dropout_rng):
targets=batch.pop("labels")
dropout_rng,new_dropout_rng=jax.random.split(dropout_rng)
def loss_function(params):
logits=state.apply_fn(**batch,params=params,dropout_rng=dropout_rng,train=True)[0]
loss=state.loss_function(logits,targets)
return loss
grad_function=jax.value_and_grad(loss_function)
loss,grad=grad_function(state.params)
grad=jax.lax.pmean(grad,"batch")
new_state=state.apply_gradients(grads=grad)
#Added.
logits=new_state.apply_fn(**batch,params=new_state.params,dropout_rng=dropout_rng,train=True)[0]
accuracy=jnp.equal(jnp.argmax(logits,axis=-1),targets)
metrics=jax.lax.pmean({"loss":loss,"learning_rate":learning_rate_function(state.step),'accuracy':accuracy},axis_name="batch")
return new_state,metrics,new_dropout_rng
parallel_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,))
def eval_step(state, batch):
targets=batch.pop('labels')
logits = state.apply_fn(**batch, params=state.params, train=False)
loss=state.loss_function(logits,targets)
predictions=state.logits_function(logits)
eval_accuracy=jnp.equal(predictions,targets)
#eval_acc=jnp.equal(predictions,targets)
metrics=jax.lax.pmean({"loss":loss,'accuracy':eval_accuracy},axis_name="batch")
#return state.logits_function(logits) #(8,4)
return targets,predictions,metrics
parallel_eval_step = jax.pmap(eval_step, axis_name="batch")
def glue_train_data_loader(rng,dataset,batch_size):
steps_per_epoch=len_train_dataset//batch_size
perms=jax.random.permutation(rng,len_train_dataset)
perms=perms[:steps_per_epoch*batch_size]
perms=perms.reshape((steps_per_epoch,batch_size))
for perm in perms:
batch=dataset[perm]
#print(jnp.array(batch['label']))
batch={k:jnp.array(v) for k,v in batch.items()}
batch=shard(batch)
yield batch
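# Each yielded batch has already been shard()-ed: every array's leading axis is split
# into (local_device_count, per_device_batch_size), matching what the pmapped
# train/eval steps expect.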
rng=jax.random.PRNGKey(seed)
dropout_rngs=jax.random.split(rng,jax.local_device_count())
def glue_eval_data_loader(dataset, batch_size):
for i in range(len_validation_dataset // batch_size):
batch = dataset[i * batch_size : (i + 1) * batch_size]
batch = {k: jnp.array(v) for k, v in batch.items()}
batch = shard(batch)
yield batch
state = flax.jax_utils.replicate(state)
actual_task = "mnli"
metric = load_metric('glue', "mnli")
actual_taskmetric = load_metric('glue', actual_task)
workdir='./piqa_tensorboard'
summary_writer = tensorboard.SummaryWriter(workdir)
logger.info(f"***** Running training *****")
logger.info(f" Num examples = {len_train_dataset}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {per_device_batch_size}")
logger.info(f" Total train batch size = {total_batch_size}")
logger.info(f" Total optimization steps = {num_train_steps}")
for i, epoch in enumerate(tqdm(range(1, num_train_epochs+1), desc=f"Epoch ...", position=0, leave=True)):
rng, input_rng = jax.random.split(rng)
train_acc_metrics=[]
train_loss_metrics=[]
eval_acc_metrics=[]
eval_loss_metrics=[]
# train
with tqdm(total=len_train_dataset // total_batch_size, desc="Training...", leave=False) as progress_bar_train:
for idx,batch in enumerate(glue_train_data_loader(input_rng, train_dataset, total_batch_size)):
state, train_metric, dropout_rngs = parallel_train_step(state, batch, dropout_rngs)
train_acc_metrics.append(jax.device_get(train_metric['accuracy']).mean().item())
train_loss_metrics.append(flax.jax_utils.unreplicate(train_metric)['loss'].item())
if idx%5==0:
summary_writer.scalar('train_loss',flax.jax_utils.unreplicate(train_metric)['loss'].item(),idx)
summary_writer.scalar('train_accuracy', jax.device_get(train_metric['accuracy']).mean().item(),idx)
if idx%20==0:
logger.info(f"train_step_loss{idx}: {flax.jax_utils.unreplicate(train_metric)['loss'].item()} train_step_acc{idx}: {jax.device_get(train_metric['accuracy']).mean().item()} ")
progress_bar_train.update(1)
# evaluate
with tqdm(total=len_validation_dataset // total_batch_size, desc="Evaluating...", leave=False) as progress_bar_eval:
for idx,batch in enumerate(glue_eval_data_loader(validation_dataset, total_batch_size)):
labels,predictions,eval_metric=parallel_eval_step(state, batch)
eval_acc_metrics.append(jax.device_get(eval_metric['accuracy']).mean().item())
eval_loss_metrics.append(flax.jax_utils.unreplicate(eval_metric)['loss'].item())
progress_bar_eval.update(1)
if idx%5==0:
logger.info(f"eval_step_loss {idx} : {flax.jax_utils.unreplicate(eval_metric)['loss'].item()} eval_step_acc {idx} : {jax.device_get(eval_metric['accuracy']).mean().item()}")
summary_writer.scalar('eval_loss : ', flax.jax_utils.unreplicate(eval_metric)['loss'].item(),idx)
summary_writer.scalar('eval_accuracy : ', jax.device_get(eval_metric['accuracy']).mean().item(),idx)
logger.info(f"---------------------Epoch {epoch} done-----------------")
logger.info(f"Train loss: {jax.device_get(jnp.array(train_loss_metrics)).mean().item()} Train accuracy: {jax.device_get(jnp.array(train_acc_metrics)).mean().item()}")
logger.info(f"Eval loss: {jax.device_get(jnp.array(eval_loss_metrics)).mean().item()} Eval accuracy: {jax.device_get(jnp.array(eval_acc_metrics)).mean().item()}")
if jax.process_index() == 0:
params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
model.save_pretrained(
'./',
params=params,
push_to_hub=True,
commit_message=f"Piqa:Saving weights of epoch {epoch} at step {idx}",)
summary_writer.flush()
| [
"noreply@github.com"
] | noreply@github.com |
4716976f68bf061fef859306dd4192440aa5d090 | 94312b972c9ea96404535d26a297c72e75f84d22 | /Weather_WebCrawl.py | 350443ebd66136fe19578ad51278528825577cdc | [] | no_license | 1LuvCode/My_Slut_TJ | 2e8092d78857497a45a22d4af2270dc4c51cdada | d7f39542cccb51b46d4d53d6489ef3b82079bc4d | refs/heads/main | 2023-02-25T20:16:32.461565 | 2021-02-02T11:28:37 | 2021-02-02T11:28:37 | 335,256,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,695 | py | import requests
from bs4 import BeautifulSoup
def Crawling_Weather(Finallocation):
url = 'https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=' + Finallocation
hdr = {'User-Agent': (
'mozilla/5.0 (windows nt 10.0; win64; x64) applewebkit/537.36 (khtml, like gecko) chrome/78.0.3904.70 safari/537.36')}
req = requests.get(url, headers=hdr)
html = req.text
soup = BeautifulSoup(html, 'html.parser')
LocationInfo = ""
NowTemp = ""
CheckDust = []
    # error check
ErrorCheck = soup.find('span', {'class': 'btn_select'})
if 'None' in str(ErrorCheck):
print("Error! 지역 검색 오류!")
return None
else:
        # location info
for i in soup.select('span[class=btn_select]'):
LocationInfo = i.text
        # current temperature
NowTemp = soup.find('span', {'class': 'todaytemp'}).text + soup.find('span', {'class': 'tempmark'}).text[2:]
        # weather summary text
WeatherCast = soup.find('p', {'class': 'cast_txt'}).text
        # today's morning temperature, afternoon temperature, and feels-like temperature
TodayMorningTemp = soup.find('span', {'class': 'min'}).text
TodayAfternoonTemp = soup.find('span', {'class': 'max'}).text
TodayFeelTemp = soup.find('span', {'class': 'sensible'}).text[5:]
        # UV index
TodayUV = soup.find('span', {'class': 'indicator'}).text[4:-2] + " " + soup.find('span',
{'class': 'indicator'}).text[-2:]
        # fine dust, ultrafine dust, and ozone levels
CheckDust1 = soup.find('div', {'class': 'sub_info'})
CheckDust2 = CheckDust1.find('div', {'class': 'detail_box'})
for i in CheckDust2.select('dd'):
CheckDust.append(i.text)
FineDust = CheckDust[0][:-2] + " " + CheckDust[0][-2:]
UltraFineDust = CheckDust[1][:-2] + " " + CheckDust[1][-2:]
Ozon = CheckDust[2][:-2] + " " + CheckDust[2][-2:]
        # check tomorrow's morning/afternoon temperature and conditions
tomorrowArea = soup.find('div', {'class': 'tomorrow_area'})
tomorrowCheck = tomorrowArea.find_all('div', {'class': 'main_info morning_box'})
        # tomorrow morning temperature
tomorrowMoring1 = tomorrowCheck[0].find('span', {'class': 'todaytemp'}).text
tomorrowMoring2 = tomorrowCheck[0].find('span', {'class': 'tempmark'}).text[2:]
tomorrowMoring = tomorrowMoring1 + tomorrowMoring2
        # tomorrow morning conditions
tomorrowMState1 = tomorrowCheck[0].find('div', {'class': 'info_data'})
tomorrowMState2 = tomorrowMState1.find('ul', {'class': 'info_list'})
tomorrowMState3 = tomorrowMState2.find('p', {'class': 'cast_txt'}).text
tomorrowMState4 = tomorrowMState2.find('div', {'class': 'detail_box'})
tomorrowMState5 = tomorrowMState4.find('span').text.strip()
tomorrowMState = tomorrowMState3 + " " + tomorrowMState5
        # tomorrow afternoon temperature
tomorrowAfter1 = tomorrowCheck[1].find('p', {'class': 'info_temperature'})
tomorrowAfter2 = tomorrowAfter1.find('span', {'class': 'todaytemp'}).text
tomorrowAfter3 = tomorrowAfter1.find('span', {'class': 'tempmark'}).text[2:]
tomorrowAfter = tomorrowAfter2 + tomorrowAfter3
        # tomorrow afternoon conditions
tomorrowAState1 = tomorrowCheck[1].find('div', {'class': 'info_data'})
tomorrowAState2 = tomorrowAState1.find('ul', {'class': 'info_list'})
tomorrowAState3 = tomorrowAState2.find('p', {'class': 'cast_txt'}).text
tomorrowAState4 = tomorrowAState2.find('div', {'class': 'detail_box'})
tomorrowAState5 = tomorrowAState4.find('span').text.strip()
tomorrowAState = tomorrowAState3 + " " + tomorrowAState5
Weather_info_dict = {
'지역':LocationInfo,
'현재온도':NowTemp,
'체감온도':TodayFeelTemp,
'오전온도':TodayMorningTemp,
'오후온도':TodayAfternoonTemp,
'현재상태':WeatherCast,
'현재자외선지수':TodayUV,
'현재미세먼지농도':FineDust,
'현재초미세먼지농도':UltraFineDust,
'현재오존지수':Ozon,
'내일오전온도':tomorrowMoring,
'내일오전상태':tomorrowMState,
'내일오후온도':tomorrowAfter,
'내일오후상태':tomorrowAState
}
return Weather_info_dict
# print("=========================================")
# print(LocationInfo + " 날씨 정보입니다.")
# print("=========================================")
# print("현재온도: " + NowTemp)
# print("체감온도: " + TodayFeelTemp)
# print("오전/오후 온도: " + TodayMorningTemp + "/" + TodayAfternoonTemp)
# print("현재 상태: " + WeatherCast)
# print("현재 자외선 지수: " + TodayUV)
# print("현재 미세먼지 농도: " + FineDust)
# print("현재 초미세먼지 농도: " + UltraFineDust)
# print("현재 오존 지수: " + Ozon)
# print("=========================================")
# print(LocationInfo + " 내일 날씨 정보입니다.")
# print("=========================================")
# print("내일 오전 온도: " + tomorrowMoring)
# print("내일 오전 상태: " + tomorrowMState)
# print("내일 오후 온도: " + tomorrowAfter)
# print("내일 오후 상태: " + tomorrowAState)
| [
"noreply@github.com"
] | noreply@github.com |
707062ffa62600fed5892717cfc5efb6677b3277 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_plough.py | 8524ffbb0f26cf406e78e16dbed5ed7ccee77fc1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py |
#calss header
class _PLOUGH():
def __init__(self,):
self.name = "PLOUGH"
self.definitions = [u'a large farming tool with blades that digs the soil in fields so that seeds can be planted', u'If land is under the plough, crops are grown on it: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8aa66e9bfbe8bd636da164d691be14c9753a0cf6 | 2e318c8fdbb8e8826937ffbf1eede7034a47960a | /GazeGAN_using_CSC/train_old1.py | 3a4c6f4bdfa7d3250a264ab2b5f775c39e7fdeb4 | [] | no_license | chenkeshuai/Sal-CFS-GAN | e06efbe5e49360c8f5634704c487483795c10d31 | 8ae0fb77efff503190bcc8b6333c1d21ea1bfbce | refs/heads/master | 2022-06-06T01:18:00.664722 | 2020-05-06T10:54:11 | 2020-05-06T10:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,507 | py | ### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import time
from collections import OrderedDict
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
import os
import numpy as np
import torch
from torch.autograd import Variable
opt = TrainOptions().parse()
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
if opt.continue_train:
try:
start_epoch, epoch_iter = np.loadtxt(iter_path , delimiter=',', dtype=int)
except:
start_epoch, epoch_iter = 1, 0
print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))
else:
start_epoch, epoch_iter = 1, 0
if opt.debug:
opt.display_freq = 1
opt.print_freq = 1
opt.niter = 1
opt.niter_decay = 0
opt.max_dataset_size = 10
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
model = create_model(opt)
visualizer = Visualizer(opt)
total_steps = (start_epoch-1) * dataset_size + epoch_iter
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq
My_Limit = 600 # just for debugging phase, to control the total training steps for saving time
for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
if epoch != start_epoch:
epoch_iter = epoch_iter % dataset_size
for i, data in enumerate(dataset, start=epoch_iter):
if(i > My_Limit):
break
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
# whether to collect output images
save_fake = total_steps % opt.display_freq == display_delta
############## Forward Pass ######################
losses, generated = model(Variable(data['label']), Variable(data['inst']),
Variable(data['image']), Variable(data['feat']), infer=save_fake)
# sum per device losses
losses = [ torch.mean(x) if not isinstance(x, int) else x for x in losses ]
loss_dict = dict(zip(model.module.loss_names, losses))
print("loss dict is :", loss_dict)
# calculate final loss scalar
# loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
# loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0)
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0) + loss_dict.get('Loss_CC',0)
print("CC loss is :", loss_dict.get('Loss_CC',0))
############### Backward Pass ####################
# update generator weights
model.module.optimizer_G.zero_grad()
loss_G.backward()
model.module.optimizer_G.step()
# update discriminator weights
model.module.optimizer_D.zero_grad()
loss_D.backward()
model.module.optimizer_D.step()
#call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])
############## Display results and errors ##########
### print out errors
if total_steps % opt.print_freq == print_delta:
errors = {k: v.data[0] if not isinstance(v, int) else v for k, v in loss_dict.items()}
t = (time.time() - iter_start_time) / opt.batchSize
visualizer.print_current_errors(epoch, epoch_iter, errors, t)
visualizer.plot_current_errors(errors, total_steps)
### display output images
if save_fake:
visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
('synthesized_image', util.tensor2im(generated.data[0])),
('real_image', util.tensor2im(data['image'][0]))])
visualizer.display_current_results(visuals, epoch, total_steps)
### save latest model
if total_steps % opt.save_latest_freq == save_delta:
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
model.module.save('latest')
np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')
if epoch_iter >= dataset_size:
break
# end of epoch
iter_end_time = time.time()
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
'''
### save model for this epoch
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.module.save('latest')
model.module.save(epoch)
np.savetxt(iter_path, (epoch+1, 0), delimiter=',', fmt='%d')
'''
### instead of only training the local enhancer, train the entire network after certain iterations
if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
model.module.update_fixed_params()
### linearly decay learning rate after certain iterations
if epoch > opt.niter:
model.module.update_learning_rate()
| [
"noreply@github.com"
] | noreply@github.com |
effa795ba011f8dc2f6b6da9ac6642b41478c955 | ac2567d2be46412f10a47aba6b062347fb831ec9 | /twitterTest.py | d48b78dc469f066c5c8a5c8ce76e0454cb493cd7 | [] | no_license | rhymg/TwitterScraping | e9e8d4098ba4d28cdb0d17f76de98a81c08432aa | 769effdbdf83a170c13d2cac51ca5df7956e2dab | refs/heads/master | 2022-11-24T11:46:46.637370 | 2020-07-18T19:19:17 | 2020-07-18T19:19:17 | 280,906,432 | 0 | 0 | null | 2020-07-19T16:34:18 | 2020-07-19T16:34:17 | null | UTF-8 | Python | false | false | 531 | py | import GetOldTweets3 as got;
word = 'fuck';
f = open("usernameTest.txt", "a");
tweetCriteria = got.manager.TweetCriteria().setQuerySearch(word).setMaxTweets(10);
tweets = got.manager.TweetManager.getTweets(tweetCriteria);
for tweet in tweets:
print(tweet.text + ' BY: ' + tweet.username + '\n');
if word in tweet.text.lower():
print('This has ' + word + ' in it.\n');
f.write(tweet.username + '\n');
else:
print('This does not have ' + word + ' in it.\n');
f.close();
| [
"noreply@github.com"
] | noreply@github.com |
11e480051d1e2e4b524f910449fa7a03d3d0f592 | b3db0cb0849fc3c981077cc5dc071c6eac6fd1ed | /C.1.14.py | f9372e61feee0ec164fad2d51aa25739dd93f3b8 | [] | no_license | Andi-Abdi/Tugas-Struktur-Data | bcfcfd3cf4ac28ce966b30d07041d775b33db778 | 49162ad9c5869161df01bc1a0f8697c2d7d1623a | refs/heads/main | 2023-05-11T11:58:46.484954 | 2021-05-31T14:46:22 | 2021-05-31T14:46:22 | 372,522,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | def odd_product_pair(data):
data = set(data)
for y in data:
for x in data:
if y == x :
continue
if y*x % 2 == 1:
return True
return False
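# Only odd*odd is odd, so this returns True exactly when the set contains two distinct
# odd numbers; e.g. the call below finds 5*7 = 35 and prints True.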
print(odd_product_pair([5,7,9,14,16])) | [
"noreply@github.com"
] | noreply@github.com |
275aa3e362920aae1e2af84fe0380f36fa448f39 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/pygame/pygameweb/pygameweb/db.py | 57c70ca70133b811d4447037d0df7cd54b72e632 | [
"BSD-2-Clause"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:95c026dc0e7051336cd999158979e81f159d4470489660469d0e0175c66400da
size 1274
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
ec81f69f8b35b27ca38c0fabe125ba6ef4bc3a1d | 1975ee674b36084366b1bbe2c091d8f0f8795dc0 | /demo/class_views.py | 49ac0086b684256a0215318d23d4992296ad6f5e | [] | no_license | srikanthpragada/PYTHON_03_JULY_2018_WEBDEMO | f193213788deadcab7ac7b183328269ba1334488 | 56e076ad30703117cafc56d6d95449c6ec8eebb2 | refs/heads/master | 2020-03-25T11:45:53.128704 | 2018-08-23T15:29:05 | 2018-08-23T15:29:05 | 143,747,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | from django.views.generic import TemplateView, ListView
from django.shortcuts import render
from .forms import LoginForm
from .models import Course
class ClassView1(TemplateView):
template_name = 'class_view1.html'
class LoginView(TemplateView):
template_name = 'login.html'
def get(self, request):
form = LoginForm()
return render(request, self.template_name, {'form': form})
def post(self, request):
form = LoginForm(request.POST)
if form.is_valid():
print(form.cleaned_data['username'], form.cleaned_data['password'])
return render(request, self.template_name, {'form': form})
# Generic View - ListView demo
class ListCourseView(ListView):
model = Course
template_name = "courses.html" # default is demo/course_list.html
context_object_name = 'courses' # default is object_list
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
946eaac05979a4f663b7fefeba08d4f1dd8efb16 | d21c924fc23b812aaedeb2cfa3dfb108535a507f | /tw2/jqplugins/fg/defaults.py | 310f64fb53f192fa733e55b3ba04ea7270501562 | [] | no_license | toscawidgets/tw2.jqplugins.fg | eba3a90949c59dd7c6b3740ab09faa9b5d824a6d | 8317f3bec82364b95e86aa3655c7f787b25d715f | refs/heads/master | 2020-05-17T12:13:14.385977 | 2011-11-04T15:41:50 | 2011-11-04T15:41:50 | 954,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | #jQuery.ui
_fg_dirname_ = 'jquery/fg/%(subdir)s'
| [
"ralph.bean@gmail.com"
] | ralph.bean@gmail.com |
79848a0117879783d1f2f0c37b6a8586c18147c6 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/IPV6-TCP-MIB.py | ae7c821868888b0850cd5394fcb2bb61fbdbaeb3 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 5,095 | py | #
# PySNMP MIB module IPV6-TCP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IPV6-TCP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:45:44 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint")
Ipv6Address, Ipv6IfIndexOrZero = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address", "Ipv6IfIndexOrZero")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, experimental, ObjectIdentity, Gauge32, Counter64, Counter32, Bits, NotificationType, IpAddress, ModuleIdentity, Integer32, iso, TimeTicks, Unsigned32, mib_2, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "experimental", "ObjectIdentity", "Gauge32", "Counter64", "Counter32", "Bits", "NotificationType", "IpAddress", "ModuleIdentity", "Integer32", "iso", "TimeTicks", "Unsigned32", "mib-2", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ipv6TcpMIB = ModuleIdentity((1, 3, 6, 1, 3, 86))
ipv6TcpMIB.setRevisions(('2017-02-22 00:00', '1998-01-29 00:00',))
if mibBuilder.loadTexts: ipv6TcpMIB.setLastUpdated('201702220000Z')
if mibBuilder.loadTexts: ipv6TcpMIB.setOrganization('IETF IPv6 MIB Working Group')
tcp = MibIdentifier((1, 3, 6, 1, 2, 1, 6))
ipv6TcpConnTable = MibTable((1, 3, 6, 1, 2, 1, 6, 16), )
if mibBuilder.loadTexts: ipv6TcpConnTable.setStatus('obsolete')
ipv6TcpConnEntry = MibTableRow((1, 3, 6, 1, 2, 1, 6, 16, 1), ).setIndexNames((0, "IPV6-TCP-MIB", "ipv6TcpConnLocalAddress"), (0, "IPV6-TCP-MIB", "ipv6TcpConnLocalPort"), (0, "IPV6-TCP-MIB", "ipv6TcpConnRemAddress"), (0, "IPV6-TCP-MIB", "ipv6TcpConnRemPort"), (0, "IPV6-TCP-MIB", "ipv6TcpConnIfIndex"))
if mibBuilder.loadTexts: ipv6TcpConnEntry.setStatus('obsolete')
ipv6TcpConnLocalAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 1), Ipv6Address())
if mibBuilder.loadTexts: ipv6TcpConnLocalAddress.setStatus('obsolete')
ipv6TcpConnLocalPort = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: ipv6TcpConnLocalPort.setStatus('obsolete')
ipv6TcpConnRemAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 3), Ipv6Address())
if mibBuilder.loadTexts: ipv6TcpConnRemAddress.setStatus('obsolete')
ipv6TcpConnRemPort = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: ipv6TcpConnRemPort.setStatus('obsolete')
ipv6TcpConnIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 5), Ipv6IfIndexOrZero())
if mibBuilder.loadTexts: ipv6TcpConnIfIndex.setStatus('obsolete')
ipv6TcpConnState = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("closed", 1), ("listen", 2), ("synSent", 3), ("synReceived", 4), ("established", 5), ("finWait1", 6), ("finWait2", 7), ("closeWait", 8), ("lastAck", 9), ("closing", 10), ("timeWait", 11), ("deleteTCB", 12)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipv6TcpConnState.setStatus('obsolete')
ipv6TcpConformance = MibIdentifier((1, 3, 6, 1, 3, 86, 2))
ipv6TcpCompliances = MibIdentifier((1, 3, 6, 1, 3, 86, 2, 1))
ipv6TcpGroups = MibIdentifier((1, 3, 6, 1, 3, 86, 2, 2))
ipv6TcpCompliance = ModuleCompliance((1, 3, 6, 1, 3, 86, 2, 1, 1)).setObjects(("IPV6-TCP-MIB", "ipv6TcpGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ipv6TcpCompliance = ipv6TcpCompliance.setStatus('obsolete')
ipv6TcpGroup = ObjectGroup((1, 3, 6, 1, 3, 86, 2, 2, 1)).setObjects(("IPV6-TCP-MIB", "ipv6TcpConnState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ipv6TcpGroup = ipv6TcpGroup.setStatus('obsolete')
mibBuilder.exportSymbols("IPV6-TCP-MIB", ipv6TcpConnTable=ipv6TcpConnTable, ipv6TcpConnEntry=ipv6TcpConnEntry, ipv6TcpMIB=ipv6TcpMIB, ipv6TcpGroups=ipv6TcpGroups, ipv6TcpConnIfIndex=ipv6TcpConnIfIndex, tcp=tcp, ipv6TcpConnRemPort=ipv6TcpConnRemPort, ipv6TcpConformance=ipv6TcpConformance, PYSNMP_MODULE_ID=ipv6TcpMIB, ipv6TcpConnState=ipv6TcpConnState, ipv6TcpConnRemAddress=ipv6TcpConnRemAddress, ipv6TcpConnLocalPort=ipv6TcpConnLocalPort, ipv6TcpCompliances=ipv6TcpCompliances, ipv6TcpConnLocalAddress=ipv6TcpConnLocalAddress, ipv6TcpCompliance=ipv6TcpCompliance, ipv6TcpGroup=ipv6TcpGroup)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
480127fceb33213f368de855a806d8bd709a0909 | 2136c75df909b40c2667679b2ba4740d8b50a299 | /test.py | 86957845b7c17d62c3ce76575f6a1f07d42c824f | [] | no_license | jianglikun/preMLI | 19e91935266539afa15cb86a3e62608840c775d1 | 54b48fba7adf7fb232ac1a2cec883c596d49d3a3 | refs/heads/main | 2023-09-04T17:32:13.657101 | 2021-11-10T08:27:42 | 2021-11-10T08:27:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,748 | py | import os
os.environ["CUDA_VISIBLE_DEVICES"]="4"
from model import get_model
from model import get_model_max
from model import get_model_C_mul
from model import get_model_C_sub
import tensorflow as tf
import numpy as np
from sklearn.metrics import roc_auc_score,average_precision_score, f1_score
from sklearn.metrics import accuracy_score,recall_score
def stat(y_label,y_pred):
# print('y_label=',y_label)
# print('y_pred=',y_pred)
threshold = 0.5
auc = roc_auc_score(y_label, y_pred)
aupr = average_precision_score(y_label, y_pred)
for i in range(len(y_pred)):
if y_pred[i][0] >= threshold:
y_pred[i][0] = 1
if y_pred[i][0] < threshold:
y_pred[i][0] = 0
TP = 0
TN = 0
FP = 0
FN = 0
for i in range(len(y_pred)):
if y_pred[i][0] == 0 and y_label[i] == 0:
TN = TN + 1
if y_pred[i][0] == 1 and y_label[i] == 1:
TP = TP + 1
if y_pred[i][0] == 0 and y_label[i] == 1:
FN = FN + 1
if y_pred[i][0] == 1 and y_label[i] == 0:
FP = FP + 1
specificity = TN/(TN+FP)
recall = recall_score(y_label,y_pred)
acc = accuracy_score(y_label,y_pred)
f1 = f1_score(y_label, y_pred)
acc = round(acc, 4)
auc = round(auc,4)
aupr = round(aupr, 4)
f1 = round(f1,4)
return acc,auc,aupr,f1,recall,specificity
##########################
datatype = 2021
kmer = 3
##########################
for m in range(100):
model=None
model=get_model()
model.load_weights('./model/3mer2021/Solanum lycopersicumModel%s.h5'%m)
if datatype == 2020:
names = ['Arabidopsis lyrata','Solanum lycopersicum']
elif datatype == 2021:
names = ['aly','mtr','stu','bdi']
for name in names:
Data_dir='/home/yxy/Project/002/processData/3mer/'
if datatype == 2020:
test=np.load(Data_dir+'5mer%s_test.npz'%name)
elif datatype == 2021:
test=np.load(Data_dir+'%s%stest2021.npz'%(name,kmer))
X_mi_tes,X_lnc_tes,y_tes=test['X_mi_tes'],test['X_lnc_tes'],test['y_tes']
print("****************Testing %s specific model on %s cell line****************"%(m,name))
y_pred = model.predict([X_mi_tes,X_lnc_tes])
auc = roc_auc_score(y_tes, y_pred)
aupr = average_precision_score(y_tes, y_pred)
f1 = f1_score(y_tes, np.round(y_pred.reshape(-1)))
print("AUC : ", auc)
print("AUPR : ", aupr)
print("f1_score", f1)
acc,auc,aupr,f1,recall,specificity = stat(y_tes, y_pred)
print("ACC : ", acc,"auc : ", auc,"aupr :" , aupr,"f1 : ", f1,"recall : ",recall,"specificity : ",specificity)
| [
"noreply@github.com"
] | noreply@github.com |
31068cd2c89faea0c9efdff5214f7c0d9abac707 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_suffered.py | f5ba9fb4722605fcd51182e2e5bcc1348faf8603 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#calss header
class _SUFFERED():
def __init__(self,):
self.name = "SUFFERED"
		self.definitions = ['suffer']  # wrapped in a list: the bare name `suffer` is undefined and would raise NameError
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['suffer']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
351ef3112a8105eea8a02b98a6ff6303a19eee43 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Integration/trend_LinearTrend/cycle_30/ar_/test_artificial_128_Integration_LinearTrend_30__100.py | 7a5e907e035774475c35332c1022bd9fc95546df | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 275 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
42f0deaf250627b10751156d712d786cdc96ee26 | 6bf1b595a7f4d3cbf0995455869d438a7d0e0624 | /lingvo/tasks/milan/score_functions.py | 9c4ce867b372dfed657bec15a96096952923b006 | [
"Apache-2.0"
] | permissive | huaxz1986/lingvo | 889abc82b1bab6f37ba861c41eb480b7e89362c0 | b83984577610423e3b1c6b04ca248cd23f2842f7 | refs/heads/master | 2022-05-15T03:29:56.903688 | 2022-04-02T01:41:25 | 2022-04-02T01:41:25 | 173,536,461 | 1 | 0 | Apache-2.0 | 2019-03-03T05:52:01 | 2019-03-03T05:52:01 | null | UTF-8 | Python | false | false | 1,664 | py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of combination functions for dual-encoder models."""
from lingvo import compat as tf
from lingvo.core import base_layer
class DotProductScoreFunction(base_layer.BaseLayer):
"""Performs dot product combination between two encoded vectors."""
@classmethod
def Params(cls):
p = super().Params()
p.name = 'dot_product_score_function'
return p
def FProp(self, theta, x, y):
"""Computes pair-wise dot product similarity.
Args:
theta: NestedMap of variables belonging to this layer and its children.
x: batch of encoded representations from modality x. A float32 Tensor of
shape [x_batch_size, encoded_dim]
y: batch of encoded representations from modality y. A float32 Tensor of
shape [y_batch_size, encoded_dim]
Returns:
Pairwise dot products. A float32 Tensor with shape
`[x_batch_size, y_batch_size]`.
"""
return tf.matmul(x, y, transpose_b=True)
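# Illustrative usage (shapes assumed, not from the source): with x of shape [4, 128]
# and y of shape [6, 128], FProp returns a [4, 6] matrix of pairwise dot-product scores.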
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
8158442771c431dd35672a9edc586edd0fe33d1d | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/BreadthFirstSearch/103_BinaryTreeZigzagLevelOrderTraversal.py | 4445a0088162de197a6843a1be5b63a07388215c | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 797 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
c.. Solution o..
___ zigzagLevelOrder root
__ n.. root:
r_ []
left2right = 1
# 1. scan the level from left to right. -1 reverse.
ans, stack, temp # list, [root], []
_____ stack:
temp = [node.val ___ node __ stack]
stack = [child ___ node __ stack
___ child __ (node.left, node.right) __ child]
ans += [temp[::left2right]] # Pythonic way
left2right *= -1
r_ ans
"""
[]
[1]
[1,2,3]
[0,1,2,3,4,5,6,null,null,7,null,8,9,null,10]
"""
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
18418bc2a39d5aeb5d6d8aaa063f549811e5c5cf | 9c7f47b2f31ea4ae55e33c706efe524eb62ff177 | /HT_11/HT_11_1.py | 3fdafbfbc8e4ba14a7b22a1f6076c98a1208a2cc | [] | no_license | Kantarian/GITHUB | 05b6d5425b345667a4188ced23da76ed337b910a | fa047cbb2beb9bf372b22596bea8aaef80423872 | refs/heads/main | 2023-02-14T16:57:50.229446 | 2021-01-13T15:43:48 | 2021-01-13T15:43:48 | 311,783,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | #1. Створити клас Calc, який буде мати атребут last_result та 4 методи. Методи повинні виконувати математичні операції з 2-ма числами, а саме додавання, віднімання,
#    multiplication, division.
# - If last_result is accessed right after an instance of the class is created, it must return an empty value.
# - If one of the methods has been used, last_result must return the result of the previous method call.
# - Add documentation to the class (you can read this article: https://realpython.com/documenting-python-code/ )
class Calc():
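    """A simple two-operand calculator.

    last_result stays None (empty) until one of the four arithmetic methods
    (add, sub, mul, div) is called; afterwards it holds the most recent result.
    """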
def __init__(self,a,b,last_result = None):
self.a=a
self.b=b
self.last_result = last_result
def add(self):
self.last_result = self.a+self.b
return self.last_result
def mul(self):
self.last_result = self.a*self.b
return self.last_result
def div(self):
self.last_result = self.a/self.b
return self.last_result
def sub(self):
self.last_result = self.a-self.b
return self.last_result
a=int(input("Enter first number: "))
b=int(input("Enter second number: "))
obj=Calc(a,b)
choice=1
while choice!=0:
print("0. Exit")
print("1. Add")
print("2. Subtraction")
print("3. Multiplication")
print("4. Division")
print("5. Last result")
choice=int(input("Enter choice: "))
if choice==1:
print("Result: ",obj.add())
elif choice==2:
print("Result: ",obj.sub())
elif choice==3:
print("Result: ",obj.mul())
elif choice==4:
print("Result: ",round(obj.div(),2))
elif choice==5:
print("Last Result: ",round(obj.last_result))
elif choice==0:
print("Exiting!")
else:
print("Invalid choice!!")
| [
"noreply@github.com"
] | noreply@github.com |
da74b5b74654f0fbd6447f906cfa0864252ad0ea | 43e788ee824ce1f6611d42690688136e5840af0e | /Video.py | 5727fe4166addad073efc4954296de4a11e5ee5a | [] | no_license | Karthik8396/lrn_opencv2 | 3b9c9d824bee26c5d3c5c8ab54fb12e5a9bf145e | 1d475f5b285cca187ff449f0036dcfe3dd5db136 | refs/heads/master | 2020-07-10T05:09:03.104573 | 2019-08-31T14:23:17 | 2019-08-31T14:23:17 | 204,174,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | import cv2
import numpy
cap=cv2.VideoCapture(0) #first webcam
fourcc =cv2.VideoWriter_fourcc(*'XVID') # for saving the video and fourcc is codec
out=cv2.VideoWriter('output.avi',fourcc,20.0,(640,480)) # adding codec and size of video
cv2.VideoWriter()
while True :
ret,frame = cap.read()
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
cv2.imshow('frame',frame)
cv2.imshow('gray',gray)
out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # waitKey returns a 32-bit value; masking with 0xFF keeps the low 8 bits for comparison with the key code
        break  # ord('q') gives the character code of the 'q' key
cap.release()
out.release()
cv2.destroyAllWindows() | [
"noreply@github.com"
] | noreply@github.com |
8031b06595673b677e41319deb604caa3164a455 | 5ca39c2f45bdef4f93e57b17a357a2565fe1cf02 | /contactbook.py | 05a5715d3a06a40a21e502278f0cf56788ca7c36 | [] | no_license | Ajit1999/ContactBook-API | de6f51d0e1fcf49b5c8b8bfacf4b7750b64b9356 | df64583db98eb3421f07177f3c7dbb771c218ac4 | refs/heads/main | 2023-07-12T00:12:38.396876 | 2021-08-22T11:55:31 | 2021-08-22T11:55:31 | 398,787,514 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,215 | py | from flask import Flask
from flask_pymongo import PyMongo
from bson.json_util import dumps
from bson.objectid import ObjectId
from flask import jsonify, request
app = Flask(__name__)
app.secret_key = "secretkey"
app.config['MONGO_URI'] = "mongodb://localhost:27017/User"
mongo = PyMongo(app)
@app.route('/add',methods=['POST'])
def add_user():
_json = request.json
_name = _json['name']
_address = _json['address']
_contactno = _json['contact']
_email = _json['email']
if _name and _address and _contactno and _email and request.method == 'POST':
id = mongo.db.user.insert({'name':_name,'address':_address,'contact':_contactno,'email':_email})
resp = jsonify("Contact added sucessfully")
resp.status_code = 200
return resp
else:
return not_found()
@app.route('/users')
def users():
users = mongo.db.user.find()
resp = dumps(users)
return resp
@app.route('/user/<id>')
def user(id):
user = mongo.db.user.find_one({'_id':ObjectId(id)})
resp = dumps(user)
return resp
@app.route('/delete/<id>',methods=['DELETE'])
def delete_user(id):
delete_user = mongo.db.user.delete_one({'_id': ObjectId(id)})
resp = jsonify("Contact deleted successfully")
resp.status_code = 200
return resp
@app.route('/update/<id>', methods =['PUT'])
def update(id):
_id = id
_json = request.json
_name = _json['name']
_address = _json['address']
_contactno = _json['contact']
_email = _json['email']
if _name and _address and _contactno and _email and _id and request.method == 'PUT':
mongo.db.user.update({'_id':ObjectId(_id['$oid']) if '$oid' in _id else ObjectId(_id)}, {'$set': {'name':_name,'address':_address,'contact':_contactno,'email':_email,}})
resp = jsonify("Contact updated Successfully")
resp.status_code = 200
return resp
else:
return not_found()
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
        'message': 'Not Found: ' + request.url
}
resp = jsonify(message)
resp.status_code = 404
return resp
if __name__ =="__main__":
app.run(debug = True)
| [
"noreply@github.com"
] | noreply@github.com |
f031555a692495a482d208cf6100105e71ac4dbc | 79b38e6dad187bed26039f77611cc3feb7d75c1a | /issegm1/solve_ST.py | 70e0b3b0d45f420e221a8fc3e8d48bb954d43064 | [] | no_license | engrjavediqbal/MLSL | aa362c04a47b2bc921331bbb47dd4fe15bdb4bbe | 94ac81096fd6ba2c85352807dc93f6a6b6cc472d | refs/heads/master | 2023-08-04T11:22:13.335469 | 2023-07-25T13:55:41 | 2023-07-25T13:55:41 | 209,766,533 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,503 | py | from __future__ import print_function
from sklearn.datasets import fetch_mldata
import logging
import copy
from datetime import datetime
import argparse
import cPickle
import os
import os.path as osp
import re
import sys
import math
import time
from functools import partial
from PIL import Image
from multiprocessing import Pool
from sklearn.metrics import log_loss
import numpy as np
import mxnet as mx
import scipy.io
from util1 import mxutil
from util1 import transformer as ts
from util1 import util
from util1.lr_scheduler import FixedScheduler, LinearScheduler, PolyScheduler
from data1 import FileIter, make_divisible
#from data_src import FileIter, make_divisible, parse_split_file
def parse_split_file_tgt(dataset_tgt, split_tgt, data_root=''):
split_filename = 'issegm1/data_list/{}/{}.lst'.format(dataset_tgt, split_tgt)
image_list = []
label_gt_list = []
image_data_list = []
with open(split_filename) as f:
for item in f.readlines():
fields = item.strip().split('\t')
image_list.append(os.path.join(data_root, fields[0]))
image_data_list.append(fields[0])
label_gt_list.append(os.path.join(data_root, fields[1]))
return image_list, label_gt_list,image_data_list
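# Each line of the .lst split file is expected to contain two tab-separated fields:
# the image path and its ground-truth label path (relative to data_root), e.g.
# "leftImg8bit/val/x.png<TAB>gtFine/val/x_labelIds.png" (illustrative paths only).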
def parse_model_label(args):
assert args.model is not None
fields = [_.strip() for _ in osp.basename(args.model).split('_')]
# parse fields
i = 0
num_fields = len(fields)
# database
dataset = fields[i] if args.dataset is None else args.dataset
dataset_tgt = args.dataset_tgt
i += 1
######################## network structure
assert fields[i].startswith('rn')
net_type = re.compile('rn[a-z]*').findall(fields[i])[0]
net_name = fields[i][len(net_type):].strip('-')
i += 1
# number of classes
assert fields[i].startswith('cls')
classes = int(fields[i][len('cls'):])
i += 1
######################## feature resolution
#feat_stride = 32
feat_stride = 8
if i < num_fields and fields[i].startswith('s'):
feat_stride = int(fields[i][len('s'):])
i += 1
# learning rate
lr_params = {
'type': 'fixed',
'base': 0.1,
'args': None,
}
if args.base_lr is not None:
lr_params['base'] = args.base_lr
if args.lr_type in ('linear',):
lr_params['type'] = args.lr_type
elif args.lr_type in ('poly',):
lr_params['type'] = args.lr_type
elif args.lr_type == 'step':
lr_params['args'] = {'step': [int(_) for _ in args.lr_steps.split(',')],
'factor': 0.1}
model_specs = {
# model
'lr_params': lr_params,
'net_type': net_type,
'net_name': net_name,
'classes': classes,
'feat_stride': feat_stride,
# data
'dataset': dataset,
'dataset_tgt': dataset_tgt
}
return model_specs
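# The model label is parsed as underscore-separated fields, e.g. (illustrative)
# "cityscapes_rna-a1_cls19_s8" -> dataset "cityscapes", net_type "rna", net_name "a1",
# 19 classes, feature stride 8.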
def parse_args():
parser = argparse.ArgumentParser(description='Tune FCRNs from ResNets.')
parser.add_argument('--dataset', default=None,
help='The source dataset to use, e.g. cityscapes, voc.')
parser.add_argument('--dataset-tgt', dest='dataset_tgt', default=None,
help='The target dataset to use, e.g. cityscapes, GM.')
parser.add_argument('--split', dest='split', default='train',
help='The split to use, e.g. train, trainval.')
parser.add_argument('--split-tgt', dest='split_tgt', default='val',
help='The split to use in target domain e.g. train, trainval.')
parser.add_argument('--data-root', dest='data_root',
help='The root data dir. for source domain',
default=None, type=str)
parser.add_argument('--data-root-tgt', dest='data_root_tgt',
help='The root data dir. for target domain',
default=None, type=str)
parser.add_argument('--output', default=None,
help='The output dir.')
parser.add_argument('--model', default=None,
help='The unique label of this model.')
parser.add_argument('--batch-images', dest='batch_images',
help='The number of images per batch.',
default=None, type=int)
parser.add_argument('--crop-size', dest='crop_size',
help='The size of network input during training.',
default=None, type=int)
parser.add_argument('--origin-size', dest='origin_size',
help='The size of images to crop from in source domain',
default=2048, type=int)
parser.add_argument('--origin-size-tgt', dest='origin_size_tgt',
help='The size of images to crop from in target domain',
default=2048, type=int)
parser.add_argument('--scale-rate-range', dest='scale_rate_range',
help='The range of rescaling',
default='0.7,1.3', type=str)
parser.add_argument('--weights', default=None,
help='The path of a pretrained model.')
parser.add_argument('--gpus', default='0',
help='The devices to use, e.g. 0,1,2,3')
#
parser.add_argument('--lr-type', dest='lr_type',
help='The learning rate scheduler, e.g., fixed(default)/step/linear',
default=None, type=str)
parser.add_argument('--base-lr', dest='base_lr',
help='The lr to start from.',
default=None, type=float)
parser.add_argument('--lr-steps', dest='lr_steps',
help='The steps when to reduce lr.',
default=None, type=str)
parser.add_argument('--weight-decay', dest='weight_decay',
help='The weight decay in sgd.',
default=0.0005, type=float)
#
parser.add_argument('--from-epoch', dest='from_epoch',
help='The epoch to start from.',
default=None, type=int)
parser.add_argument('--stop-epoch', dest='stop_epoch',
help='The index of epoch to stop.',
default=None, type=int)
parser.add_argument('--to-epoch', dest='to_epoch',
help='The number of epochs to run.',
default=None, type=int)
# how many rounds to generate pseudo labels
parser.add_argument('--idx-round', dest='idx_round',
help='The current number of rounds to generate pseudo labels',
default=0, type=int)
# initial portion of selected pseudo labels in target domain
parser.add_argument('--init-tgt-port', dest='init_tgt_port',
help='The initial portion of pixels selected in target dataset, both by global and class-wise threshold',
default=0.3, type=float)
parser.add_argument('--init-src-port', dest='init_src_port',
help='The initial portion of images selected in source dataset',
default=0.3, type=float)
parser.add_argument('--seed-int', dest='seed_int',
help='The random seed',
default=0, type=int)
parser.add_argument('--mine-port', dest='mine_port',
help='The portion of data being mined',
default=0.5, type=float)
#
parser.add_argument('--mine-id-number', dest='mine_id_number',
help='Thresholding value for deciding mine id',
default=3, type=int)
parser.add_argument('--mine-thresh', dest='mine_thresh',
help='The threshold to determine the mine id',
default=0.001, type=float)
parser.add_argument('--mine-id-address', dest='mine_id_address',
help='The address of mine id',
default=None, type=str)
#
parser.add_argument('--phase',
help='Phase of this call, e.g., train/val.',
default='train', type=str)
parser.add_argument('--with-prior', dest='with_prior',
help='with prior',
default='True', type=str)
# for testing
parser.add_argument('--test-scales', dest='test_scales',
help='Lengths of the longer side to resize an image into, e.g., 224,256.',
default=None, type=str)
parser.add_argument('--test-flipping', dest='test_flipping',
help='If average predictions of original and flipped images.',
default=False, action='store_true')
parser.add_argument('--test-steps', dest='test_steps',
help='The number of steps to take, for predictions at a higher resolution.',
default=1, type=int)
#
parser.add_argument('--kvstore', dest='kvstore',
help='The type of kvstore, e.g., local/device.',
default='local', type=str)
parser.add_argument('--prefetch-threads', dest='prefetch_threads',
help='The number of threads to fetch data.',
default=1, type=int)
parser.add_argument('--prefetcher', dest='prefetcher',
                        help='The type of prefetcher, e.g., process/thread.',
default='thread', type=str)
parser.add_argument('--cache-images', dest='cache_images',
help='If cache images, e.g., 0/1',
default=None, type=int)
parser.add_argument('--log-file', dest='log_file',
default=None, type=str)
parser.add_argument('--check-start', dest='check_start',
help='The first epoch to snapshot.',
default=1, type=int)
parser.add_argument('--check-step', dest='check_step',
help='The steps between adjacent snapshots.',
default=4, type=int)
parser.add_argument('--debug',
help='True means logging debug info.',
default=False, action='store_true')
parser.add_argument('--backward-do-mirror', dest='backward_do_mirror',
help='True means less gpu memory usage.',
default=False, action='store_true')
parser.add_argument('--no-cudnn', dest='no_mxnet_cudnn_autotune_default',
help='True means deploy cudnn.',
default=False, action='store_true')
parser.add_argument('--kc-policy', dest='kc_policy',
help='The kc determination policy, currently only "global" and "cb" (class-balanced)',
default='cb', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.debug:
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
if args.backward_do_mirror:
os.environ['MXNET_BACKWARD_DO_MIRROR'] = '1'
if args.no_mxnet_cudnn_autotune_default:
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
if args.output is None:
if args.phase == 'val':
args.output = osp.dirname(args.weights)
else:
args.output = 'output'
if args.weights is not None:
if args.model is None:
assert '_ep-' in args.weights
parts = osp.basename(args.weights).split('_ep-')
args.model = '_'.join(parts[:-1])
if args.phase == 'train':
if args.from_epoch is None:
assert '_ep-' in args.weights
parts = os.path.basename(args.weights).split('_ep-')
assert len(parts) == 2
from_model = parts[0]
if from_model == args.model:
parts = os.path.splitext(os.path.basename(args.weights))[0].split('-')
args.from_epoch = int(parts[-1])
if args.model is None:
raise NotImplementedError('Missing argument: args.model')
if args.from_epoch is None:
args.from_epoch = 0
if args.log_file is None:
if args.phase == 'train':
args.log_file = '{}.log'.format(args.model)
elif args.phase == 'val':
suffix = ''
if args.split_tgt != 'val':
suffix = '_{}'.format(args.split_tgt)
args.log_file = '{}{}.log'.format(osp.splitext(osp.basename(args.weights))[0], suffix)
else:
raise NotImplementedError('Unknown phase: {}'.format(args.phase))
model_specs = parse_model_label(args)
if args.data_root is None:
args.data_root = osp.join('data', model_specs['dataset'])
return args, model_specs
def get_dataset_specs_tgt(args, model_specs):
dataset = args.dataset
dataset_tgt = args.dataset_tgt
meta = {}
mine_id = None
mine_id_priority = None
mine_port = args.mine_port
mine_th = args.mine_thresh
cmap_path = 'data/shared/cmap.pkl'
cache_images = args.phase == 'train'
mx_workspace = 1650
sys.path.insert(0, 'data/cityscapesscripts/helpers')
if args.phase == 'train':
mine_id = np.load(args.mine_id_address + '/mine_id.npy')
mine_id_priority = np.load(args.mine_id_address + '/mine_id_priority.npy')
mine_th = np.zeros(len(mine_id)) # trainId starts from 0
if dataset == 'gta' and dataset_tgt == 'cityscapes':
from labels import id2label, trainId2label
#
label_2_id_tgt = 255 * np.ones((256,))
for l in id2label:
if l in (-1, 255):
continue
label_2_id_tgt[l] = id2label[l].trainId
id_2_label_tgt = np.array([trainId2label[_].id for _ in trainId2label if _ not in (-1, 255)])
valid_labels_tgt = sorted(set(id_2_label_tgt.ravel()))
id_2_label_src = id_2_label_tgt
label_2_id_src = label_2_id_tgt
valid_labels_src = valid_labels_tgt
#
cmap = np.zeros((256, 3), dtype=np.uint8)
for i in id2label.keys():
cmap[i] = id2label[i].color
#
ident_size = True
#
#max_shape_src = np.array((1052, 1914))
max_shape_src = np.array((1024, 2048))
max_shape_tgt = np.array((1024, 2048))
#
if args.split in ('train+', 'trainval+'):
cache_images = False
#
if args.phase in ('val',):
mx_workspace = 8192
elif dataset == 'synthia' and dataset_tgt == 'cityscapes':
from labels_cityscapes_synthia import id2label as id2label_tgt
from labels_cityscapes_synthia import trainId2label as trainId2label_tgt
from labels_synthia import id2label as id2label_src
label_2_id_src = 255 * np.ones((256,))
for l in id2label_src:
if l in (-1, 255):
continue
label_2_id_src[l] = id2label_src[l].trainId
label_2_id_tgt = 255 * np.ones((256,))
for l in id2label_tgt:
if l in (-1, 255):
continue
label_2_id_tgt[l] = id2label_tgt[l].trainId
id_2_label_tgt = np.array([trainId2label_tgt[_].id for _ in trainId2label_tgt if _ not in (-1, 255)])
valid_labels_tgt = sorted(set(id_2_label_tgt.ravel()))
id_2_label_src = None
valid_labels_src = None
#
cmap = np.zeros((256, 3), dtype=np.uint8)
for i in id2label_tgt.keys():
cmap[i] = id2label_tgt[i].color
#
ident_size = True
#
max_shape_src = np.array((760, 1280))
max_shape_tgt = np.array((1024, 2048))
#
if args.split in ('train+', 'trainval+'):
cache_images = False
#
if args.phase in ('val',):
mx_workspace = 8192
else:
        raise NotImplementedError('Unknown dataset: {}'.format(args.dataset))
if cmap is None and cmap_path is not None:
if osp.isfile(cmap_path):
with open(cmap_path) as f:
cmap = cPickle.load(f)
meta['gpus'] = args.gpus
meta['mine_port'] = mine_port
meta['mine_id'] = mine_id
meta['mine_id_priority'] = mine_id_priority
meta['mine_th'] = mine_th
meta['label_2_id_tgt'] = label_2_id_tgt
meta['id_2_label_tgt'] = id_2_label_tgt
meta['valid_labels_tgt'] = valid_labels_tgt
meta['label_2_id_src'] = label_2_id_src
meta['id_2_label_src'] = id_2_label_src
meta['valid_labels_src'] = valid_labels_src
meta['cmap'] = cmap
meta['ident_size'] = ident_size
meta['max_shape_src'] = meta.get('max_shape_src', max_shape_src)
meta['max_shape_tgt'] = meta.get('max_shape_tgt', max_shape_tgt)
meta['cache_images'] = args.cache_images if args.cache_images is not None else cache_images
meta['mx_workspace'] = mx_workspace
return meta
'''def _get_metric():
def _eval_func(label, pred):
# global sxloss
gt_label = label.ravel()
valid_flag = gt_label != 255
labels = gt_label[valid_flag].astype(int)
n,c,h,w = pred.shape
valid_inds = np.where(valid_flag)[0]
probmap = np.rollaxis(pred.astype(np.float32),1).reshape((c, -1))
valid_probmap = probmap[labels, valid_inds]
log_valid_probmap = -np.log(valid_probmap+1e-32)
sum_metric = log_valid_probmap.sum()
num_inst = valid_flag.sum()
return (sum_metric, num_inst + (num_inst == 0))
return mx.metric.CustomMetric(_eval_func, 'loss')'''
class Multi_Accuracy(mx.metric.EvalMetric):
"""Calculate accuracies of multi label"""
def __init__(self, num=None):
self.num = num
super(Multi_Accuracy, self).__init__('multi-accuracy')
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.num_inst = 0 if self.num is None else [0] * self.num
self.sum_metric = 0.0 if self.num is None else [0.0] * self.num
def update(self, labels, preds):
mx.metric.check_label_shapes(labels, preds)
if self.num is not None:
assert len(labels) == self.num
for i in range(len(labels)):
#print ('I am here in accuracy')
#pred_label = mx.nd.argmax_channel(preds[i]).asnumpy().astype('int32')
pred_label = preds[i].asnumpy().astype('float')
label = labels[i].asnumpy().astype('int32')
mx.metric.check_label_shapes(label, pred_label)
if self.num is None:
#self.sum_metric += (pred_label.flat == label.flat).sum()
#self.num_inst += len(pred_label.flat)
outEval = _eval_func(label, pred_label)
self.sum_metric = outEval[0]
self.num_inst = outEval[1]
else:
if i==0:
outEval = _eval_func(label, pred_label)
self.sum_metric[i] = outEval[0]
self.num_inst[i] = outEval[1]
else:
#self.sum_metric[i] = (pred_label.flat == label.flat).sum()
#print(label.shape, pred_label.shape, label, pred_label)
#self.sum_metric[i] = log_loss(label.flat, pred_label.flat)
self.sum_metric[i] = cross_entropy(label.flatten(), pred_label.flatten())
self.num_inst[i] = len(pred_label.flat)
#print self.sum_metric[i], self.num_inst[i]
def get(self):
"""Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num is None:
return super(Multi_Accuracy, self).get()
else:
return zip(*(('%s-task%d'%(self.name, i), float('nan') if self.num_inst[i] == 0
else self.sum_metric[i] / self.num_inst[i])
for i in range(self.num)))
def get_name_value(self):
"""Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
if self.num is None:
return super(Multi_Accuracy, self).get_name_value()
name, value = self.get()
return list(zip(name, value))
def _eval_func(label, pred):
# global sxloss
gt_label = label.ravel()
valid_flag = gt_label != 255
labels = gt_label[valid_flag].astype(int)
n,c,h,w = pred.shape
valid_inds = np.where(valid_flag)[0]
probmap = np.rollaxis(pred.astype(np.float32),1).reshape((c, -1))
valid_probmap = probmap[labels, valid_inds]
log_valid_probmap = -np.log(valid_probmap+1e-32)
sum_metric = log_valid_probmap.sum()
num_inst = valid_flag.sum()
return (sum_metric, num_inst + (num_inst == 0))
def cross_entropy(targets, predictions):
N = predictions.shape[0]
lo = np.log(predictions+ 1e-6)
#print predictions,lo
ce = -np.sum(targets*lo)/N
return ce
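# Quick sanity check (illustrative): cross_entropy(np.array([0., 1.]), np.array([0.2, 0.8]))
# returns approximately -log(0.8) / 2 ~= 0.1116.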
def _get_scalemeanstd():
if model_specs['net_type'] in ('rna',):
return (1.0 / 255,
np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3)),
np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3)))
return None, None, None
def _get_transformer_image():
scale, mean_, std_ = _get_scalemeanstd()
transformers = []
if scale > 0:
transformers.append(ts.ColorScale(np.single(scale)))
transformers.append(ts.ColorNormalize(mean_, std_))
return transformers
def _get_module(args, margs, dargs, net=None):
if net is None:
# the following lines show how to create symbols for our networks
if model_specs['net_type'] == 'rna':
from util1.symbol.symbol import cfg as symcfg
symcfg['lr_type'] = 'alex'
symcfg['workspace'] = dargs.mx_workspace
symcfg['bn_use_global_stats'] = True
if model_specs['net_name'] == 'a1':
from util1.symbol.resnet_v2 import fcrna_model_a1, fcrna_model_a1_1
#net = fcrna_model_a1(margs.classes, margs.feat_stride, bootstrapping=False)
net = fcrna_model_a1_1(margs.classes, margs.feat_stride, bootstrapping=False)
if net is None:
raise NotImplementedError('Unknown network: {}'.format(vars(margs)))
contexts = [mx.gpu(int(_)) for _ in args.gpus.split(',')]
#mod = mx.mod.Module(net, context=contexts)
mod = mx.mod.Module(net, context=contexts, label_names=['softmax_label', 'sigmoid_label'])
return mod
def _make_dirs(path):
if not osp.isdir(path):
os.makedirs(path)
def facc(label, pred):
pred = pred.argmax(1).ravel()
label = label.ravel()
return (pred == label).mean()
def fentropy(label, pred):
pred_source = pred[:, 1, :, :].ravel()
label = label.ravel()
return -(label * np.log(pred_source + 1e-12) + (1. - label) * np.log(1. - pred_source + 1e-12)).mean()
def _interp_preds_as_impl(num_classes, im_size, pred_stride, imh, imw, pred):
imh0, imw0 = im_size
pred = pred.astype(np.single, copy=False)
input_h, input_w = pred.shape[0] * pred_stride, pred.shape[1] * pred_stride
assert pred_stride >= 1.
this_interp_pred = np.array(Image.fromarray(pred).resize((input_w, input_h), Image.CUBIC))
if imh0 == imh:
interp_pred = this_interp_pred[:imh, :imw]
else:
interp_method = util.get_interp_method(imh, imw, imh0, imw0)
interp_pred = np.array(Image.fromarray(this_interp_pred[:imh, :imw]).resize((imw0, imh0), interp_method))
return interp_pred
def interp_preds_as(im_size, net_preds, pred_stride, imh, imw, threads=4):
num_classes = net_preds.shape[0]
worker = partial(_interp_preds_as_impl, num_classes, im_size, pred_stride, imh, imw)
if threads == 1:
ret = [worker(_) for _ in net_preds]
else:
pool = Pool(threads)
ret = pool.map(worker, net_preds)
pool.close()
return np.array(ret)
class ScoreUpdater(object):
def __init__(self, valid_labels, c_num, x_num, logger=None, label=None, info=None):
self._valid_labels = valid_labels
self._confs = np.zeros((c_num, c_num, x_num))
self._pixels = np.zeros((c_num, x_num))
self._logger = logger
self._label = label
self._info = info
@property
def info(self):
return self._info
def reset(self):
self._start = time.time()
self._computed = np.zeros((self._pixels.shape[1],))
self._confs[:] = 0
self._pixels[:] = 0
@staticmethod
def calc_updates(valid_labels, pred_label, label):
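        # builds a num_classes x num_classes confusion matrix (row = ground-truth class,
        # column = predicted class) together with the per-class ground-truth pixel counts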
num_classes = len(valid_labels)
pred_flags = [set(np.where((pred_label == _).ravel())[0]) for _ in valid_labels]
class_flags = [set(np.where((label == _).ravel())[0]) for _ in valid_labels]
conf = [len(class_flags[j].intersection(pred_flags[k])) for j in xrange(num_classes) for k in
xrange(num_classes)]
pixel = [len(class_flags[j]) for j in xrange(num_classes)]
return np.single(conf).reshape((num_classes, num_classes)), np.single(pixel)
def do_updates(self, conf, pixel, i, computed=True):
if computed:
self._computed[i] = 1
self._confs[:, :, i] = conf
self._pixels[:, i] = pixel
def update(self, pred_label, label, i, computed=True):
conf, pixel = ScoreUpdater.calc_updates(self._valid_labels, pred_label, label)
self.do_updates(conf, pixel, i, computed)
self.scores(i)
def scores(self, i=None, logger=None):
confs = self._confs
pixels = self._pixels
num_classes = pixels.shape[0]
x_num = pixels.shape[1]
class_pixels = pixels.sum(1)
class_pixels += class_pixels == 0
scores = confs[xrange(num_classes), xrange(num_classes), :].sum(1)
acc = scores.sum() / pixels.sum()
cls_accs = scores / class_pixels
class_preds = confs.sum(0).sum(1)
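        # per-class IoU: true positives / (ground-truth pixels + predicted pixels - true positives)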
ious = scores / (class_pixels + class_preds - scores)
logger = self._logger if logger is None else logger
if logger is not None:
if i is not None:
speed = 1. * self._computed.sum() / (time.time() - self._start)
logger.info('Done {}/{} with speed: {:.2f}/s'.format(i + 1, x_num, speed))
name = '' if self._label is None else '{}, '.format(self._label)
logger.info('{}pixel acc: {:.2f}%, mean acc: {:.2f}%, mean iou: {:.2f}%'. \
format(name, acc * 100, cls_accs.mean() * 100, ious.mean() * 100))
with util.np_print_options(formatter={'float': '{:5.2f}'.format}):
logger.info('\n{}'.format(cls_accs * 100))
logger.info('\n{}'.format(ious * 100))
return acc, cls_accs, ious
def overall_scores(self, logger=None):
acc, cls_accs, ious = self.scores(None, logger)
return acc, cls_accs.mean(), ious.mean()
def _train_impl(args, model_specs, logger):
if len(args.output) > 0:
_make_dirs(args.output)
# dataiter
dataset_specs_tgt = get_dataset_specs_tgt(args, model_specs)
scale, mean_, _ = _get_scalemeanstd()
if scale > 0:
mean_ /= scale
margs = argparse.Namespace(**model_specs)
dargs = argparse.Namespace(**dataset_specs_tgt)
# number of list_lines
split_filename = 'issegm1/data_list/{}/{}.lst'.format(margs.dataset, args.split)
num_source = 0
with open(split_filename) as f:
for item in f.readlines():
num_source = num_source + 1
#
batches_per_epoch = num_source // args.batch_images
# optimizer
assert args.to_epoch is not None
if args.stop_epoch is not None:
assert args.stop_epoch > args.from_epoch and args.stop_epoch <= args.to_epoch
else:
args.stop_epoch = args.to_epoch
from_iter = args.from_epoch * batches_per_epoch
to_iter = args.to_epoch * batches_per_epoch
lr_params = model_specs['lr_params']
base_lr = lr_params['base']
if lr_params['type'] == 'fixed':
scheduler = FixedScheduler()
elif lr_params['type'] == 'step':
left_step = []
for step in lr_params['args']['step']:
if from_iter > step:
base_lr *= lr_params['args']['factor']
continue
left_step.append(step - from_iter)
model_specs['lr_params']['step'] = left_step
scheduler = mx.lr_scheduler.MultiFactorScheduler(**lr_params['args'])
elif lr_params['type'] == 'linear':
scheduler = LinearScheduler(updates=to_iter + 1, frequency=50,
stop_lr=min(base_lr / 100., 1e-6),
offset=from_iter)
elif lr_params['type'] == 'poly':
scheduler = PolyScheduler(updates=to_iter + 1, frequency=50,
stop_lr=min(base_lr / 100., 1e-8),
power=0.9,
offset=from_iter)
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2)
optimizer_params = {
'learning_rate': base_lr,
'momentum': 0.9,
'wd': args.weight_decay,
'lr_scheduler': scheduler,
'rescale_grad': 1.0 / len(args.gpus.split(',')),
}
data_src_port = args.init_src_port
data_src_num = int(num_source * data_src_port)
mod = _get_module(args, margs, dargs)
addr_weights = args.weights # first weights should be xxxx_ep-0000.params!
addr_output = args.output
# initializer
net_args = None
net_auxs = None
###
if addr_weights is not None:
net_args, net_auxs = mxutil.load_params_from_file(addr_weights)
print ('feat_stride', margs.feat_stride)
####################################### training model
to_model = osp.join(addr_output, str(args.idx_round), '{}_ep'.format(args.model))
dataiter = FileIter(dataset=margs.dataset,
split=args.split,
data_root=args.data_root,
num_sel_source=data_src_num,
num_source=num_source,
seed_int=args.seed_int,
dataset_tgt=args.dataset_tgt,
split_tgt=args.split_tgt,
data_root_tgt=args.data_root_tgt,
sampler='random',
batch_images=args.batch_images,
meta=dataset_specs_tgt,
rgb_mean=mean_,
feat_stride=margs.feat_stride,
label_stride=margs.feat_stride,
origin_size=args.origin_size,
origin_size_tgt=args.origin_size_tgt,
crop_size=args.crop_size,
scale_rate_range=[float(_) for _ in args.scale_rate_range.split(',')],
transformer=None,
transformer_image=ts.Compose(_get_transformer_image()),
prefetch_threads=args.prefetch_threads,
prefetcher_type=args.prefetcher,
)
dataiter.reset()
#ad = dataiter.next()
#label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
#print (ad)
mod.fit(
dataiter,
eval_metric=Multi_Accuracy(2),
#eval_metric=_get_metric(),
batch_end_callback=mx.callback.log_train_metric(10, auto_reset=False),
epoch_end_callback=mx.callback.do_checkpoint(to_model),
kvstore=args.kvstore,
optimizer='sgd',
optimizer_params=optimizer_params,
initializer=initializer,
arg_params=net_args,
aux_params=net_auxs,
allow_missing=args.from_epoch == 0,
begin_epoch=args.from_epoch,
num_epoch=args.stop_epoch,
)
# @profile
# MST:
def _val_impl(args, model_specs, logger):
if len(args.output) > 0:
_make_dirs(args.output)
# dataiter
dataset_specs_tgt = get_dataset_specs_tgt(args, model_specs)
scale, mean_, _ = _get_scalemeanstd()
if scale > 0:
mean_ /= scale
#print (model_specs)
margs = argparse.Namespace(**model_specs)
dargs = argparse.Namespace(**dataset_specs_tgt)
mod = _get_module(args, margs, dargs)
addr_weights = args.weights # first weights should be xxxx_ep-0000.params!
addr_output = args.output
# current round index
cround = args.idx_round
net_args = None
net_auxs = None
###
if addr_weights is not None:
net_args, net_auxs = mxutil.load_params_from_file(addr_weights)
######
save_dir = osp.join(args.output, str(cround), 'results')
save_dir_self_train = osp.join(args.output, str(cround), 'self_train')
# pseudo labels
save_dir_pseudo_labelIds = osp.join(save_dir_self_train, 'pseudo_labelIds')
save_dir_pseudo_color = osp.join(save_dir_self_train, 'pseudo_color')
# without sp
save_dir_nplabelIds = osp.join(save_dir, 'nplabelIds')
save_dir_npcolor = osp.join(save_dir, 'npcolor')
# probability map
save_dir_probmap = osp.join(args.output, 'probmap')
save_dir_stats = osp.join(args.output, 'stats')
_make_dirs(save_dir)
_make_dirs(save_dir_pseudo_labelIds)
_make_dirs(save_dir_pseudo_color)
_make_dirs(save_dir_nplabelIds)
_make_dirs(save_dir_npcolor)
_make_dirs(save_dir_probmap)
_make_dirs(save_dir_stats)
if args.with_prior == 'True':
# with sp
save_dir_splabelIds = osp.join(save_dir_self_train, 'splabelIds')
save_dir_spcolor = osp.join(save_dir_self_train, 'spcolor')
_make_dirs(save_dir_splabelIds)
_make_dirs(save_dir_spcolor)
if args.kc_policy == 'cb':
# reweighted prediction map
save_dir_rwlabelIds = osp.join(save_dir_self_train, 'rwlabelIds')
save_dir_rwcolor = osp.join(save_dir_self_train, 'rwcolor')
_make_dirs(save_dir_rwlabelIds)
_make_dirs(save_dir_rwcolor)
######
dataset_tgt = model_specs['dataset_tgt']
image_list_tgt, label_gt_list_tgt,image_tgt_list = parse_split_file_tgt(margs.dataset_tgt, args.split_tgt)
has_gt = args.split_tgt in ('train', 'val',)
crop_sizes = sorted([int(_) for _ in args.test_scales.split(',')])[::-1]
crop_size = crop_sizes[0]
assert len(crop_sizes) == 1, 'multi-scale testing not implemented'
label_stride = margs.feat_stride
x_num = len(image_list_tgt)
do_forward = True
    # for all images that have the same resolution
if do_forward:
batch = None
transformers = [ts.Scale(crop_size, Image.CUBIC, False)]
transformers += _get_transformer_image()
transformer = ts.Compose(transformers)
scorer_np = ScoreUpdater(dargs.valid_labels_tgt, margs.classes, x_num, logger)
scorer_np.reset()
# with prior
if args.with_prior == 'True':
scorer = ScoreUpdater(dargs.valid_labels_tgt, margs.classes, x_num, logger)
scorer.reset()
done_count = 0 # for multi-scale testing
num_classes = margs.classes
init_tgt_port = float(args.init_tgt_port)
# class-wise
cls_exist_array = np.zeros([1, num_classes], dtype=int)
cls_thresh = np.zeros([num_classes]) # confidence thresholds for all classes
cls_size = np.zeros([num_classes]) # number of predictions in each class
array_pixel = 0.0
# prior
if args.with_prior == 'True':
in_path_prior = 'spatial_prior/{}/prior_array.mat'.format(args.dataset)
sprior = scipy.io.loadmat(in_path_prior)
prior_array = sprior["prior_array"].astype(np.float32)
#prior_array = np.maximum(prior_array,0)
############################ network forward
for i in xrange(x_num):
start = time.time()
############################ network forward on single image (from official ResNet-38 implementation)
sample_name = osp.splitext(osp.basename(image_list_tgt[i]))[0]
im_path = osp.join(args.data_root_tgt, image_list_tgt[i])
rim = np.array(Image.open(im_path).convert('RGB'), np.uint8)
if do_forward:
im = transformer(rim)
imh, imw = im.shape[:2]
# init
if batch is None:
if dargs.ident_size:
input_h = make_divisible(imh, margs.feat_stride)
input_w = make_divisible(imw, margs.feat_stride)
else:
input_h = input_w = make_divisible(crop_size, margs.feat_stride)
label_h, label_w = input_h / label_stride, input_w / label_stride
test_steps = args.test_steps
pred_stride = label_stride / test_steps
pred_h, pred_w = label_h * test_steps, label_w * test_steps
input_data = np.zeros((1, 3, input_h, input_w), np.single)
input_label = 255 * np.ones((1, label_h * label_w), np.single)
#dataiter_tgt = mx.io.NDArrayIter(input_data, input_label)
input_label2 = np.ones((1, 19), np.single)
label = {'softmax_label':input_label, 'sigmoid_label':input_label2}
dataiter_tgt = mx.io.NDArrayIter(input_data, label)
batch = dataiter_tgt.next()
mod.bind(dataiter_tgt.provide_data, dataiter_tgt.provide_label, for_training=False, force_rebind=True)
if not mod.params_initialized:
mod.init_params(arg_params=net_args, aux_params=net_auxs)
nim = np.zeros((3, imh + label_stride, imw + label_stride), np.single)
sy = sx = label_stride // 2
nim[:, sy:sy + imh, sx:sx + imw] = im.transpose(2, 0, 1)
net_preds = np.zeros((margs.classes, pred_h, pred_w), np.single)
sy = sx = pred_stride // 2 + np.arange(test_steps) * pred_stride
for ix in xrange(test_steps):
for iy in xrange(test_steps):
input_data = np.zeros((1, 3, input_h, input_w), np.single)
input_data[0, :, :imh, :imw] = nim[:, sy[iy]:sy[iy] + imh, sx[ix]:sx[ix] + imw]
batch.data[0] = mx.nd.array(input_data)
mod.forward(batch, is_train=False)
this_call_preds = mod.get_outputs()[0].asnumpy()[0]
if args.test_flipping:
batch.data[0] = mx.nd.array(input_data[:, :, :, ::-1])
mod.forward(batch, is_train=False)
# average the original and flipped image prediction
this_call_preds = 0.5 * (
this_call_preds + mod.get_outputs()[0].asnumpy()[0][:, :, ::-1])
net_preds[:, iy:iy + pred_h:test_steps, ix:ix + pred_w:test_steps] = this_call_preds
interp_preds_np = interp_preds_as(rim.shape[:2], net_preds, pred_stride, imh, imw)
########################### #save predicted labels and confidence score vectors in target domains
# no prior prediction with trainIDs
pred_label_np = interp_preds_np.argmax(0)
# no prior prediction with labelIDs
if dargs.id_2_label_tgt is not None:
pred_label_np = dargs.id_2_label_tgt[pred_label_np]
# no prior color prediction
im_to_save_np = Image.fromarray(pred_label_np.astype(np.uint8))
im_to_save_npcolor = im_to_save_np.copy()
if dargs.cmap is not None:
im_to_save_npcolor.putpalette(dargs.cmap.ravel())
# save no prior prediction with labelIDs and colors
out_path_np = osp.join(save_dir_nplabelIds, '{}.png'.format(sample_name))
out_path_npcolor = osp.join(save_dir_npcolor, '{}.png'.format(sample_name))
im_to_save_np.save(out_path_np)
im_to_save_npcolor.save(out_path_npcolor)
# with prior
if args.with_prior == 'True':
probmap = np.multiply(prior_array,interp_preds_np).astype(np.float32)
elif args.with_prior == 'False':
probmap = interp_preds_np.copy().astype(np.float32)
pred_label = probmap.argmax(0)
probmap_max = np.amax(probmap, axis=0)
############################ save confidence scores of target domain as class-wise vectors
for idx_cls in np.arange(0, num_classes):
idx_temp = pred_label == idx_cls
sname = 'array_cls' + str(idx_cls)
if not (sname in locals()):
exec ("%s = np.float32(0)" % sname)
if idx_temp.any():
cls_exist_array[0, idx_cls] = 1
probmap_max_cls_temp = probmap_max[idx_temp].astype(np.float32)
len_cls = probmap_max_cls_temp.size
# downsampling by rate 4
probmap_cls = probmap_max_cls_temp[0:len_cls:4]
exec ("%s = np.append(%s,probmap_cls)" % (sname, sname))
############################ save prediction
# save prediction probablity map
out_path_probmap = osp.join(save_dir_probmap, '{}.npy'.format(sample_name))
np.save(out_path_probmap, probmap.astype(np.float32))
# save predictions with spatial priors, if sp exist.
if args.with_prior == 'True':
if dargs.id_2_label_tgt is not None:
pred_label = dargs.id_2_label_tgt[pred_label]
im_to_save_sp = Image.fromarray(pred_label.astype(np.uint8))
im_to_save_spcolor = im_to_save_sp.copy()
if dargs.cmap is not None: # save color seg map
im_to_save_spcolor.putpalette(dargs.cmap.ravel())
out_path_sp = osp.join(save_dir_splabelIds, '{}.png'.format(sample_name))
out_path_spcolor = osp.join(save_dir_spcolor, '{}.png'.format(sample_name))
im_to_save_sp.save(out_path_sp)
im_to_save_spcolor.save(out_path_spcolor)
# log information
done_count += 1
if not has_gt:
logger.info(
'Done {}/{} with speed: {:.2f}/s'.format(i + 1, x_num, 1. * done_count / (time.time() - start)))
continue
if args.split_tgt in ('train', 'val'):
# evaluate with ground truth
label_path = osp.join(args.data_root_tgt, label_gt_list_tgt[i])
label = np.array(Image.open(label_path), np.uint8)
if args.with_prior == 'True':
scorer.update(pred_label, label, i)
scorer_np.update(pred_label_np, label, i)
# save target training list
fout = 'issegm1/data_list/{}/{}_training_gpu{}.lst'.format(args.dataset_tgt,args.split_tgt,args.gpus)
fo = open(fout, "w")
for idx_image in range(x_num):
sample_name = osp.splitext(osp.basename(image_list_tgt[idx_image]))[0]
fo.write(image_tgt_list[idx_image] + '\t' + osp.join(save_dir_pseudo_labelIds, '{}.png'.format(sample_name)) + '\n')
fo.close()
############################ kc generation
start_sort = time.time()
# threshold for each class
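    # 'global' ranks the confidences of all classes together and applies one shared threshold,
    # while 'cb' (class-balanced) ranks confidences within each class separately so that every
    # class keeps roughly the same proportion (init_tgt_port) of its own predictions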
if args.kc_policy == 'global':
for idx_cls in np.arange(0,num_classes):
tname = 'array_cls' + str(idx_cls)
exec ("array_pixel = np.append(array_pixel,%s)" % tname) # reverse=False for ascending losses and reverse=True for descending confidence
array_pixel = sorted(array_pixel, reverse = True)
len_cls = len(array_pixel)
len_thresh = int(math.floor(len_cls * init_tgt_port))
cls_size[:] = len_cls
cls_thresh[:] = array_pixel[len_thresh-1].copy()
array_pixel = 0.0
if args.kc_policy == 'cb':
for idx_cls in np.arange(0, num_classes):
tname = 'array_cls' + str(idx_cls)
if cls_exist_array[0, idx_cls] == 1:
exec("%s = sorted(%s,reverse=True)" % (tname, tname)) # reverse=False for ascending losses and reverse=True for descending confidence
exec("len_cls = len(%s)" % tname)
cls_size[idx_cls] = len_cls
len_thresh = int(math.floor(len_cls * init_tgt_port))
if len_thresh != 0:
exec("cls_thresh[idx_cls] = %s[len_thresh-1].copy()" % tname)
exec("%s = %d" % (tname, 0.0))
# threshold for mine_id with priority
mine_id_priority = np.nonzero(cls_size / np.sum(cls_size) < args.mine_thresh)[0]
# chosen mine_id
mine_id_all = np.argsort(cls_size / np.sum(cls_size))
mine_id = mine_id_all[:args.mine_id_number]
print(mine_id)
np.save(save_dir_stats + '/mine_id.npy', mine_id)
np.save(save_dir_stats + '/mine_id_priority.npy', mine_id_priority)
np.save(save_dir_stats + '/cls_thresh.npy', cls_thresh)
np.save(save_dir_stats + '/cls_size.npy', cls_size)
logger.info('Kc determination done in %.2f s.', time.time() - start_sort)
############################ pseudo-label generation
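    # each class probability map is divided by that class's threshold; a pixel keeps its
    # pseudo-label only if the largest reweighted score reaches 1, otherwise it is assigned
    # the ignore label (0)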
for i in xrange(x_num):
sample_name = osp.splitext(osp.basename(image_list_tgt[i]))[0]
sample_pseudo_label_name = osp.join(save_dir_pseudo_labelIds, '{}.png'.format(sample_name))
sample_pseudocolor_label_name = osp.join(save_dir_pseudo_color, '{}.png'.format(sample_name))
out_path_probmap = osp.join(save_dir_probmap, '{}.npy'.format(sample_name))
probmap = np.load(out_path_probmap)
rw_probmap = np.zeros(probmap.shape, np.single)
cls_thresh[cls_thresh == 0] = 1.0 # cls_thresh = 0 means there is no prediction in this class
############# pseudo-label assignment
for idx_cls in np.arange(0, num_classes):
cls_thresh_temp = cls_thresh[idx_cls]
cls_probmap = probmap[idx_cls,:,:]
cls_rw_probmap = np.true_divide(cls_probmap,cls_thresh_temp)
rw_probmap[idx_cls,:,:] = cls_rw_probmap.copy()
rw_probmap_max = np.amax(rw_probmap, axis=0)
pseudo_label = np.argmax(rw_probmap,axis=0)
############# pseudo-label selection
idx_unconfid = rw_probmap_max < 1
idx_confid = rw_probmap_max >= 1
# pseudo-labels with labelID
pseudo_label = pseudo_label.astype(np.uint8)
pseudo_label_labelID = dargs.id_2_label_tgt[pseudo_label]
rw_pred_label = pseudo_label_labelID.copy()
# ignore label assignment, compatible with labelIDs
pseudo_label_labelID[idx_unconfid] = 0
############# save pseudo-label
im_to_save_pseudo = Image.fromarray(pseudo_label_labelID.astype(np.uint8))
im_to_save_pseudocol = im_to_save_pseudo.copy()
if dargs.cmap is not None: # save segmentation prediction with color
im_to_save_pseudocol.putpalette(dargs.cmap.ravel())
out_path_pseudo = osp.join(save_dir_pseudo_labelIds, '{}.png'.format(sample_name))
out_path_colpseudo = osp.join(save_dir_pseudo_color, '{}.png'.format(sample_name))
im_to_save_pseudo.save(out_path_pseudo)
im_to_save_pseudocol.save(out_path_colpseudo)
############# save reweighted pseudo-label in cbst
if args.kc_policy == 'cb':
im_to_save_rw = Image.fromarray(rw_pred_label.astype(np.uint8))
im_to_save_rwcolor = im_to_save_rw.copy()
if dargs.cmap is not None:
im_to_save_rwcolor.putpalette(dargs.cmap.ravel())
out_path_rw = osp.join(save_dir_rwlabelIds, '{}.png'.format(sample_name))
out_path_rwcolor = osp.join(save_dir_rwcolor, '{}.png'.format(sample_name))
# save no prior prediction with labelIDs and colors
im_to_save_rw.save(out_path_rw)
im_to_save_rwcolor.save(out_path_rwcolor)
## remove probmap folder
import shutil
shutil.rmtree(save_dir_probmap)
##
if __name__ == "__main__":
util.cfg['choose_interpolation_method'] = True
args, model_specs = parse_args()
if len(args.output) > 0:
_make_dirs(args.output)
logger = util.set_logger(args.output, args.log_file, args.debug)
logger.info('start with arguments %s', args)
logger.info('and model specs %s', model_specs)
if args.phase == 'train':
_train_impl(args, model_specs, logger)
elif args.phase == 'val':
_val_impl(args, model_specs, logger)
else:
raise NotImplementedError('Unknown phase: {}'.format(args.phase))
| [
"noreply@github.com"
] | noreply@github.com |
a33b2f9f3cd62ddd7189114556f08b0144aad7c6 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/tenth/rank_2p49_Q.py | c80b9b7c96acce81b347d895d8286c78c576e7d8 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2p49.csv'
identifier = 'Q'
thresholdCoef = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenth/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
3095ad9d0178728b8363be5fa150c0ea43e6ecea | 9c902c6bc6ea2cce71195acd5baa8f44ab928eb6 | /pythonapp/imgtxt/admin.py | 0124dec01736c26d6587dbe332000f3719f39cdc | [] | no_license | mogilivishal/Verzeo-OCR-Project | a383b56014e13dfef598a191012fc51dc9579624 | 8b34a6c8b323e0b55c7902f2c4f873a1e4ce04e7 | refs/heads/master | 2022-04-17T20:32:45.724447 | 2020-02-16T17:38:52 | 2020-02-16T17:38:52 | 240,932,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.contrib import admin
from .models import Document
admin.site.register(Document) | [
"noreply@github.com"
] | noreply@github.com |
7f9a2d07182faa806f9337f02a6a0ce4035514fd | 0676f6e4d3510a0305d29aa0b1fe740d538d3b63 | /Python/SImplifyPline/CleanUpPolyline.py | 1ce7d7116eb272886ed20d4186ae8a3b571c98fb | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | pgolay/PG_Scripts | f70ffe7e5ca07acd6f4caedc9a9aec566542da7c | 796704a7daa6ac222a40bb02afdb599f74a6b0d4 | refs/heads/master | 2021-01-19T16:53:41.525879 | 2017-02-07T18:26:10 | 2017-02-07T18:26:10 | 2,730,362 | 9 | 1 | null | 2016-12-30T17:58:08 | 2011-11-08T00:04:33 | Python | UTF-8 | Python | false | false | 1,898 | py | import Rhino
import scriptcontext as sc
"""
Cleans up by collapsing tiny segments in a polyline.
"""
def CleanUpPolyline():
while True:
tol = sc.doc.ModelAbsoluteTolerance
if sc.sticky.has_key("PLineSimplifyTol"):
tol = sc.sticky["PLineSimplifyTol"]
go = Rhino.Input.Custom.GetObject()
go.AcceptNumber(True, False)
go.GeometryFilter = Rhino.DocObjects.ObjectType.Curve
opDblTol = Rhino.Input.Custom.OptionDouble(tol)
go.AddOptionDouble("SegmentTolerance",opDblTol)
result = go.Get()
if( go.CommandResult() != Rhino.Commands.Result.Success ):
return
if result == Rhino.Input.GetResult.Object:
if type(go.Object(0).Geometry()) == Rhino.Geometry.PolylineCurve:
curve = go.Object(0).Geometry()
rc, pLine = curve.TryGetPolyline()
pLineId = go.Object(0).ObjectId
else:
sc.doc.Objects.UnselectAll()
sc.doc.Views.Redraw()
print "Sorry, that was not a polyline."
continue
break
elif result == Rhino.Input.GetResult.Option:
tol = opDblTol.CurrentValue
sc.sticky["PLineSimplifyTol"] = tol
continue
elif result == Rhino.Input.GetResult.Number:
tol = go.Number()
sc.sticky["PLineSimplifyTol"] = tol
continue
break
count = pLine.CollapseShortSegments(tol)
if count !=0:
sc.doc.Objects.Replace(pLineId, pLine)
sc.doc.Views.Redraw()
print str(count) + " short segments were collapsed."
else:
print "No short segments were collapsed."
pass
if __name__ == "__main__":
CleanUpPolyline() | [
"noreply@github.com"
] | noreply@github.com |
ede12f3384950d410a2e5b5c0bb5ba2b28076ac9 | 6c67e2ae195521910fd3d8180fc5a70b9f60db81 | /controllers/utils/rtsq_library/rtsq_level.py | bbe95532b2799521638fa5f25075270c273de949 | [
"MIT"
] | permissive | zeroday0619/Real-Time-Delivery-Query-API | be8b7f0cd74e6c8651fc034064f51e6ec20bac17 | fc2f973c205fe453f77ae27dcd99ce3c2e84528d | refs/heads/master | 2020-09-08T01:43:08.857874 | 2019-11-17T22:32:44 | 2019-11-17T22:32:44 | 220,975,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | def level(resp):
"""
Args:
        resp: dict with a 'level' field (the code below compares it against the integers 1 to 6)
    Returns:
        [level 1: preparing for delivery (배송준비중), 2: picked up (집화완료), 3: in transit (배송중), 4: arrived at branch (지점 도착), 5: out for delivery (배송출발), 6: delivered (배송 완료)]
"""
if resp['level'] == 1:
return {
"code": 1,
"level": "배송 준비중"
}
elif resp['level'] == 2:
return {
"code": 2,
"level": "집화 완료"
}
elif resp['level'] == 3:
return {
"code": 3,
"level": "배송중"
}
elif resp['level'] == 4:
return {
"code": 4,
"level": "지점 도착"
}
elif resp['level'] == 5:
return {
"code": 5,
"level": "배송 출발"
}
elif resp['level'] == 6:
return {
"code": 6,
"level": "배송 완료"
}
else:
return {
"code": 0,
"level": "Internal System Error"
}
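# usage sketch with a hypothetical payload: level({"level": 3}) returns
# {"code": 3, "level": "배송중"} (in transit)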
| [
"noreply@github.com"
] | noreply@github.com |
ee3452616d5ab280c04845cc2164cbdf6db586d2 | 9032e88ca0c90a15b96d2142d2629484cdf469b6 | /py_controls/MemoryManager.py | fd1bc79f0d91f58ce62c4bd3349152244c888609 | [
"MIT"
] | permissive | CyberCrunch/DU_AI_Gov | 856db1db4e67e37ac8c8f05fc096a9bbc50027a8 | a9fcf3b603c39bf0704df172a6745620d1d3c06b | refs/heads/master | 2021-06-20T12:46:35.360703 | 2017-08-08T19:18:14 | 2017-08-08T19:18:14 | 77,530,730 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,088 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 30 15:52:43 2016
@author: robin
"""
import json
from enum import Enum #testing possible enums for readability...(not implemeted)
class NrH(Enum): #human data formtat for Json
name = 0
human = 1
job = 2
status = 3
position = 4
money = 5
class NrL(Enum): #location data formtat for Json
name = 0
location = 1
planet = 2
structure = 3
longitude = 4
latitude = 5
resource = 6
reward = 7
class SpH(Enum): #human string formtat for registration
name = 0
job = 1
class SpL(Enum): #location string formtat for registration
name = 0
planet = 1
structure = 2
longitude = 3
latitude = 4
def regHuman(msg):
splitStr = msg.split()
if(len(splitStr) != 2):
return "Invalid Parameters, please use Format: !reg YourName YourJob"
with open('memoryDB.json', 'r+') as json_file:
json_data = json.load(json_file)
json_data[splitStr[SpH.name.value]] = ['Human', splitStr[SpH.job.value],"idle", "unknownPos", 0]
json_file.seek(0, 0)
json_file.write(json.dumps(json_data, indent=4))
json_file.truncate()
return ("New human registered: " +msg)
def regLocation(msg):
splitStr = msg.split()
if(len(splitStr) != 5):
return ("Invalid Parameters, please use Format: !geodata name planet type longitude latitude")
with open('memoryDB.json', 'r+') as json_file:
json_data = json.load(json_file)
json_data[splitStr[SpL.name.value]] = ['Location', splitStr[SpL.planet.value], splitStr[SpL.structure.value], splitStr[SpL.longitude.value], splitStr[SpL.latitude.value], "default", 0]
json_file.seek(0, 0)
json_file.write(json.dumps(json_data, indent=4))
json_file.truncate()
return ("New location registered: " +msg)
def getDatabase():
with open('memoryDB.json', 'r') as json_file:
json_data = json.load(json_file)
return(json.dumps(json_data, indent=4, sort_keys=True)) | [
"noreply@github.com"
] | noreply@github.com |
6e1066a32d3b678c93a683c91c32ca9925549774 | 72d010d00355fc977a291c29eb18aeb385b8a9b0 | /MPK261/__init__.py | 1878e1129184af07da8510e9e370e01adae46916 | [] | no_license | maratbakirov/AbletonLive10_MIDIRemoteScripts | bf0749c5c4cce8e83b23f14f671e52752702539d | ed1174d9959b20ed05fb099f0461bbc006bfbb79 | refs/heads/master | 2021-06-16T19:58:34.038163 | 2021-05-09T11:46:46 | 2021-05-09T11:46:46 | 203,174,328 | 0 | 0 | null | 2019-08-19T13:04:23 | 2019-08-19T13:04:22 | null | UTF-8 | Python | false | false | 741 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/MPK261/__init__.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
from .MPK261 import MPK261
from _Framework.Capabilities import controller_id, inport, outport, CONTROLLER_ID_KEY, PORTS_KEY, NOTES_CC, SCRIPT, REMOTE
def get_capabilities():
return {CONTROLLER_ID_KEY: controller_id(vendor_id=2536, product_ids=[
37], model_name='MPK261'),
PORTS_KEY: [
inport(props=[NOTES_CC, SCRIPT, REMOTE]),
outport(props=[SCRIPT, REMOTE])]}
def create_instance(c_instance):
return MPK261(c_instance)
| [
"julien@julienbayle.net"
] | julien@julienbayle.net |
130a1da7648c1cb9b3d0bdc2b94793d83b2e1729 | 999a7707806f941d334170e9909a268d102929b2 | /yelpCNN.py | 3057ac376eecfe679a7625817028c878379593e2 | [] | no_license | wanaaaa/yelpCNN1D | 7e089ab4ca60e3cf478a6d5b0a5a3b3e80253ba4 | 2f1f1ad9b8101d7a52f2f3c4d01d92e3f197b19b | refs/heads/main | 2023-02-12T20:54:31.046391 | 2021-01-10T18:12:19 | 2021-01-10T18:12:19 | 328,447,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | # https://chriskhanhtran.github.io/posts/cnn-sentence-classification/
from functionClass import *
from gensim.models import Word2Vec
import torch
import torch.optim as optim
device = 'cuda'
rateReviewTrainList, rateReviewTestList, maxListCount = dataRead()
xyDataLoader = DataLoaderFun(rateReviewTrainList, maxListCount, batchSize=2500)
textCNNmodel = trainFun(xyDataLoader, maxListCount, epochs=20)
# textCNNmodel = TextCnn(maxListCount).cuda(device=device)
textCNNmodel = TextCnn(maxListCount).cpu()
textCNNmodel.load_state_dict(torch.load('traindTextCNNmodel.model'))
textCNNmodel.eval()
# ================================================
# ================================================
# ================================================
xyTestDataLoader = DataLoaderFun(rateReviewTestList, maxListCount, batchSize=1)
for epoch in range(1):
# print("num of epochs->", epoch)
for step, batch in enumerate(xyTestDataLoader):
x_test, y_test = tuple(t.to('cpu') for t in batch)
y_pridict = textCNNmodel(x_test)
print("y_pridict->", y_pridict, 'y_test->', y_test)
# break
torch.cuda.empty_cache() | [
"noreply@github.com"
] | noreply@github.com |
2be33a204326b77eed20224274574b433213be6a | 73501b9e3623c3a9338306dbe52d1d89700f3d91 | /upload_this_on_arduino/pyduino.py | 2e4bf4eb623b2c987b4a395798e2605767cf5739 | [] | no_license | rouanro/PS | 72af2d8f5f3d1c628b8ad599c244235781b04c61 | a474d5ac9d23d50388c1811ddf256efa408b33d6 | refs/heads/master | 2020-03-18T21:57:12.402332 | 2018-05-29T15:19:15 | 2018-05-29T15:19:15 | 135,315,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,357 | py | """
A library to interface Arduino through serial connection
"""
import serial
import smtplib
from email.message import EmailMessage
class Arduino():
"""
Models an Arduino connection
"""
def __init__(self, serial_port='/dev/ttyACM0', baud_rate=9600,
read_timeout=5):
"""
Initializes the serial connection to the Arduino board
"""
self.conn = serial.Serial(serial_port, baud_rate)
self.conn.timeout = read_timeout # Timeout for readline()
def set_pin_mode(self, pin_number, mode):
"""
Performs a pinMode() operation on pin_number
Internally sends b'M{mode}{pin_number} where mode could be:
- I for INPUT
- O for OUTPUT
- P for INPUT_PULLUP MO13
"""
# command = (''.join(('M',mode,str(pin_number)))).encode()
#print 'set_pin_mode =',command,(''.join(('M',mode,str(pin_number))))
# self.conn.write(command)
def digital_read(self, pin_number):
"""
Performs a digital read on pin_number and returns the value (1 or 0)
Internally sends b'RD{pin_number}' over the serial connection
"""
command = (''.join(('RD', str(pin_number)))).encode()
#self.conn.write(command)
line_received = self.conn.readline().decode().strip()
header, value = line_received.split(':') # e.g. D13:1
if header == ('D'+ str(pin_number)):
# If header matches
return int(value)
def digital_write(self, pin_number, digital_value):
"""
Writes the digital_value on pin_number
Internally sends b'WD{pin_number}:{digital_value}' over the serial
connection
"""
command = (''.join(('WD', str(pin_number), ':',
str(digital_value)))).encode()
#self.conn.write(command)
def analog_read(self, pin_number):
"""
Performs an analog read on pin_number and returns the value (0 to 1023)
Internally sends b'RA{pin_number}' over the serial connection
"""
command = (''.join(('RA', str(pin_number)))).encode()
self.conn.write(command)
print(command)
line_received = self.conn.readline().decode().strip()
#header, value = line_received.split(':') # e.g. A4:1
if line_received[0:2] == ("A0"):
value = line_received[3:]
# If header matches
return int(value)
if line_received[0:2] == ("A4"):
value = line_received[3:]
return value
# me == the sender's email address
# you == the recipient's email address
# msg = EmailMessage()
# msg['Subject'] = 'Teeeeeeeeeeest'
# msg['From'] = 'benimagh@yahoo.com'
# msg['To'] = 'beniaminmaghis@gmail.com'
# Send the message via our own SMTP server.
# s = smtplib.SMTP('localhost')
# s.send_message(msg)
# s.quit()
def analog_write(self, pin_number, analog_value):
"""
Writes the analog value (0 to 255) on pin_number
Internally sends b'WA{pin_number}:{analog_value}' over the serial
connection
"""
command = (''.join(('WA', str(pin_number), ':',
str(analog_value)))).encode()
#self.conn.write(command)
def send_message(self, message):
command = message.encode()
self.conn.write(command)
def send_email(self, user, pwd, recipient, subject, body):
FROM = user
TO = recipient if isinstance(recipient, list) else [recipient]
SUBJECT = subject
TEXT = body
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(user, pwd)
server.sendmail(FROM, TO, message)
server.close()
print('successfully sent the mail')
except:
print("failed to send mail")
def close(self):
"""
To ensure we are properly closing our connection to the
Arduino device.
"""
self.conn.close()
print ('Connection to Arduino closed')
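# minimal usage sketch (the port name and pin number are assumptions; adjust for your setup):
#   board = Arduino(serial_port='/dev/ttyACM0')
#   value = board.analog_read(0)
#   board.close()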
| [
"noreply@github.com"
] | noreply@github.com |
530f4767b7bb69cd945bd97def72737f1ad66039 | 7da328d5365788bec00b62e3c3de8b5133fba092 | /impala/tests/test_impala.py | 8c58516171a9ff74ed847675759c70ca285b5840 | [
"Apache-2.0"
] | permissive | attilajeges/impyla | f7520677e426f42e60ecf9199d8dacd38eae1b99 | 35297fd573bd8d8984f89eec91f12dbb1837549a | refs/heads/master | 2023-07-15T17:15:48.683389 | 2020-10-01T23:10:16 | 2020-10-01T23:10:16 | 260,346,345 | 0 | 0 | Apache-2.0 | 2020-05-01T00:18:06 | 2020-05-01T00:18:06 | null | UTF-8 | Python | false | false | 2,025 | py | # Copyright 2019 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pytest import yield_fixture
BIGGER_TABLE_NUM_ROWS = 100
@yield_fixture(scope='module')
def bigger_table(cur):
table_name = 'tmp_bigger_table'
ddl = """CREATE TABLE {0} (s string)
STORED AS PARQUET""".format(table_name)
cur.execute(ddl)
dml = """INSERT INTO {0}
VALUES {1}""".format(table_name,
",".join(["('row{0}')".format(i) for i in xrange(BIGGER_TABLE_NUM_ROWS)]))
# Disable codegen and expr rewrites so query runs faster.
cur.execute("set disable_codegen=1")
cur.execute("set enable_expr_rewrites=0")
cur.execute(dml)
try:
yield table_name
finally:
cur.execute("DROP TABLE {0}".format(table_name))
def test_has_more_rows(cur, bigger_table):
"""Test that impyla correctly handles empty row batches returned with the
hasMoreRows flag."""
# Set the fetch timeout very low and add sleeps so that Impala will return
# empty batches. Run on a single node with a single thread to make as predictable
# as possible.
cur.execute("set fetch_rows_timeout_ms=1")
cur.execute("set num_nodes=1")
cur.execute("set mt_dop=1")
cur.execute("""select *
from {0}
where s != cast(sleep(2) as string)""".format(bigger_table))
expected_rows = [("row{0}".format(i),) for i in xrange(BIGGER_TABLE_NUM_ROWS)]
assert sorted(cur.fetchall()) == sorted(expected_rows)
| [
"noreply@github.com"
] | noreply@github.com |
bb452e72141b555c7dd30f34a66fc3fe30f86fbd | 220a2a22f7ecbb960e6a09b1153ec5094aef15f5 | /Log-Parsers/Recognition_Long_Talks/general_classes.py | a374a5df875af86c516cbe3be40426c999673ee0 | [] | no_license | jrweis01/Rubidium | 89b27b8376891b42eb6b8bf952f70d92dd81768c | 6050241aa19401bd5196939aadfc4a095f771d0a | refs/heads/master | 2020-05-30T05:29:11.649283 | 2019-06-02T07:03:19 | 2019-06-02T07:03:19 | 189,561,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,156 | py | from templates_data import *
import openpyxl
import os
import sys
import shutil
import datetime
class Utils(object):
def fetch_files_from_folder(self, pathToFolder):
_pathToFiles = []
_fileNames = []
for dirPath, dirNames, fileNames in os.walk(pathToFolder):
selected_path = [os.path.join(dirPath, item) for item in fileNames]
_pathToFiles.extend(selected_path)
selectedFile = [item for item in fileNames]
_fileNames.extend(selectedFile)
# Try to remove empty entries if none of the required files are in directory
try:
_pathToFiles.remove('')
            _fileNames.remove('')
except ValueError:
pass
# Warn if nothing was found in the given path
if selectedFile == []:
            print 'No files with given parameters were found in:\n', dirPath, '\n'
            print len(_fileNames), 'files were found in searched folder(s)'
return _pathToFiles, _fileNames
def get_excel_worksheet(self):
pass
@staticmethod
def insertion_sort(items):
for i in range(1, len(items)):
j = i
while j > 0 and items[j] > items[j - 1]:
items[j - 1], items[j] = items[j], items[j - 1]
j = j - 1
return items
def sort_order_dict(self,order_dict):
for key in order_dict:
items = order_dict[key]
items = self.insertion_sort(items)
def sorting_headers(self,sorting_dict,order_dict):
sorted_list = []
for m in order_dict["noise_file_name"]:
for i in order_dict["trig_to_ASR_delay"]:
for j in order_dict["signal_dB"]:
for k in order_dict["noise_dB"]:
for key in sorting_dict:
if (sorting_dict[key]["noise_file_name"] == str(m) and
sorting_dict[key]["trig_to_ASR_delay"] == str(int(i)) and
sorting_dict[key]["signal_dB"] == str(int(j)) and
sorting_dict[key]["noise_dB"] == str(int(k))):
sorted_list.append(key)
return sorted_list
def clear_dict_values(self,dict):
for key in dict:
dict[key].clear()
def get_folder_location_path(self,folder):
program_path = os.path.dirname(sys.argv[0])
template_path = program_path + '\\' + folder
return template_path
class ExcelHandler(object):
def __init__(self, workbook_name):
self.wb_name = workbook_name
self.wb_name_with_dt = self._creat_new_excel_from_template_with_name_and_datetime(workbook_name)
self.wb = openpyxl.load_workbook(str(self.wb_name_with_dt))
self.template_info = {}
self.template_indexes = {'TRIG_ONLY': 4, 'MP_mTRIG_sASR': 4 ,'LJ_sTRIG_mASR' : 4}
self.sheet_MP = None
self.sheet_trig_only = None
self.sheet_LJ_sTRIG_mASR = None
def run_log_printing_LJ_sTRIG_mASR(self,log_dict):
''' for 'LJ_sTRIG_mASR' SHEET TEMPLATE'''
asr_section = log_dict['asr_results_dict']
trig_section = log_dict['trig_results_dict_format']
if self.sheet_LJ_sTRIG_mASR is None:
self.sheet_LJ_sTRIG_mASR = self._open_sheet('LJ_sTRIG_mASR')
ROW = self.template_indexes['LJ_sTRIG_mASR']
''' printing header section'''
self._write_line_to_excel_sheet(self.sheet_LJ_sTRIG_mASR, ROW, 1, log_dict,EXCEL_LJ_sTRIG_mASR_TEMPLATE_HEADER_SECTION)
''' printing trig section'''
self._write_line_to_excel_sheet(self.sheet_LJ_sTRIG_mASR,ROW,27,trig_section,EXCEL_LJ_sTRIG_mASR_TEMPLATE_TRIG_SECTION)
''' printing asr section'''
cmd_template_order = ['volume_down' , 'volume_up' , 'next_song', 'pause' , 'resume', 'what_distance_have_i_done']
cmd_template_dict = {'volume_down': 'empty1.wav' , 'volume_up' : 'empty2.wav' , 'next_song' : 'empty3.wav', 'pause' : 'empty4.wav',
'resume' : 'empty5.wav' , 'what_distance_have_i_done' : 'empty6.wav'}
for command in cmd_template_order:
curr_key = cmd_template_dict[command]
if curr_key in asr_section.keys():
curr_cmd_dict = asr_section[curr_key]
self._write_line_to_excel_sheet(self.sheet_LJ_sTRIG_mASR, ROW, 10, curr_cmd_dict,
EXCEL_LJ_sTRIG_mASR_TEMPLATE_ASR_SECTION)
else:
pass
ROW += 1
self.template_indexes['LJ_sTRIG_mASR']+=6
def run_log_printing_TRIG_ONLY(self,log_dict,exl_tab_name):
''' for 'TRIG_ONLY' SHEET TEMPLATE'''
if self.sheet_trig_only is None:
self.sheet_trig_only = self._open_sheet(exl_tab_name)
ROW = self.template_indexes[exl_tab_name]
self._write_line_to_excel_sheet(self.sheet_trig_only,ROW,1,log_dict,EXCEL_TRIG_TEMPLATE_TUPLE)
self.template_indexes[exl_tab_name] += 1
def run_log_printing_TRIG_ASR_MP(self,log_dict):
''' for 'MP_mTrig_sASR' SHEET TEMPLATE'''
if self.sheet_MP is None:
self.sheet_MP = self._open_sheet("MP_mTRIG_sASR")
ROW = self.template_indexes["MP_mTRIG_sASR"]
self._write_line_to_excel_sheet(self.sheet_MP,ROW,1,log_dict,EXCEL_MP_CMD_TEMPLATE)
self.template_indexes['MP_mTRIG_sASR']+=1
def get_new_wb_name(self):
return self.wb_name_with_dt
def _creat_new_excel_from_template_with_name_and_datetime(self,project_name):
program_path = os.path.dirname(sys.argv[0])
template_path = program_path + '\\template\exl.xlsx'
shutil.copy2(str(template_path), str(program_path))
date_time = datetime.datetime.strftime(datetime.datetime.now(), '_%Y-%m-%d__%H_%M_%S')
exl_file_name = str(project_name) + str(date_time) + ".xlsx"
os.rename("exl.xlsx", str(exl_file_name))
return str(exl_file_name)
def _write_line_to_excel_sheet(self,sheet,row,column,val_dict,template_list):
row = str(row)
start_col = column
for i, key in enumerate(template_list):
col = self._num_to_excel_alphabeit_colms(i+start_col)
try:
# sheet[col + row] = str(val_dict[key])
sheet[col + row] = val_dict[key]
except : print key
def _open_sheet(self,sheet_name):
sheet = self.wb.get_sheet_by_name(sheet_name)
return sheet
def _num_to_excel_alphabeit_colms(self,index_num):
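        # converts a 1-based column index into an Excel column label, e.g. 1 -> 'A', 27 -> 'AA'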
cal1 = index_num % 27
cal2 = index_num // 26
new = index_num - cal2 * 26
if new == 0:
new = 26
cal2 -= 1
if cal2:
mychar = chr(cal2 + 64) + chr(new + 64)
else:
mychar = chr(index_num + 64)
return mychar
def save_workbook(self):
self.wb.save(str(self.wb_name_with_dt))
| [
"noreply@github.com"
] | noreply@github.com |
c45e6ce9c846d77c6611d7c5fa1d641c22336a01 | 4b8c81f54cc52e096ad9ae751f00e88254aab0ca | /20-01-21 while홀.py | 631fadc6b7eb53e75d2df8df8fc563a8e1db0e4e | [] | no_license | dlatpdbs/python | 50305cfcc92bb6c9bae409ec31ebd9e4aa868075 | 2f740941fe1ef172d40cb10a63c1ed19c5925e68 | refs/heads/main | 2022-12-27T15:24:31.243739 | 2020-10-14T05:26:32 | 2020-10-14T05:26:32 | 301,933,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py |
q=1
while q <=100:
print(q)
q=q+2
| [
"noreply@github.com"
] | noreply@github.com |
9abb3baada0faed6fe83d3c15b41aa7c7958cb80 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_27357.py | 1163c19de3fb005d7b6fa68a6a453f6f2e63147f | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | # pyplot.savefig with empty export
plt.show()
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
c7a6bbfb9e4f4606a0720e7f9c0efa56e7d90f30 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/DataQuality/DataQualityConfigurations/python/TCTDisplay.py | 6fa11e45427f043ea1f2b19da409200372d1fc14 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
from DataQualityUtils.DQWebDisplayConfig import DQWebDisplayConfig
dqconfig = DQWebDisplayConfig()
dqconfig.config = "TCT"
dqconfig.hcfg = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_run.1.41.hcfg"
dqconfig.hcfg_min10 = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_minutes10.1.9.hcfg"
dqconfig.hcfg_min30 = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_minutes30.1.5.hcfg"
dqconfig.hanResultsDir = "/afs/cern.ch/atlas/offline/external/FullChainTest/tier0/dqm/han_results"
dqconfig.htmlDir = "/afs/cern.ch/atlas/offline/external/FullChainTest/tier0/dqm/www"
dqconfig.htmlWeb = "http://atlas-project-fullchaintest.web.cern.ch/atlas-project-FullChainTest/tier0/dqm/www"
dqconfig.runlist = "runlist_TCT.xml"
dqconfig.indexFile = "results_TCT.html"
dqconfig.lockFile = "DQWebDisplay_TCT.lock"
dqconfig.dbConnection = "sqlite://;schema=MyCOOL_histo.db;dbname=OFLP200"
dqconfig.dqmfOfl = "/GLOBAL/DETSTATUS/DQMFOFL"
dqconfig.dbConnectionHisto = "sqlite://;schema=MyCOOL_histo.db;dbname=OFLP200"
dqconfig.dqmfOflHisto = "/GLOBAL/DETSTATUS/DQMFOFLH"
dqconfig.dbTagName = "DetStatusDQMFOFL-TCT"
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
1533905896294b79dff04e1b69b2cda7c0496874 | fa1dc1d0d2a169326c97dab863e15403bbd6bdbd | /CS486-686_A2Q2ANN.py | c52223b2857731732b02c8b7a75ccd93868316f2 | [
"MIT"
] | permissive | mojivalipour/nnscratch | f07b893f7ac9792f5c9bb8e8ca5c664e392b6786 | 5e0b7f100d1057fab2c166df5696163634acd726 | refs/heads/master | 2022-11-18T11:43:15.553593 | 2020-07-17T05:19:10 | 2020-07-17T05:19:10 | 271,581,705 | 3 | 8 | null | null | null | null | UTF-8 | Python | false | false | 21,331 | py | #!/usr/bin/env python
# coding: utf-8
# Design and Programming by Lead TA: Mojtaba Valipour @ Data Analytics Lab - UWaterloo.ca
# COURSE: CS 486/686 - Artificial Intelligence - University of Waterloo - Spring 2020 - Alice Gao
# Please let me know if you find any bugs in the code: m5valipo@uwaterloo.ca
# The code will be available at https://github.com/mojivalipour/nnscratch
# Version: 0.9.0
# Implement a neural network from scratch
''' Sources:
- http://neuralnetworksanddeeplearning.com/chap2.html
'''
print('Life is easy, you just need to do your best to find your place!')
# Libraries
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn import datasets
from sklearn.manifold import TSNE # visualization for data with more than two features
from os import path
import pandas as pd
import csv
import copy
import random
# Helper functions
def fixSeed(seed=1010):
np.random.seed(seed)
random.seed(seed)
# The hyper-parameters for the neural network
nSamples = None # use None if you want to use full sample size
# frogsSmall is the same dataset in Q1 that you have to use for comparision
dataset = '2moons' # 2moons/frogsSmall/frogs
noise = 0.05 # Noise in artificial datasets
visNumSamples = 500 # number of samples to visualize
# for regression, we use mean squared error.
# for classification, we use cross entropy.
# for now only mse is supported!
lossFunction = 'mse'
gdMethod = 'batch' # batch gradient descent method
batchSize = 64 # only for minibatch gradient descent
numEpochs = 200 # number of epochs
learningRate = [0.5,0.05,0.005] # learning rates
# for now only relu and sigmoid is supported
lastActivationFunc = 'sigmoid' # relu/sigmoid/softmax
# last layer activation function, this one is important
# because we need to use it for classification later
crossValidationFlag = True # if you like to run cross validation, set this flag to True
kFold = 3 # k-fold cross validation, at least need to be 2
seed = 6565 # Do not change the seed for Assignment
fixSeed(seed=seed) # fix the seed of random generator to make sure comparision is possible
# Some Useful Notes for those students who are interested to know more:
'''
- Neural networks are prone to overfitting. Increasing the number of parameters
  could lead to models whose complexity exceeds what the data can support.
- Regularization, Normalization and Dropout are popular solutions to overfitting!
- In a neural network, we usually use the softmax function as last layer
activation for multi-class classification and sigmoid for single class
classification.
- For regression problems, we usually use Relu as last layer activation function
and MSE as the loss function that we want to minimize.
- Cross-entropy is the most useful loss function for multi-class classification.
- Sometimes we need to use multiple neurons in the output layer, which means
that we consider a neuron for each class. In this case, we need to use
one-hot vectors to encode the labels.
- Weight initialization is important! Gradient descent is not robust to
weight initialization! Xavier initialization is the most popular method
to initialize weights in neural networks.
'''
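
# A minimal illustration of two ideas mentioned in the notes above (Xavier initialization
# and one-hot label encoding). These helpers are illustrative assumptions only and are not
# used by the assignment code below.
def xavier_init(nIn, nOut):
    # Xavier/Glorot-style initialization: scale N(0, 1) weights by 1/sqrt(nIn) so the
    # variance of the activations stays roughly constant from layer to layer
    return np.random.randn(nIn, nOut) / np.sqrt(nIn)

def one_hot(labels, numClasses):
    # encode integer labels as one-hot rows, e.g. 2 -> [0, 0, 1, 0, ...]
    encoded = np.zeros((labels.shape[0], numClasses))
    encoded[np.arange(labels.shape[0]), labels.ravel().astype(int)] = 1
    return encoded
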
# Load data
colorBox = ['#377eb8','#FA0000','#344AA7', '#1EFA39','#00FBFF','#C500FF','#000000','#FFB600']
if dataset == '2moons':
nSamples = 1000 if nSamples is None else nSamples
X,y = datasets.make_moons(n_samples=nSamples, noise=noise, random_state=seed)
numSamples, numFeatures, numClasses = X.shape[0], X.shape[1], 2
# shuffle X,y
idxList = list(range(nSamples))
random.shuffle(idxList) # inplace
X, y = X[idxList,:], y[idxList]
elif dataset == 'frogsSmall' or dataset == 'frogs':
if dataset == 'frogs':
# original dataset
name = 'Frogs_MFCCs.csv'
else:
# a small subset of frogs original dataset, same as A2Q1
name = 'frogs-small.csv'
# check if we already have the file in the directory
if not path.isfile(name):
# otherwise ask user to upload it
print("Please put this {} file in the current directory using choose files ...".format(name))
# just load the csv file
X = pd.read_csv(name, sep=',')
X["Family"] = X["Family"].astype('category')
X["FamilyCat"] = X["Family"].cat.codes # added to the last column
X, y = X.iloc[:,0:22].to_numpy(), X.iloc[:,-1].to_numpy()
nSamples = X.shape[0] if nSamples is None else nSamples
X, y = X[:nSamples,:], y[:nSamples] # filter number of samples
numSamples, numFeatures, numClasses = X.shape[0], X.shape[1], len(np.unique(y))
print('#INFO: N (Number of Samples): {}, D (Number of Features): {}, C (Number of Classes): {}'.format(numSamples, numFeatures, numClasses))
plt.figure()
# if y min is not zero, make it zero
y = y - y.min()
assert y.min() == 0
# sample required sample for visualization
indices = list(range(numSamples))
selectedIndices = np.random.choice(indices, visNumSamples)
colors = [colorBox[y[idx]] for idx in selectedIndices]
if numFeatures == 2:
XR = X[selectedIndices, :]
else:
# use tsne to reduce dimensionality for visualization
XR = TSNE(n_components=2).fit_transform(X[selectedIndices,:])
plt.scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
plt.savefig('dataset.png')
if len(y.shape) < 2:
y = np.expand_dims(y,-1) # shape of y should be N x 1
# Define the network structure
# # 2-Layer Network
# config = {
# # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
# 'Hidden Layer 0': [[numFeatures, 30], True, 'relu'], # w1
# 'Fully Connected': [[30, 1], True, lastActivationFunc] # w2
# }
# overfit network example
config = {
# Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
'Hidden Layer 0': [[numFeatures, 1000], True, 'sigmoid'], # w1
'Fully Connected': [[1000, 1], True, lastActivationFunc] # w2
}
# 3-Layer Network
# config = {
# # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
# 'Hidden Layer 0': [[numFeatures, 3], True, 'sigmoid'], # w1
# 'Hidden Layer 1': [[3, 5], True, 'sigmoid'], # w2
# 'Fully Connected': [[5, 1], True, lastActivationFunc] # w2
# }
# 4-layer Network
# config = {
# # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
# 'Hidden Layer 0': [[numFeatures, 100], True, 'relu'], # w1
# 'Hidden Layer 1': [[100, 50], True, 'relu'], # w2
# 'Hidden Layer 2': [[50, 5], True, 'relu'], # w3
# 'Fully Connected': [[5, 1], True, lastActivationFunc] # w4
# }
# Fully Connected Neural Network Class
class neuralNetwork():
# initializing network
def __init__(self, config=None, numClass=2, learningRate=0.005,
numEpochs=10, batchSize= 64, lossFunction='mse'):
self.config = config
self.configKeyList = list(self.config.keys())
self.lossFunction = lossFunction
self.numLayers = len(self.config)
self.layers = {}
self.layerShapes = {}
self.learningRate = learningRate
self.numEpochs = numEpochs
self.loss = []
self.lossT = []
self.acc = []
self.accT = []
self.batchSize = batchSize
self.numClass = numClass
self.initWeights()
# random init
def initWeights(self):
self.loss = []
self.lossT = []
self.acc = []
self.accT = []
if self.config != None:
for key in config:
# w is parameters, b is bias, a is activation function
self.layers[key] = {'W':np.random.randn(self.config[key][0][0],
self.config[key][0][1])/np.sqrt(self.config[key][0][1]),
'b':np.random.randn(self.config[key][0][1],
) if self.config[key][1]==True else [], 'a':self.config[key][2]}
# keep track of shape only for better understanding
self.layerShapes[key] = {'IS':self.config[key][0][0],'OS':self.config[key][0][1],
'NP':np.prod(self.layers[key]['W'].shape)+len(self.layers[key]['b'])}
        else:
            raise ValueError('#Err: Make sure you set a configuration correctly!')
# activation functions
def relu(self, X):
return np.maximum(0, X)
    def sigmoid(self, X):
        # clip the input so np.exp does not overflow for large negative values
        X = np.clip(X, -500, 500)
        return 1./(1. + np.exp(-X))
def activationFunc(self, X, type='sigmoid'):
if type == 'sigmoid':
return self.sigmoid(X)
elif type == 'relu':
return self.relu(X)
elif type == 'None':
return X # do nothing
        else:
            raise NotImplementedError('#Err: Not implemented activation function!')
# objective/loss/cost functions
def mse(self, y, yPred): # mean square error
return np.mean(np.power(y-yPred,2))
def lossFunc(self, y, yPred, type='mse'):
if type == 'mse':
return self.mse(y, yPred)
        else:
            raise NotImplementedError('#Err: Not implemented objective function!')
# back-propagation learning
# forward pass
def forward(self, X):
# apply a(W.T x X + b) for each layer
        for key in self.config:
#print(X.shape, self.layers[key]['W'].shape)
# save input of each layer for backward pass
self.layers[key]['i'] = X
z = np.dot(X, self.layers[key]['W'])
z = z + self.layers[key]['b'] if len(self.layers[key]['b'])!=0 else z
# save middle calculation for backward pass
self.layers[key]['z'] = z
X = self.activationFunc(z, type=self.layers[key]['a'])
# save middle calculation for backward pass
self.layers[key]['o'] = X
return X # yPred
# backward pass
def backward(self, y, yPred):
# derivative of sigmoid
def sigmoidPrime(x):
return self.sigmoid(x) * (1-self.sigmoid(x))
# derivative of relu
def reluPrime(x):
return np.where(x <= 0, 0, 1)
def identity(x):
return x
#TODO: It's not necessary to use double for,
# it is possible to implement faster and more efficient version
# for each parameter (weights and bias) in each layer
        for idx, key in enumerate(self.config):
# calculate derivatives
if self.layers[key]['a'] == 'sigmoid':
fPrime = sigmoidPrime
elif self.layers[key]['a'] == 'relu':
fPrime = reluPrime
            elif self.layers[key]['a'] == 'softmax':
                # note: softmaxPrime is not defined anywhere in this script, so a
                # softmax layer would raise a NameError here (softmax is unsupported)
                fPrime = softmaxPrime
else: # None
fPrime = identity
deWRTdyPred = -(y-yPred) if self.lossFunction == 'mse' else 1 # de/dyPred
# print('de/dy')
# dyPred/dyPredBeforeActivation # in case of sigmoid g(x) x (1-g(x))
dyPredWRTdyPredPre = fPrime(self.layers[self.configKeyList[-1]]['o'])
# print('dy/dz')
# element wise multiplication/ hadamard product
delta = np.multiply(deWRTdyPred, dyPredWRTdyPredPre)
for idxW in range(len(config),idx,-1): # reverse
if idxW-1 == idx:
# calculating the derivative for the last one is different
# because it is respected to that specific weight
#print('\nWeights of layer',idx)
deltaB = delta
dxWRTdW = self.layers[key]['i'].T # dxWRTdW
delta = np.dot(dxWRTdW,delta)
#print('dz/dw')
else:
# this loop is depended to the number of layers in the configuration
# print('\nWeights of layer',idxW-1)
# the weights of current layer
# how fast the cost is changing as a function of the output activation
dxWRTdh = self.layers[self.configKeyList[idxW-1]]['W'].T # dxPreWRTdx-1
# print('dz/da')
# print('output of layer',idxW-1-1)
# the output of previous layer
# how fast the activation function is changing
dhWRTdhPre = fPrime(self.layers[self.configKeyList[idxW-1-1]]['o']) # dx-1WRTdx-1Pre
# print('da/dz')
delta = np.dot(delta, dxWRTdh) * dhWRTdhPre
# sanity check: Numerical Gradient Checking
# f'(x) = lim (f(x+deltax)-f(x))/deltax when deltax -> 0
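            # A minimal sketch of such a check (illustrative only, never called here;
            # eps, i, j and xBatch are placeholder names, not variables of this script):
            #   eps = 1e-5
            #   w0 = self.layers[key]['W'][i, j]
            #   self.layers[key]['W'][i, j] = w0 + eps; lossPlus  = self.lossFunc(y, self.forward(xBatch))
            #   self.layers[key]['W'][i, j] = w0 - eps; lossMinus = self.lossFunc(y, self.forward(xBatch))
            #   self.layers[key]['W'][i, j] = w0
            #   numericalGrad = (lossPlus - lossMinus) / (2 * eps)  # should be close to djWRTdw[i, j]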
# update parameters
# W = W - Gamma * dL/dW
self.layers[key]['djWRTdw'] = delta
self.layers[key]['W'] = self.layers[key]['W'] - self.learningRate/y.shape[0] * delta
# b = b - Gamma * dL/db
self.layers[key]['djWRTdb'] = deltaB
if len(self.layers[key]['b'])!=0:
self.layers[key]['b'] = self.layers[key]['b'] - self.learningRate/y.shape[0] * np.sum(deltaB, axis=0)
# Utility Functions
def summary(self, space=20):
print('{: <{}} | {: <{}} | {: <{}} | {: <{}}'.format("Layer Name", space,
"Input Shape", space,
"Output Shape", space,
"Number of Parameters",space))
        for key in self.config:
print('{: <{}} | {: <{}} | {: <{}} | {: <{}}'.format(key, space,
self.layerShapes[key]['IS'], space,
self.layerShapes[key]['OS'], space,
self.layerShapes[key]['NP'], space))
def fit(self, X, y, XT=None, yT=None, method='batch', batchSize=None, numEpochs=None,
learningRate=None, initialState=None):
if numEpochs is None: # overwrite
numEpochs = self.numEpochs
if learningRate is not None:
self.learningRate = learningRate
if batchSize is not None:
self.batchSize = batchSize
# if initialState is not None:
# # use the given initial parameters (weights and bias)
# self.layers = initialState
if method == 'batch':
            # this is in fact mini-batch gradient descent; the name 'batch' is kept
            # for consistency with the batched gradient descent covered in the course material
pBar = tqdm(range(numEpochs))
for edx in pBar:
for idx in range(0, X.shape[0], self.batchSize):
start = idx
end = start + self.batchSize
end = end if end < X.shape[0] else X.shape[0]
#TODO: Support variable batchsize
if end-start != self.batchSize:
continue
x_, y_ = X[start:end, :], y[start:end, :]
yPred = self.forward(x_)
loss = self.lossFunc(y_, yPred, type=self.lossFunction)
self.backward(y_, yPred)
yPred,yPredOrig = self.predict(X)
loss = self.lossFunc(y, yPredOrig, type=self.lossFunction)
self.loss.append(loss)
acc = self.accuracy(y, yPred)
self.acc.append(acc)
if XT is not None:
yPred, yPredOrig = self.predict(XT)
loss = self.lossFunc(yT, yPredOrig, type=self.lossFunction)
self.lossT.append(loss)
acc = self.accuracy(yT, yPred)
self.accT.append(acc)
        else:
            raise NotImplementedError('#Err: {} gradient descent method is not implemented!'.format(method))
def predict(self, X):
yPred = self.forward(X)
yPredOrigin = copy.deepcopy(yPred)
# last layer activation function, class prediction should be single
# and the output is between zero and one
if self.config[self.configKeyList[-1]][-1] == 'sigmoid':
yPred[yPred < 0.5] = 0
yPred[yPred >= 0.5] = 1
# multi-class problem
        elif self.config[self.configKeyList[-1]][-1] == 'softmax':
            raise NotImplementedError('#Err: Prediction is not supported for softmax yet!')
# single/multi class problem, single node and it can be anything greater than 0
elif self.config[self.configKeyList[-1]][-1] == 'relu':
yPred = np.round(yPred)
yPred = np.clip(yPred, 0, self.numClass-1) # sanity check
return yPred, yPredOrigin
def error(self, y, yPred):
return self.lossFunc(y, yPred, type=self.lossFunction)
def accuracy(self, y, yPred):
return 100*np.sum(y==yPred)/y.shape[0]
def plotLoss(self, loss=None, ax=None):
if loss is None:
loss = self.loss
if ax is None:
plt.plot(loss)
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Loss Per Epoch")
plt.show()
else:
ax.plot(loss)
ax.set_xlabel("Epochs")
ax.set_ylabel("Loss")
ax.set_title("Loss Per Epoch")
def crossValidationIndices(self, index, k=5):
# index is a list of indexes
cvList = []
for idx in range(k): # iterate over k-folds
interval = int(len(index)/k)
start = idx * interval
end = start + interval
testIndexes = list(range(start,end))
trainIndexes = list(range(0,start)) + list(range(end,len(index)))
cvList.append((trainIndexes, testIndexes))
return cvList
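    # For example (illustrative only): with 10 samples and k=5, fold 0 uses indices
    # [0, 1] for testing and [2..9] for training, fold 1 tests on [2, 3], and so on.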
if crossValidationFlag:
if len(learningRate) == 1:
fig, ax = plt.subplots(3,len(learningRate),figsize=(8,15))
else:
fig, ax = plt.subplots(3,len(learningRate),figsize=(30,3*(len(learningRate)+2)))
else:
fig, ax = plt.subplots(1,1+len(learningRate),figsize=(30,1+len(learningRate)))
for ldx, lr in enumerate(learningRate):
nn = neuralNetwork(config=config, numClass=numClasses, numEpochs=numEpochs,
learningRate=lr, lossFunction=lossFunction)
# Initialize the network and the weights
nn.initWeights()
if crossValidationFlag:
indexes = list(range(X.shape[0]))
cvIndices = nn.crossValidationIndices(indexes, k=kFold)
accList = []
accTList = []
lossList = []
lossTList = []
for k in range(kFold):
nn.initWeights()
XTrain, yTrain = X[cvIndices[k][0],:], y[cvIndices[k][0],:]
XTest, yTest = X[cvIndices[k][1],:], y[cvIndices[k][1],:]
# Train the network
nn.fit(XTrain, yTrain, XTest, yTest, method=gdMethod, batchSize=batchSize,
numEpochs=numEpochs, learningRate=lr)
accList.append(nn.acc)
accTList.append(nn.accT)
lossList.append(nn.loss)
lossTList.append(nn.lossT)
acc = np.mean(accList, axis=0)
accT = np.mean(accTList, axis=0)
loss = np.mean(lossList, axis=0)
lossT = np.mean(lossTList, axis=0)
# print the network structure
nn.summary()
yPred, yPredOrig = nn.predict(X)
print('#INFO: Mean squared error is {}'.format(nn.error(y,yPred)))
colors = [colorBox[int(yPred[idx])] for idx in selectedIndices]
if len(learningRate) == 1:
ax[2].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
ax[2].set_xlabel("X1")
ax[2].set_ylabel("X2")
ax[2].set_title("Data, LR: {}".format(lr))
ax[0].plot(acc)
ax[0].plot(accT)
ax[0].legend(['Train','Test'])
ax[0].set_xlabel("Epochs")
ax[0].set_ylabel("Accuracy")
ax[0].set_title("Accuracy Per Epoch"+", LR: {}".format(lr))
ax[1].plot(loss)
ax[1].plot(lossT)
ax[1].legend(['Train','Test'])
ax[1].set_xlabel("Epochs")
ax[1].set_ylabel("Loss")
ax[1].set_title("Loss Per Epoch"+", LR: {}".format(lr))
else:
ax[2,ldx].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
ax[2,ldx].set_xlabel("X1")
ax[2,ldx].set_ylabel("X2")
ax[2,ldx].set_title("Data, LR: {}".format(lr))
ax[0,ldx].plot(acc)
ax[0,ldx].plot(accT)
ax[0,ldx].legend(['Train','Test'])
ax[0,ldx].set_xlabel("Epochs")
ax[0,ldx].set_ylabel("Accuracy")
ax[0,ldx].set_title("Accuracy Per Epoch"+", LR: {}".format(lr))
ax[1,ldx].plot(loss)
ax[1,ldx].plot(lossT)
ax[1,ldx].legend(['Train','Test'])
ax[1,ldx].set_xlabel("Epochs")
ax[1,ldx].set_ylabel("Loss")
ax[1,ldx].set_title("Loss Per Epoch"+", LR: {}".format(lr))
else:
# Perform a single run for visualization.
nn.fit(X, y, method=gdMethod, batchSize=batchSize, numEpochs=numEpochs,
learningRate=lr)
# print the network structure
nn.summary()
yPred, yPredOrig = nn.predict(X)
print('#INFO: Mean squared error is {}'.format(nn.error(y,yPred)))
colors = [colorBox[int(yPred[idx])] for idx in selectedIndices]
ax[ldx+1].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
ax[ldx+1].set_xlabel("X1")
ax[ldx+1].set_ylabel("X2")
ax[ldx+1].set_title("LR: {}".format(lr))
        # Plot the mean squared error with respect to the number of epochs
nn.plotLoss(ax=ax[0])
# train accuracy
acc = nn.accuracy(y.squeeze(-1),yPred.squeeze(-1))
print('#INFO: Train Accuracy is {}'.format(acc))
if not crossValidationFlag:
ax[0].legend(["LR: "+str(lr) for lr in learningRate])
# please feel free to save subplots for a better report
fig.savefig('results.png')
| [
"noreply@github.com"
] | noreply@github.com |
3b91d9f42ee1ecda8632567b35ac5caa51d497c7 | 35053a371d85c2d45a4f52239d8a70b38194ef48 | /Count of Matches in Tournament.py | 96c8b115113e1096f964d3dcc4f40e3f4b7f16a1 | [] | no_license | Kuehar/LeetCode | 51d169c81a2e572ea854399fc78e1130220388f9 | 4555c20455f181f9dd7b3aba2a8779dea795edfb | refs/heads/master | 2023-04-16T10:13:03.584541 | 2023-04-06T11:47:21 | 2023-04-06T11:47:21 | 243,361,421 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | class Solution:
def numberOfMatches(self, n: int) -> int:
return n-1
# O(1) Solution.
    # The answer is always n-1: every match eliminates exactly one team, so the total number of matches equals the number of losers.
# Runtime: 28 ms, faster than 82.44% of Python3 online submissions for Count of Matches in Tournament.
# Memory Usage: 14.3 MB, less than 40.04% of Python3 online submissions for Count of Matches in Tournament.
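    # Quick sanity check (illustrative usage, not part of the original submission):
    #   Solution().numberOfMatches(7)  -> 6   (3 + 2 + 1 matches)
    #   Solution().numberOfMatches(14) -> 13  (7 + 3 + 2 + 1 matches)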
| [
"noreply@github.com"
] | noreply@github.com |
c37ff8cfcff227220d098069e2f3040dce7f56e8 | 9145d24e2517d7f3cea6e89158806b95919449b8 | /doc/conf.py | 37c50aca46644bd4ce262e466fa2696daa55957c | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | pombredanne/coveragepy | b6de846694156581ee0b9a3348f4cfd48719855f | 2364947d7814a065cf2c05d930eda94203b20f1c | refs/heads/master | 2021-01-22T23:43:21.800229 | 2017-03-18T11:14:13 | 2017-03-18T11:14:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,618 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
#
# coverage.py documentation build configuration file, created by
# sphinx-quickstart on Wed May 13 22:18:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinxcontrib.spelling',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Coverage.py'
copyright = u'2009\N{EN DASH}2017, Ned Batchelder' # CHANGEME
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.3.4' # CHANGEME
# The full version, including alpha/beta/rc tags.
release = '4.3.4' # CHANGEME
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
#html_style = "neds.css"
#html_add_permalinks = ""
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_templates']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = '.htm'
# Output file base name for HTML help builder.
htmlhelp_basename = 'coveragepydoc'
# -- Spelling ---
spelling_word_list_filename = 'dict.txt'
spelling_show_suggestions = False
# When auto-doc'ing a class, write the class' docstring and the __init__ docstring
# into the class docs.
autoclass_content = "class"
prerelease = bool(max(release).isalpha())
def setup(app):
app.add_stylesheet('coverage.css')
app.add_config_value('prerelease', False, 'env')
app.info("** Prerelease = %r" % prerelease)
| [
"ned@nedbatchelder.com"
] | ned@nedbatchelder.com |
875a564377d75822b6c87a33792ad8d32b40b7b6 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/datacatalog/outputs.py | 26d9e4bddb4ce2d56c83f67f19a73cd325ca56ef | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from ._enums import *
__all__ = [
'PrincipalsResponse',
]
@pulumi.output_type
class PrincipalsResponse(dict):
"""
User principals.
"""
def __init__(__self__, *,
object_id: Optional[str] = None,
upn: Optional[str] = None):
"""
User principals.
:param str object_id: Object Id for the user
:param str upn: UPN of the user.
"""
if object_id is not None:
pulumi.set(__self__, "object_id", object_id)
if upn is not None:
pulumi.set(__self__, "upn", upn)
@property
@pulumi.getter(name="objectId")
def object_id(self) -> Optional[str]:
"""
Object Id for the user
"""
return pulumi.get(self, "object_id")
@property
@pulumi.getter
def upn(self) -> Optional[str]:
"""
UPN of the user.
"""
return pulumi.get(self, "upn")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"noreply@github.com"
] | noreply@github.com |
69e2f645ab6431a303076a1506514f479e530747 | 9fc5dd13e0595bd5796cd7ec109e3b7c290e2692 | /wikipedia-scape.py | a54f56c6c75b06d0d4069f56a187c27ded4d5b68 | [] | no_license | ronandoolan2/python-webscraping | 812d5190dfe5f24029b4737438c80e8d40716971 | 4dc83a331415c3e55f06b1a8d0de47710db5ccd0 | refs/heads/master | 2021-01-19T00:54:22.801053 | 2017-04-16T09:10:47 | 2017-04-16T09:10:47 | 87,218,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | from bs4 import BeautifulSoup
import urllib2
import re
wiki = "http://en.wikipedia.org/wiki/Mad_Max:_Fury_Road"
header = {'User-Agent': 'Mozilla/5.0'} #Needed to prevent 403 error on Wikipedia
req = urllib2.Request(wiki,headers=header)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
rnd = ""
pick = ""
NFL = ""
player = ""
pos = ""
college = ""
conf = ""
notes = ""
table = soup.find("table", { "class" : "wikitable sortable" })
print table
#output = open('output.csv','w')
for row in table.findAll("tr"):
cells = row.findAll("href")
for cell in cells:
# search-term = re.search(r'director',cell)
# if search-term:
# print search-term
#print "---"
print cell.text
    # print cells.text  # broken: cells is a ResultSet and has no .text attribute
#print "---"
| [
"noreply@github.com"
] | noreply@github.com |
6ac793e3b8df59989fc5a148e4385b6fe3b6ed70 | dbab24ee5055dad1a57bb212ae30da994022eab5 | /Python/Chapter 6 - tehtävä 3.py | 4703757b6f12df00e86114119c5ffd8b7220709e | [] | no_license | MikBom/mikbom-github.io | e8ab24080a6c6383f4ad973a817e10ab84375e4f | 3dc7312798473a7620529d24fa771d5b09bafbbc | refs/heads/main | 2023-08-14T07:04:01.427822 | 2021-09-21T16:08:32 | 2021-09-21T16:08:32 | 301,965,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | vari = input("Valitse kohde (1-3):")
if vari == "1":
print("Haukion Kala Oy")
elif vari == "2":
print("Metallipaja VasaraAika")
elif vari == "3":
print("Balin palapelitehdas") | [
"noreply@github.com"
] | noreply@github.com |
1aeaca94f2d4d9feb9733db3c8cad22d7ff94e80 | cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1 | /examples/conditional_format.py | 868eec6890126a075a32371064be80ab9628e826 | [
"BSD-2-Clause"
] | permissive | glasah/XlsxWriter | bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec | 1e8aaeb03000dc2f294ccb89b33806ac40dabc13 | refs/heads/main | 2023-09-05T03:03:53.857387 | 2021-11-01T07:35:46 | 2021-11-01T07:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,956 | py | ###############################################################################
#
# Example of how to add conditional formatting to an XlsxWriter file.
#
# Conditional formatting allows you to apply a format to a cell or a
# range of cells based on certain criteria.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2021, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
workbook = xlsxwriter.Workbook('conditional_format.xlsx')
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet3 = workbook.add_worksheet()
worksheet4 = workbook.add_worksheet()
worksheet5 = workbook.add_worksheet()
worksheet6 = workbook.add_worksheet()
worksheet7 = workbook.add_worksheet()
worksheet8 = workbook.add_worksheet()
worksheet9 = workbook.add_worksheet()
# Add a format. Light red fill with dark red text.
format1 = workbook.add_format({'bg_color': '#FFC7CE',
'font_color': '#9C0006'})
# Add a format. Green fill with dark green text.
format2 = workbook.add_format({'bg_color': '#C6EFCE',
'font_color': '#006100'})
# Some sample data to run the conditional formatting against.
data = [
[34, 72, 38, 30, 75, 48, 75, 66, 84, 86],
[6, 24, 1, 84, 54, 62, 60, 3, 26, 59],
[28, 79, 97, 13, 85, 93, 93, 22, 5, 14],
[27, 71, 40, 17, 18, 79, 90, 93, 29, 47],
[88, 25, 33, 23, 67, 1, 59, 79, 47, 36],
[24, 100, 20, 88, 29, 33, 38, 54, 54, 88],
[6, 57, 88, 28, 10, 26, 37, 7, 41, 48],
[52, 78, 1, 96, 26, 45, 47, 33, 96, 36],
[60, 54, 81, 66, 81, 90, 80, 93, 12, 55],
[70, 5, 46, 14, 71, 19, 66, 36, 41, 21],
]
###############################################################################
#
# Example 1.
#
caption = ('Cells with values >= 50 are in light red. '
'Values < 50 are in light green.')
# Write the data.
worksheet1.write('A1', caption)
for row, row_data in enumerate(data):
worksheet1.write_row(row + 2, 1, row_data)
# Write a conditional format over a range.
worksheet1.conditional_format('B3:K12', {'type': 'cell',
'criteria': '>=',
'value': 50,
'format': format1})
# Write another conditional format over the same range.
worksheet1.conditional_format('B3:K12', {'type': 'cell',
'criteria': '<',
'value': 50,
'format': format2})
###############################################################################
#
# Example 2.
#
caption = ('Values between 30 and 70 are in light red. '
'Values outside that range are in light green.')
worksheet2.write('A1', caption)
for row, row_data in enumerate(data):
worksheet2.write_row(row + 2, 1, row_data)
worksheet2.conditional_format('B3:K12', {'type': 'cell',
'criteria': 'between',
'minimum': 30,
'maximum': 70,
'format': format1})
worksheet2.conditional_format('B3:K12', {'type': 'cell',
'criteria': 'not between',
'minimum': 30,
'maximum': 70,
'format': format2})
###############################################################################
#
# Example 3.
#
caption = ('Duplicate values are in light red. '
'Unique values are in light green.')
worksheet3.write('A1', caption)
for row, row_data in enumerate(data):
worksheet3.write_row(row + 2, 1, row_data)
worksheet3.conditional_format('B3:K12', {'type': 'duplicate',
'format': format1})
worksheet3.conditional_format('B3:K12', {'type': 'unique',
'format': format2})
###############################################################################
#
# Example 4.
#
caption = ('Above average values are in light red. '
'Below average values are in light green.')
worksheet4.write('A1', caption)
for row, row_data in enumerate(data):
worksheet4.write_row(row + 2, 1, row_data)
worksheet4.conditional_format('B3:K12', {'type': 'average',
'criteria': 'above',
'format': format1})
worksheet4.conditional_format('B3:K12', {'type': 'average',
'criteria': 'below',
'format': format2})
###############################################################################
#
# Example 5.
#
caption = ('Top 10 values are in light red. '
'Bottom 10 values are in light green.')
worksheet5.write('A1', caption)
for row, row_data in enumerate(data):
worksheet5.write_row(row + 2, 1, row_data)
worksheet5.conditional_format('B3:K12', {'type': 'top',
'value': '10',
'format': format1})
worksheet5.conditional_format('B3:K12', {'type': 'bottom',
'value': '10',
'format': format2})
###############################################################################
#
# Example 6.
#
caption = ('Cells with values >= 50 are in light red. '
'Values < 50 are in light green. Non-contiguous ranges.')
# Write the data.
worksheet6.write('A1', caption)
for row, row_data in enumerate(data):
worksheet6.write_row(row + 2, 1, row_data)
# Write a conditional format over a range.
worksheet6.conditional_format('B3:K6', {'type': 'cell',
'criteria': '>=',
'value': 50,
'format': format1,
'multi_range': 'B3:K6 B9:K12'})
# Write another conditional format over the same range.
worksheet6.conditional_format('B3:K6', {'type': 'cell',
'criteria': '<',
'value': 50,
'format': format2,
'multi_range': 'B3:K6 B9:K12'})
###############################################################################
#
# Example 7.
#
caption = 'Examples of color scales with default and user colors.'
data = range(1, 13)
worksheet7.write('A1', caption)
worksheet7.write('B2', "2 Color Scale")
worksheet7.write('D2', "2 Color Scale + user colors")
worksheet7.write('G2', "3 Color Scale")
worksheet7.write('I2', "3 Color Scale + user colors")
for row, row_data in enumerate(data):
worksheet7.write(row + 2, 1, row_data)
worksheet7.write(row + 2, 3, row_data)
worksheet7.write(row + 2, 6, row_data)
worksheet7.write(row + 2, 8, row_data)
worksheet7.conditional_format('B3:B14', {'type': '2_color_scale'})
worksheet7.conditional_format('D3:D14', {'type': '2_color_scale',
'min_color': "#FF0000",
'max_color': "#00FF00"})
worksheet7.conditional_format('G3:G14', {'type': '3_color_scale'})
worksheet7.conditional_format('I3:I14', {'type': '3_color_scale',
'min_color': "#C5D9F1",
'mid_color': "#8DB4E3",
'max_color': "#538ED5"})
###############################################################################
#
# Example 8.
#
caption = 'Examples of data bars.'
worksheet8.write('A1', caption)
worksheet8.write('B2', "Default data bars")
worksheet8.write('D2', "Bars only")
worksheet8.write('F2', "With user color")
worksheet8.write('H2', "Solid bars")
worksheet8.write('J2', "Right to left")
worksheet8.write('L2', "Excel 2010 style")
worksheet8.write('N2', "Negative same as positive")
data = range(1, 13)
for row, row_data in enumerate(data):
worksheet8.write(row + 2, 1, row_data)
worksheet8.write(row + 2, 3, row_data)
worksheet8.write(row + 2, 5, row_data)
worksheet8.write(row + 2, 7, row_data)
worksheet8.write(row + 2, 9, row_data)
data = [-1, -2, -3, -2, -1, 0, 1, 2, 3, 2, 1, 0]
for row, row_data in enumerate(data):
worksheet8.write(row + 2, 11, row_data)
worksheet8.write(row + 2, 13, row_data)
worksheet8.conditional_format('B3:B14', {'type': 'data_bar'})
worksheet8.conditional_format('D3:D14', {'type': 'data_bar',
'bar_only': True})
worksheet8.conditional_format('F3:F14', {'type': 'data_bar',
'bar_color': '#63C384'})
worksheet8.conditional_format('H3:H14', {'type': 'data_bar',
'bar_solid': True})
worksheet8.conditional_format('J3:J14', {'type': 'data_bar',
'bar_direction': 'right'})
worksheet8.conditional_format('L3:L14', {'type': 'data_bar',
'data_bar_2010': True})
worksheet8.conditional_format('N3:N14', {'type': 'data_bar',
'bar_negative_color_same': True,
'bar_negative_border_color_same': True})
###############################################################################
#
# Example 9.
#
caption = 'Examples of conditional formats with icon sets.'
data = [
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3, 4],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
]
worksheet9.write('A1', caption)
for row, row_data in enumerate(data):
worksheet9.write_row(row + 2, 1, row_data)
worksheet9.conditional_format('B3:D3', {'type': 'icon_set',
'icon_style': '3_traffic_lights'})
worksheet9.conditional_format('B4:D4', {'type': 'icon_set',
'icon_style': '3_traffic_lights',
'reverse_icons': True})
worksheet9.conditional_format('B5:D5', {'type': 'icon_set',
'icon_style': '3_traffic_lights',
'icons_only': True})
worksheet9.conditional_format('B6:D6', {'type': 'icon_set',
'icon_style': '3_arrows'})
worksheet9.conditional_format('B7:E7', {'type': 'icon_set',
'icon_style': '4_arrows'})
worksheet9.conditional_format('B8:F8', {'type': 'icon_set',
'icon_style': '5_arrows'})
worksheet9.conditional_format('B9:F9', {'type': 'icon_set',
'icon_style': '5_ratings'})
workbook.close()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
96f12a1ab1eb7f33d8ce8497a6de454ae5054716 | 12fe05ebba89ea0f11d6f5d2fd8f047ee6369ff6 | /minmax3.py | c6f28978343f73c011e14f3c2fb0c7170c66fa0b | [] | no_license | daniilvarlamov/domzad | d467c4b9f51a1a640b0b001216849131c2463500 | 69e1b4c6fa27dc4d17499cfc6817c97d90f8391a | refs/heads/main | 2023-01-20T21:58:33.078060 | 2020-11-26T09:18:16 | 2020-11-26T09:18:16 | 303,324,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | N = int(input("Введите количество прямоугольников")
for i in range (N):
a = int(input("Введите стороны прямоугольника")
b = int(input())
P = 2*(a+b)
if (i=1):
Max = P
if (P>Max):
Max = P
print(Max)
| [
"noreply@github.com"
] | noreply@github.com |
d35605db5bdf283207a2c171638328c4c8b53252 | 4e30d990963870478ed248567e432795f519e1cc | /tests/api/v3_1_1/test_nbar_app.py | 13a1bcd9798917799871178339c1315dd3a03d61 | [
"MIT"
] | permissive | CiscoISE/ciscoisesdk | 84074a57bf1042a735e3fc6eb7876555150d2b51 | f468c54998ec1ad85435ea28988922f0573bfee8 | refs/heads/main | 2023-09-04T23:56:32.232035 | 2023-08-25T17:31:49 | 2023-08-25T17:31:49 | 365,359,531 | 48 | 9 | MIT | 2023-08-25T17:31:51 | 2021-05-07T21:43:52 | Python | UTF-8 | Python | false | false | 9,399 | py | # -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI nbar_app API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.1', reason='version does not match')
def is_valid_get_nbar_apps(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_1e8a476ad8455fdebad0d8973c810495_v3_1_1').validate(obj.response)
return True
def get_nbar_apps(api):
endpoint_result = api.nbar_app.get_nbar_apps(
filter='value1,value2',
filter_type='string',
page=0,
size=0,
sort='string',
sort_by='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_apps(api, validator):
try:
assert is_valid_get_nbar_apps(
validator,
get_nbar_apps(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_nbar_apps_default(api):
endpoint_result = api.nbar_app.get_nbar_apps(
filter=None,
filter_type=None,
page=None,
size=None,
sort=None,
sort_by=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_apps_default(api, validator):
try:
assert is_valid_get_nbar_apps(
validator,
get_nbar_apps_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_nbar_app(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_ccc30178afce5e51a65e96cd95ca1773_v3_1_1').validate(obj.response)
return True
def create_nbar_app(api):
endpoint_result = api.nbar_app.create_nbar_app(
active_validation=False,
description='string',
id='string',
name='string',
network_identities=[{'ports': 'string', 'protocol': 'string'}],
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_create_nbar_app(api, validator):
try:
assert is_valid_create_nbar_app(
validator,
create_nbar_app(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_nbar_app_default(api):
endpoint_result = api.nbar_app.create_nbar_app(
active_validation=False,
description=None,
id=None,
name=None,
network_identities=None,
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_create_nbar_app_default(api, validator):
try:
assert is_valid_create_nbar_app(
validator,
create_nbar_app_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_nbar_app_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_61e99726f3745554a07ee102f74fe3bd_v3_1_1').validate(obj.response)
return True
def get_nbar_app_by_id(api):
endpoint_result = api.nbar_app.get_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_app_by_id(api, validator):
try:
assert is_valid_get_nbar_app_by_id(
validator,
get_nbar_app_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_nbar_app_by_id_default(api):
endpoint_result = api.nbar_app.get_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_get_nbar_app_by_id_default(api, validator):
try:
assert is_valid_get_nbar_app_by_id(
validator,
get_nbar_app_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_nbar_app_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_b55622f1671359919573b261ba16ea71_v3_1_1').validate(obj.response)
return True
def update_nbar_app_by_id(api):
endpoint_result = api.nbar_app.update_nbar_app_by_id(
active_validation=False,
description='string',
id='string',
name='string',
network_identities=[{'ports': 'string', 'protocol': 'string'}],
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_update_nbar_app_by_id(api, validator):
try:
assert is_valid_update_nbar_app_by_id(
validator,
update_nbar_app_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_nbar_app_by_id_default(api):
endpoint_result = api.nbar_app.update_nbar_app_by_id(
active_validation=False,
id='string',
description=None,
name=None,
network_identities=None,
payload=None
)
return endpoint_result
@pytest.mark.nbar_app
def test_update_nbar_app_by_id_default(api, validator):
try:
assert is_valid_update_nbar_app_by_id(
validator,
update_nbar_app_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_nbar_app_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_44d289d5685350f5b00f130db0a45142_v3_1_1').validate(obj.response)
return True
def delete_nbar_app_by_id(api):
endpoint_result = api.nbar_app.delete_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_delete_nbar_app_by_id(api, validator):
try:
assert is_valid_delete_nbar_app_by_id(
validator,
delete_nbar_app_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_nbar_app_by_id_default(api):
endpoint_result = api.nbar_app.delete_nbar_app_by_id(
id='string'
)
return endpoint_result
@pytest.mark.nbar_app
def test_delete_nbar_app_by_id_default(api, validator):
try:
assert is_valid_delete_nbar_app_by_id(
validator,
delete_nbar_app_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
8cb3d749f4466525d40f270c8a048fd83397d6b0 | e25e7f0d944d302c2fd13b7517d97c5e0b5558ec | /FixTree_TBCNN/pycparser/c_parser.py | 9a9d09657ad6d9acb7465f692d2e3c1c7d25ba04 | [] | no_license | NizhenJenny/FixTree | 06702a0d529d861e34b045aac286434b0ce3d86f | be30a2cdeb6cc0aa13f29d2cd4d4ce325f00f2a0 | refs/heads/master | 2020-05-24T21:33:04.030992 | 2019-08-19T09:52:10 | 2019-08-19T09:52:10 | 187,477,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63,913 | py | #------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lextab='pycparser.lextab',
yacc_optimize=True,
yacctab='pycparser.yacctab',
yacc_debug=False,
taboutputdir=''):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
taboutputdir:
Set this parameter to control the location of generated
lextab and yacctab files.
"""
self.clex = CLexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab,
outputdir=taboutputdir)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'initializer_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab,
outputdir=taboutputdir)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
# saw: int name;
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
self._scope_stack = [dict()]
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
self._parse_error(
"Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module c_ast) and the
# modifiers are FuncDecl, PtrDecl and ArrayDecl.
#
# The standard states that whenever a new modifier is parsed, it should be
# added to the end of the list of modifiers. For example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
# useful for pointers, that can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
#
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto
# it
#
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
#
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
#
def _fix_decl_name_type(self, decl, typename):
""" Fixes a declaration. Modifies decl.
"""
# Reach the underlying basic type
#
type = decl
while not isinstance(type, c_ast.TypeDecl):
type = type.type
decl.name = type.declname
type.quals = decl.quals
# The typename is a list of types. If any type in this
# list isn't an IdentifierType, it must be the only
# type in the list (it's illegal to declare "int enum ..")
# If all the types are basic, they're collected in the
# IdentifierType holder.
#
for tn in typename:
if not isinstance(tn, c_ast.IdentifierType):
if len(typename) > 1:
self._parse_error(
"Invalid multiple types specified", tn.coord)
else:
type.type = tn
return decl
if not typename:
# Functions default to returning int
#
if not isinstance(decl.type, c_ast.FuncDecl):
self._parse_error(
"Missing type in declaration", decl.coord)
type.type = c_ast.IdentifierType(
['int'],
coord=decl.coord)
else:
# At this point, we know that typename is a list of IdentifierType
# nodes. Concatenate all the names into a single list.
#
type.type = c_ast.IdentifierType(
[name for id in typename for name in id.names],
coord=typename[0].coord)
return decl
def _add_declaration_specifier(self, declspec, newspec, kind):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[])
spec[kind].insert(0, newspec)
return spec
def _build_declarations(self, spec, decls, typedef_namespace=False):
""" Builds a list of declarations all sharing the given specifiers.
If typedef_namespace is true, each declared name is added
to the "typedef namespace", which also includes objects,
functions, and enum constants.
"""
is_typedef = 'typedef' in spec['storage']
declarations = []
# Bit-fields are allowed to be unnamed.
#
if decls[0].get('bitsize') is not None:
pass
# When redeclaring typedef names as identifiers in inner scopes, a
# problem can occur where the identifier gets grouped into
# spec['type'], leaving decl as None. This can only occur for the
# first declarator.
#
elif decls[0]['decl'] is None:
if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
not self._is_type_in_scope(spec['type'][-1].names[0]):
coord = '?'
for t in spec['type']:
if hasattr(t, 'coord'):
coord = t.coord
break
self._parse_error('Invalid declaration', coord)
# Make this look as if it came from "direct_declarator:ID"
decls[0]['decl'] = c_ast.TypeDecl(
declname=spec['type'][-1].names[0],
type=None,
quals=None,
coord=spec['type'][-1].coord)
# Remove the "new" type's name from the end of spec['type']
del spec['type'][-1]
# A similar problem can occur where the declaration ends up looking
# like an abstract declarator. Give it a name if this is the case.
#
elif not isinstance(decls[0]['decl'],
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
decls_0_tail = decls[0]['decl']
while not isinstance(decls_0_tail, c_ast.TypeDecl):
decls_0_tail = decls_0_tail.type
if decls_0_tail.declname is None:
decls_0_tail.declname = spec['type'][-1].names[0]
del spec['type'][-1]
for decl in decls:
assert decl['decl'] is not None
if is_typedef:
declaration = c_ast.Typedef(
name=None,
quals=spec['qual'],
storage=spec['storage'],
type=decl['decl'],
coord=decl['decl'].coord)
else:
declaration = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl['decl'],
init=decl.get('init'),
bitsize=decl.get('bitsize'),
coord=decl['decl'].coord)
if isinstance(declaration.type,
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
fixed_decl = declaration
else:
fixed_decl = self._fix_decl_name_type(declaration, spec['type'])
# Add the type name defined by typedef to a
# symbol table (for usage in the lexer)
#
if typedef_namespace:
if is_typedef:
self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
else:
self._add_identifier(fixed_decl.name, fixed_decl.coord)
declarations.append(fixed_decl)
return declarations
def _build_function_definition(self, spec, decl, param_decls, body):
""" Builds a function definition.
"""
assert 'typedef' not in spec['storage']
declaration = self._build_declarations(
spec=spec,
decls=[dict(decl=decl, init=None)],
typedef_namespace=True)[0]
return c_ast.FuncDef(
decl=declaration,
param_decls=param_decls,
body=body,
coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
#
def p_translation_unit_or_empty(self, p):
""" translation_unit_or_empty : translation_unit
| empty
"""
if p[1] is None:
p[0] = c_ast.FileAST([])
else:
p[0] = c_ast.FileAST(p[1])
def p_translation_unit_1(self, p):
""" translation_unit : external_declaration
"""
# Note: external_declaration is already a list
#
p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
if p[2] is not None:
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
# external_declaration homogenous.
#
def p_external_declaration_1(self, p):
""" external_declaration : function_definition
"""
p[0] = [p[1]]
def p_external_declaration_2(self, p):
""" external_declaration : declaration
"""
p[0] = p[1]
def p_external_declaration_3(self, p):
""" external_declaration : pp_directive
"""
p[0] = p[1]
def p_external_declaration_4(self, p):
""" external_declaration : SEMI
"""
p[0] = None
def p_pp_directive(self, p):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
self._coord(p.lineno(1)))
# In function definitions, the declarator can be followed by
    # a declaration list, for old "K&R style" function definitions.
#
def p_function_definition_1(self, p):
""" function_definition : declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
qual=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))],
function=[])
p[0] = self._build_function_definition(
spec=spec,
decl=p[1],
param_decls=p[2],
body=p[3])
def p_function_definition_2(self, p):
""" function_definition : declaration_specifiers declarator declaration_list_opt compound_statement
"""
spec = p[1]
p[0] = self._build_function_definition(
spec=spec,
decl=p[2],
param_decls=p[3],
body=p[4])
def p_statement(self, p):
""" statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
"""
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
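    # Illustrative sketch (assumed, not generated output): "int x, *px;"
    # becomes two Decl nodes -- one whose type is a TypeDecl for x, and one
    # whose type is a PtrDecl wrapping a TypeDecl for px -- both built from
    # the same 'int' specifier.
    #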
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
"""
spec = p[1]
# p[2] (init_declarator_list_opt) is either a list or None
#
if p[2] is None:
# By the standard, you must have at least one declarator unless
# declaring a structure tag, a union tag, or the members of an
# enumeration.
#
ty = spec['type']
s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
decls = [c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=ty[0],
init=None,
bitsize=None,
coord=ty[0].coord)]
# However, this case can also occur on redeclared identifiers in
# an inner scope. The trouble is that the redeclared type's name
# gets grouped into declaration_specifiers; _build_declarations
# compensates for this.
#
else:
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)],
typedef_namespace=True)
else:
decls = self._build_declarations(
spec=spec,
decls=p[2],
typedef_namespace=True)
p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
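    #
    # A hypothetical input that shows why the split matters:
    #
    #   typedef int TT;
    #   TT x;
    #
    # Without the split, TT on the second line would already have been
    # tokenized (as a plain ID rather than a TYPEID) before the typedef from
    # the first line reached the symbol table.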
def p_declaration(self, p):
""" declaration : decl_body SEMI
"""
p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_declaration_specifiers_1(self, p):
""" declaration_specifiers : type_qualifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_declaration_specifiers_2(self, p):
""" declaration_specifiers : type_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
def p_declaration_specifiers_3(self, p):
""" declaration_specifiers : storage_class_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
def p_declaration_specifiers_4(self, p):
""" declaration_specifiers : function_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
"""
p[0] = p[1]
def p_function_specifier(self, p):
""" function_specifier : INLINE
"""
p[0] = p[1]
def p_type_specifier_1(self, p):
""" type_specifier : VOID
| _BOOL
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| _COMPLEX
| SIGNED
| UNSIGNED
"""
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_type_specifier_2(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
"""
p[0] = p[1]
def p_type_qualifier(self, p):
""" type_qualifier : CONST
| RESTRICT
| VOLATILE
"""
p[0] = p[1]
def p_init_declarator_list_1(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# If the code is declaring a variable that was declared a typedef in an
# outer scope, yacc will think the name is part of declaration_specifiers,
# not init_declarator, and will then get confused by EQUALS. Pass None
# up in place of declarator, and handle this at a higher level.
#
def p_init_declarator_list_2(self, p):
""" init_declarator_list : EQUALS initializer
"""
p[0] = [dict(decl=None, init=p[2])]
# Similarly, if the code contains duplicate typedefs of, for example,
# array types, the array portion will appear as an abstract declarator.
#
def p_init_declarator_list_3(self, p):
""" init_declarator_list : abstract_declarator
"""
p[0] = [dict(decl=p[1], init=None)]
# Returns a {decl=<declarator> : init=<initializer>} dictionary
# If there's no initializer, uses None
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
def p_specifier_qualifier_list_1(self, p):
""" specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_specifier_qualifier_list_2(self, p):
""" specifier_qualifier_list : type_specifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
def p_struct_or_union_specifier_1(self, p):
""" struct_or_union_specifier : struct_or_union ID
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=None,
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=None,
decls=p[3],
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=p[4],
coord=self._coord(p.lineno(2)))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
| UNION
"""
p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_struct_declaration_1(self, p):
""" struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
"""
spec = p[1]
assert 'typedef' not in spec['storage']
if p[2] is not None:
decls = self._build_declarations(
spec=spec,
decls=p[2])
elif len(spec['type']) == 1:
# Anonymous struct/union, gcc extension, C1x feature.
# Although the standard only allows structs/unions here, I see no
# reason to disallow other types since some compilers have typedefs
# here, and pycparser isn't about rejecting all invalid code.
#
node = spec['type'][0]
if isinstance(node, c_ast.Node):
decl_type = node
else:
decl_type = c_ast.IdentifierType(node)
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=decl_type)])
else:
# Structure/union members can have the same names as typedefs.
# The trouble is that the member's name gets grouped into
# specifier_qualifier_list; _build_declarations compensates.
#
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)])
p[0] = decls
def p_struct_declaration_2(self, p):
""" struct_declaration : specifier_qualifier_list abstract_declarator SEMI
"""
# "Abstract declarator?!", you ask? Structure members can have the
# same names as typedefs. The trouble is that the member's name gets
# grouped into specifier_qualifier_list, leaving any remainder to
# appear as an abstract declarator, as in:
# typedef int Foo;
# struct { Foo Foo[3]; };
#
p[0] = self._build_declarations(
spec=p[1],
decls=[dict(decl=p[2], init=None)])
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
def p_enum_specifier_1(self, p):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1)))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1)))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1)))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
def p_enumerator(self, p):
""" enumerator : ID
| ID EQUALS constant_expression
"""
if len(p) == 2:
enumerator = c_ast.Enumerator(
p[1], None,
self._coord(p.lineno(1)))
else:
enumerator = c_ast.Enumerator(
p[1], p[3],
self._coord(p.lineno(1)))
self._add_identifier(enumerator.name, enumerator.coord)
p[0] = enumerator
def p_declarator_1(self, p):
""" declarator : direct_declarator
"""
p[0] = p[1]
def p_declarator_2(self, p):
""" declarator : pointer direct_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
# Since it's impossible for a type to be specified after a pointer, assume
# it's intended to be the name for this declaration. _add_identifier will
# raise an error if this TYPEID can't be redeclared.
#
def p_declarator_3(self, p):
""" declarator : pointer TYPEID
"""
decl = c_ast.TypeDecl(
declname=p[2],
type=None,
quals=None,
coord=self._coord(p.lineno(2)))
p[0] = self._type_modify_decl(decl, p[1])
def p_direct_declarator_1(self, p):
""" direct_declarator : ID
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
coord=self._coord(p.lineno(1)))
def p_direct_declarator_2(self, p):
""" direct_declarator : LPAREN declarator RPAREN
"""
p[0] = p[2]
def p_direct_declarator_3(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
"""
quals = (p[3] if len(p) > 5 else []) or []
# Accept dimension qualifiers
# Per C99 6.7.5.3 p7
arr = c_ast.ArrayDecl(
type=None,
dim=p[4] if len(p) > 5 else p[3],
dim_quals=quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_4(self, p):
""" direct_declarator : direct_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
| direct_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
"""
# Using slice notation for PLY objects doesn't work in Python 3 for the
# version of PLY embedded with pycparser; see PLY Google Code issue 30.
# Work around that here by listing the two elements separately.
listed_quals = [item if isinstance(item, list) else [item]
for item in [p[3],p[4]]]
dim_quals = [qual for sublist in listed_quals for qual in sublist
if qual is not None]
arr = c_ast.ArrayDecl(
type=None,
dim=p[5],
dim_quals=dim_quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
def p_direct_declarator_5(self, p):
""" direct_declarator : direct_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[4], self._coord(p.lineno(4))),
dim_quals=p[3] if p[3] != None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_6(self, p):
""" direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN
| direct_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
# To see why _get_yacc_lookahead_token is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# Outside the function, TT is a typedef, but inside (starting and
# ending with the braces) it's a parameter. The trouble begins with
# yacc's lookahead token. We don't know if we're declaring or
# defining a function until we see LBRACE, but if we wait for yacc to
# trigger a rule on that token, then TT will have already been read
# and incorrectly interpreted as TYPEID. We need to add the
# parameters to the scope the moment the lexer sees LBRACE.
#
if self._get_yacc_lookahead_token().type == "LBRACE":
if func.args is not None:
for param in func.args.params:
if isinstance(param, c_ast.EllipsisParam): break
self._add_identifier(param.name, param.coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._coord(p.lineno(1))
# Pointer decls nest from inside out. This is important when different
# levels have different qualifiers. For example:
#
# char * const * p;
#
# Means "pointer to const pointer to char"
#
# While:
#
# char ** const p;
#
# Means "const pointer to pointer to char"
#
# So when we construct PtrDecl nestings, the leftmost pointer goes in
# as the most nested type.
nested_type = c_ast.PtrDecl(quals=p[2] or [], type=None, coord=coord)
if len(p) > 3:
tail_type = p[3]
while tail_type.type is not None:
tail_type = tail_type.type
tail_type.type = nested_type
p[0] = p[3]
else:
p[0] = nested_type
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_parameter_type_list(self, p):
""" parameter_type_list : parameter_list
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3))))
p[0] = p[1]
def p_parameter_list(self, p):
""" parameter_list : parameter_declaration
| parameter_list COMMA parameter_declaration
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_parameter_declaration_1(self, p):
""" parameter_declaration : declaration_specifiers declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
def p_parameter_declaration_2(self, p):
""" parameter_declaration : declaration_specifiers abstract_declarator_opt
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
# Parameters can have the same names as typedefs. The trouble is that
# the parameter's name gets grouped into declaration_specifiers, making
# it look like an old-style declaration; compensate.
#
if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
self._is_type_in_scope(spec['type'][-1].names[0]):
decl = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2], init=None)])[0]
# This truly is an old-style parameter declaration
#
else:
decl = c_ast.Typename(
name='',
quals=spec['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
typename = spec['type']
decl = self._fix_decl_name_type(decl, typename)
p[0] = decl
def p_identifier_list(self, p):
""" identifier_list : identifier
| identifier_list COMMA identifier
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_initializer_1(self, p):
""" initializer : assignment_expression
"""
p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list_opt brace_close
| brace_open initializer_list COMMA brace_close
"""
if p[2] is None:
p[0] = c_ast.InitList([], self._coord(p.lineno(1)))
else:
p[0] = p[2]
def p_initializer_list(self, p):
""" initializer_list : designation_opt initializer
| initializer_list COMMA designation_opt initializer
"""
if len(p) == 3: # single initializer
init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
p[0] = c_ast.InitList([init], p[2].coord)
else:
init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
p[1].exprs.append(init)
p[0] = p[1]
def p_designation(self, p):
""" designation : designator_list EQUALS
"""
p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_designator(self, p):
""" designator : LBRACKET constant_expression RBRACKET
| PERIOD identifier
"""
p[0] = p[2]
def p_type_name(self, p):
""" type_name : specifier_qualifier_list abstract_declarator_opt
"""
#~ print '=========='
#~ print p[1]
#~ print p[2]
#~ print p[2].children()
#~ print '=========='
typename = c_ast.Typename(
name='',
quals=p[1]['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
def p_abstract_declarator_1(self, p):
""" abstract_declarator : pointer
"""
dummytype = c_ast.TypeDecl(None, None, None)
p[0] = self._type_modify_decl(
decl=dummytype,
modifier=p[1])
def p_abstract_declarator_2(self, p):
""" abstract_declarator : pointer direct_abstract_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_abstract_declarator_3(self, p):
""" abstract_declarator : direct_abstract_declarator
"""
p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
def p_direct_abstract_declarator_1(self, p):
""" direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
p[0] = p[2]
def p_direct_abstract_declarator_2(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_3(self, p):
""" direct_abstract_declarator : LBRACKET assignment_expression_opt RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=p[2],
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
dim_quals=[],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_direct_abstract_declarator_7(self, p):
""" direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
"""
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(1)))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
def p_compound_statement_1(self, p):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
coord=self._coord(p.lineno(1)))
def p_labeled_statement_1(self, p):
""" labeled_statement : ID COLON statement """
p[0] = c_ast.Label(p[1], p[3], self._coord(p.lineno(1)))
def p_labeled_statement_2(self, p):
""" labeled_statement : CASE constant_expression COLON statement """
p[0] = c_ast.Case(p[2], [p[4]], self._coord(p.lineno(1)))
def p_labeled_statement_3(self, p):
""" labeled_statement : DEFAULT COLON statement """
p[0] = c_ast.Default([p[3]], self._coord(p.lineno(1)))
def p_selection_statement_1(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement """
p[0] = c_ast.If(p[3], p[5], None, self._coord(p.lineno(1)))
def p_selection_statement_2(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement ELSE statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._coord(p.lineno(1)))
def p_selection_statement_3(self, p):
""" selection_statement : SWITCH LPAREN expression RPAREN statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._coord(p.lineno(1))))
def p_iteration_statement_1(self, p):
""" iteration_statement : WHILE LPAREN expression RPAREN statement """
p[0] = c_ast.While(p[3], p[5], self._coord(p.lineno(1)))
def p_iteration_statement_2(self, p):
""" iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._coord(p.lineno(1)))
def p_iteration_statement_3(self, p):
""" iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._coord(p.lineno(1)))
def p_iteration_statement_4(self, p):
""" iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(c_ast.DeclList(p[3], self._coord(p.lineno(1))),
p[4], p[6], p[8], self._coord(p.lineno(1)))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
p[0] = c_ast.Goto(p[2], self._coord(p.lineno(1)))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
p[0] = c_ast.Break(self._coord(p.lineno(1)))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
p[0] = c_ast.Continue(self._coord(p.lineno(1)))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._coord(p.lineno(1)))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._coord(p.lineno(1)))
else:
p[0] = p[1]
def p_expression(self, p):
""" expression : assignment_expression
| expression COMMA assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
if not isinstance(p[1], c_ast.ExprList):
p[1] = c_ast.ExprList([p[1]], p[1].coord)
p[1].exprs.append(p[3])
p[0] = p[1]
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
| unary_expression assignment_operator assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
    # precedence and associativity. Why work hard? I'll just use
    # the built-in precedence/associativity specification feature
# of PLY. (see precedence declaration above)
#
def p_assignment_operator(self, p):
""" assignment_operator : EQUALS
| XOREQUAL
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
"""
p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._coord(p.lineno(1)))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._coord(p.lineno(1)))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD ID
| postfix_expression PERIOD TYPEID
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
field = c_ast.ID(p[3], self._coord(p.lineno(3)))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_primary_expression_5(self, p):
""" primary_expression : OFFSETOF LPAREN type_name COMMA identifier RPAREN
"""
coord = self._coord(p.lineno(1))
p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord),
c_ast.ExprList([p[3], p[5]], coord),
coord)
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._coord(p.lineno(1)))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
| INT_CONST_BIN
"""
p[0] = c_ast.Constant(
'int', p[1], self._coord(p.lineno(1)))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
p[0] = c_ast.Constant(
'float', p[1], self._coord(p.lineno(1)))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._coord(p.lineno(1)))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
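    # Sketch of the merge performed below (illustrative): '"hello "' followed
    # by '"world"' becomes '"hello world"' by dropping the closing quote of
    # the first literal and the opening quote of the second.
    #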
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
p[0] = p[1]
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
p[0] = p[1]
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
# If error recovery is added here in the future, make sure
# _get_yacc_lookahead_token still works!
#
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', '')
#------------------------------------------------------------------------------
if __name__ == "__main__":
import pprint
import time, sys
#t1 = time.time()
#parser = CParser(lex_optimize=True, yacc_debug=True, yacc_optimize=False)
#sys.write(time.time() - t1)
#buf = '''
#int (*k)(int);
#'''
## set debuglevel to 2 for debugging
#t = parser.parse(buf, 'x.c', debuglevel=0)
#t.show(showcoord=True)
| [
"noreply@github.com"
] | noreply@github.com |
08a65bb7db851c3827f50ea795ce9e58ad45c818 | 7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14 | /airbyte-integrations/connectors/source-facebook-pages/source_facebook_pages/streams.py | 717fb1c76800fc295cff19b40b475069c0e2914a | [
"MIT",
"Elastic-2.0"
] | permissive | Velocity-Engineering/airbyte | b6e1fcead5b9fd7c74d50b9f27118654604dc8e0 | 802a8184cdd11c1eb905a54ed07c8732b0c0b807 | refs/heads/master | 2023-07-31T15:16:27.644737 | 2021-09-28T08:43:51 | 2021-09-28T08:43:51 | 370,730,633 | 0 | 1 | MIT | 2021-06-08T05:58:44 | 2021-05-25T14:55:43 | Java | UTF-8 | Python | false | false | 4,651 | py | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from abc import ABC
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import requests
from airbyte_cdk.sources.streams.http import HttpStream
from source_facebook_pages.metrics import PAGE_FIELDS, PAGE_METRICS, POST_FIELDS, POST_METRICS
class FacebookPagesStream(HttpStream, ABC):
url_base = "https://graph.facebook.com/v11.0/"
primary_key = "id"
data_field = "data"
def __init__(
self,
access_token: str = None,
page_id: str = None,
**kwargs,
):
super().__init__(**kwargs)
self._access_token = access_token
self._page_id = page_id
@property
def path_param(self):
return self.name[:-1]
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
data = response.json()
if not data.get("data") or not data.get("paging"):
return {}
return {
"limit": 100,
"after": data.get("paging", {}).get("cursors", {}).get("after"),
}
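    # Illustrative shape (hypothetical response, for documentation only): a body
    # like {"data": [...], "paging": {"cursors": {"after": "ABC123"}}} yields
    # {"limit": 100, "after": "ABC123"}, which request_params below merges into
    # the query parameters of the next request.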
def request_params(
self,
stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
next_page_token = next_page_token or {}
params = {"access_token": self._access_token, **next_page_token}
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if not self.data_field:
yield response.json()
records = response.json().get(self.data_field, [])
for record in records:
yield record
class Page(FacebookPagesStream):
"""
API docs: https://developers.facebook.com/docs/graph-api/reference/page/,
"""
data_field = ""
def path(self, **kwargs) -> str:
return self._page_id
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
# we have to define which fields will return from Facebook API
# because FB API doesn't provide opportunity to get fields dynamically without delays
# so in PAGE_FIELDS we define fields that user can get from API
params["fields"] = PAGE_FIELDS
return params
class Post(FacebookPagesStream):
"""
https://developers.facebook.com/docs/graph-api/reference/v11.0/page/feed,
"""
def path(self, **kwargs) -> str:
return f"{self._page_id}/posts"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
params["fields"] = POST_FIELDS
return params
class PageInsights(FacebookPagesStream):
"""
API docs: https://developers.facebook.com/docs/graph-api/reference/page/insights/,
"""
def path(self, **kwargs) -> str:
return f"{self._page_id}/insights"
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def request_params(
self,
stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params["metric"] = ",".join(PAGE_METRICS)
return params
class PostInsights(FacebookPagesStream):
"""
API docs: https://developers.facebook.com/docs/graph-api/reference/post/insights/,
"""
def path(self, **kwargs) -> str:
return f"{self._page_id}/posts"
def request_params(
self,
stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params["fields"] = f'insights.metric({",".join(POST_METRICS)})'
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
# unique case so we override this method
records = response.json().get(self.data_field) or []
for insights in records:
if insights.get("insights"):
data = insights.get("insights").get("data")
for insight in data:
yield insight
else:
yield insights
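    # Illustrative record shape (hypothetical): {"insights": {"data": [{"name":
    # "post_impressions", ...}, ...]}} -- the inner insight dicts are yielded;
    # records without an "insights" key are yielded unchanged.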
| [
"noreply@github.com"
] | noreply@github.com |
3d1e771da9ec0f32bfd297a1b19794e9054adce4 | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/45/sol.py | 3db6f97188dd189aef4c4caf07b43524d9f7f299 | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 2,156 | py |
10-lines C++ (16ms) / Python BFS Solutions with Explanations
https://leetcode.com/problems/jump-game-ii/discuss/18019
* Lang: python3
* Author: jianchao-li
* Votes: 71
This problem has a nice BFS structure. Let's illustrate it using the example `nums = [2, 3, 1, 1, 4]` from the problem statement. We start at index `0` and can move at most `nums[0] = 2` steps from it. So, after one move, we may reach index `1` (`nums[1] = 3`) or index `2` (`nums[2] = 1`); these indices are reachable in `1` move. From them, we can further reach indices `3` and `4`. Now you can see that the target index `4` is reachable in `2` moves.
Putting this into code, we keep two pointers `start` and `end` that record the current range of starting indices. After each move, we update `start` to `end + 1` and `end` to the farthest index reachable in `1` move from the current `[start, end]`.
To get an accepted solution, it is important to handle all the edge cases. And the following code handles all of them in a unified way without any special-case `if` statements :-)
----------
**C++**
class Solution {
public:
int jump(vector<int>& nums) {
int n = nums.size(), step = 0, start = 0, end = 0;
while (end < n - 1) {
step++;
int maxend = end + 1;
for (int i = start; i <= end; i++) {
if (i + nums[i] >= n - 1) return step;
maxend = max(maxend, i + nums[i]);
}
start = end + 1;
end = maxend;
}
return step;
}
};
----------
**Python**
class Solution:
# @param {integer[]} nums
# @return {integer}
def jump(self, nums):
n, start, end, step = len(nums), 0, 0, 0
while end < n - 1:
step += 1
maxend = end + 1
for i in range(start, end + 1):
if i + nums[i] >= n - 1:
return step
maxend = max(maxend, i + nums[i])
start, end = end + 1, maxend
return step
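A quick sanity check (hypothetical driver code, not part of the original post):

    s = Solution()
    print(s.jump([2, 3, 1, 1, 4]))  # 2, as traced above
    print(s.jump([0]))              # 0, we already stand on the last index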
| [
"frankie.y.liu@gmail.com"
] | frankie.y.liu@gmail.com |
7dd79a81c2691091fdf63dedb45319a7eae1a591 | 0fb12be061ab050904ceea99f6a938985a0d8acf | /report_mako2pdf/lib/xhtml2pdf/reportlab_paragraph.py | eba9e9aa506f6c2e6a82f44c220787a1075fbb14 | [] | no_license | libermatos/Openerp_6.1 | d17fbff1f35948e0c4176e2ed34ac5d7f8453834 | 510df13df7ea651c055b408ad66c580ca29d4ad7 | refs/heads/master | 2023-06-19T00:24:36.002581 | 2021-07-07T01:17:20 | 2021-07-07T01:17:20 | 383,574,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71,161 | py | # -*- coding: utf-8 -*-
# Copyright ReportLab Europe Ltd. 2000-2008
# see license.txt for license details
# history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/paragraph.py
# Modifications by Dirk Holtwick, 2008
from string import join, whitespace
from operator import truth
from reportlab.pdfbase.pdfmetrics import stringWidth, getAscentDescent
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import Color
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.textsplit import ALL_CANNOT_START
from copy import deepcopy
from reportlab.lib.abag import ABag
import re
PARAGRAPH_DEBUG = False
LEADING_FACTOR = 1.0
_wsc_re_split = re.compile('[%s]+' % re.escape(''.join((
u'\u0009', # HORIZONTAL TABULATION
u'\u000A', # LINE FEED
u'\u000B', # VERTICAL TABULATION
u'\u000C', # FORM FEED
u'\u000D', # CARRIAGE RETURN
u'\u001C', # FILE SEPARATOR
u'\u001D', # GROUP SEPARATOR
u'\u001E', # RECORD SEPARATOR
u'\u001F', # UNIT SEPARATOR
u'\u0020', # SPACE
u'\u0085', # NEXT LINE
#u'\u00A0', # NO-BREAK SPACE
u'\u1680', # OGHAM SPACE MARK
u'\u2000', # EN QUAD
u'\u2001', # EM QUAD
u'\u2002', # EN SPACE
u'\u2003', # EM SPACE
u'\u2004', # THREE-PER-EM SPACE
u'\u2005', # FOUR-PER-EM SPACE
u'\u2006', # SIX-PER-EM SPACE
u'\u2007', # FIGURE SPACE
u'\u2008', # PUNCTUATION SPACE
u'\u2009', # THIN SPACE
u'\u200A', # HAIR SPACE
u'\u200B', # ZERO WIDTH SPACE
u'\u2028', # LINE SEPARATOR
u'\u2029', # PARAGRAPH SEPARATOR
u'\u202F', # NARROW NO-BREAK SPACE
u'\u205F', # MEDIUM MATHEMATICAL SPACE
u'\u3000', # IDEOGRAPHIC SPACE
)))).split
def split(text, delim=None):
if type(text) is str:
text = text.decode('utf8')
if type(delim) is str:
delim = delim.decode('utf8')
elif delim is None and u'\xa0' in text:
return [uword.encode('utf8') for uword in _wsc_re_split(text)]
return [uword.encode('utf8') for uword in text.split(delim)]
def strip(text):
if type(text) is str:
text = text.decode('utf8')
return text.strip().encode('utf8')
class ParaLines(ABag):
"""
class ParaLines contains the broken into lines representation of Paragraphs
kind=0 Simple
fontName, fontSize, textColor apply to whole Paragraph
lines [(extraSpace1,words1),....,(extraspaceN,wordsN)]
kind==1 Complex
lines [FragLine1,...,FragLineN]
"""
class FragLine(ABag):
"""
class FragLine contains a styled line (ie a line with more than one style)::
extraSpace unused space for justification only
wordCount 1+spaces in line for justification purposes
words [ParaFrags] style text lumps to be concatenated together
fontSize maximum fontSize seen on the line; not used at present,
but could be used for line spacing.
"""
#our one and only parser
# XXXXX if the parser has any internal state using only one is probably a BAD idea!
_parser = ParaParser()
def _lineClean(L):
return join(filter(truth, split(strip(L))))
def cleanBlockQuotedText(text, joiner=' '):
"""This is an internal utility which takes triple-
quoted text form within the document and returns
(hopefully) the paragraph the user intended originally."""
L = filter(truth, map(_lineClean, split(text, '\n')))
return join(L, joiner)
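# For instance (illustrative): cleanBlockQuotedText("  Hello\n    world  ")
# returns "Hello world" -- each line is stripped and the cleaned lines are
# joined with the given joiner.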
def setXPos(tx, dx):
if dx > 1e-6 or dx < -1e-6:
tx.setXPos(dx)
def _leftDrawParaLine(tx, offset, extraspace, words, last=0):
setXPos(tx, offset)
tx._textOut(join(words), 1)
setXPos(tx, -offset)
return offset
def _centerDrawParaLine(tx, offset, extraspace, words, last=0):
m = offset + 0.5 * extraspace
setXPos(tx, m)
tx._textOut(join(words), 1)
setXPos(tx, -m)
return m
def _rightDrawParaLine(tx, offset, extraspace, words, last=0):
m = offset + extraspace
setXPos(tx, m)
tx._textOut(join(words), 1)
setXPos(tx, -m)
return m
def _justifyDrawParaLine(tx, offset, extraspace, words, last=0):
setXPos(tx, offset)
text = join(words)
if last:
#last one, left align
tx._textOut(text, 1)
else:
nSpaces = len(words) - 1
if nSpaces:
tx.setWordSpace(extraspace / float(nSpaces))
tx._textOut(text, 1)
tx.setWordSpace(0)
else:
tx._textOut(text, 1)
setXPos(tx, -offset)
return offset
def imgVRange(h, va, fontSize):
"""
return bottom,top offsets relative to baseline(0)
"""
if va == 'baseline':
iyo = 0
elif va in ('text-top', 'top'):
iyo = fontSize - h
elif va == 'middle':
iyo = fontSize - (1.2 * fontSize + h) * 0.5
elif va in ('text-bottom', 'bottom'):
iyo = fontSize - 1.2 * fontSize
elif va == 'super':
iyo = 0.5 * fontSize
elif va == 'sub':
iyo = -0.5 * fontSize
elif hasattr(va, 'normalizedValue'):
iyo = va.normalizedValue(fontSize)
else:
iyo = va
return iyo, iyo + h
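# Example with made-up numbers: imgVRange(h=10, va='baseline', fontSize=12)
# returns (0, 10), i.e. the image sits on the baseline and extends 10 points
# above it; va='super' would shift that range up by half the font size.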
_56 = 5. / 6
_16 = 1. / 6
def _putFragLine(cur_x, tx, line):
xs = tx.XtraState
cur_y = xs.cur_y
x0 = tx._x0
autoLeading = xs.autoLeading
leading = xs.leading
cur_x += xs.leftIndent
dal = autoLeading in ('min', 'max')
if dal:
if autoLeading == 'max':
ascent = max(_56 * leading, line.ascent)
descent = max(_16 * leading, -line.descent)
else:
ascent = line.ascent
descent = -line.descent
leading = ascent + descent
if tx._leading != leading:
tx.setLeading(leading)
if dal:
olb = tx._olb
if olb is not None:
xcy = olb - ascent
if tx._oleading != leading:
cur_y += leading - tx._oleading
if abs(xcy - cur_y) > 1e-8:
cur_y = xcy
tx.setTextOrigin(x0, cur_y)
xs.cur_y = cur_y
tx._olb = cur_y - descent
tx._oleading = leading
# Letter spacing
if xs.style.letterSpacing != 'normal':
tx.setCharSpace(int(xs.style.letterSpacing))
ws = getattr(tx, '_wordSpace', 0)
nSpaces = 0
words = line.words
for f in words:
if hasattr(f, 'cbDefn'):
cbDefn = f.cbDefn
kind = cbDefn.kind
if kind == 'img':
#draw image cbDefn,cur_y,cur_x
w = cbDefn.width
h = cbDefn.height
txfs = tx._fontsize
if txfs is None:
txfs = xs.style.fontSize
iy0, iy1 = imgVRange(h, cbDefn.valign, txfs)
cur_x_s = cur_x + nSpaces * ws
tx._canvas.drawImage(cbDefn.image.getImage(), cur_x_s, cur_y + iy0, w, h, mask='auto')
cur_x += w
cur_x_s += w
setXPos(tx, cur_x_s - tx._x0)
elif kind == 'barcode':
barcode = cbDefn.barcode
w = cbDefn.width
h = cbDefn.height
txfs = tx._fontsize
if txfs is None:
txfs = xs.style.fontSize
iy0, iy1 = imgVRange(h, cbDefn.valign, txfs)
cur_x_s = cur_x + nSpaces * ws
barcode.draw(canvas=tx._canvas, xoffset=cur_x_s)
cur_x += w
cur_x_s += w
setXPos(tx, cur_x_s - tx._x0)
else:
name = cbDefn.name
if kind == 'anchor':
tx._canvas.bookmarkHorizontal(name, cur_x, cur_y + leading)
else:
func = getattr(tx._canvas, name, None)
if not func:
raise AttributeError("Missing %s callback attribute '%s'" % (kind, name))
func(tx._canvas, kind, cbDefn.label)
if f is words[-1]:
if not tx._fontname:
tx.setFont(xs.style.fontName, xs.style.fontSize)
tx._textOut('', 1)
elif kind == 'img':
tx._textOut('', 1)
else:
cur_x_s = cur_x + nSpaces * ws
if (tx._fontname, tx._fontsize) != (f.fontName, f.fontSize):
tx._setFont(f.fontName, f.fontSize)
if xs.textColor != f.textColor:
xs.textColor = f.textColor
tx.setFillColor(f.textColor)
if xs.rise != f.rise:
xs.rise = f.rise
tx.setRise(f.rise)
text = f.text
tx._textOut(text, f is words[-1]) # cheap textOut
# XXX Modified for XHTML2PDF
# Background colors (done like underline)
if hasattr(f, "backColor"):
if xs.backgroundColor != f.backColor or xs.backgroundFontSize != f.fontSize:
if xs.backgroundColor is not None:
xs.backgrounds.append((xs.background_x, cur_x_s, xs.backgroundColor, xs.backgroundFontSize))
xs.background_x = cur_x_s
xs.backgroundColor = f.backColor
xs.backgroundFontSize = f.fontSize
# Underline
if not xs.underline and f.underline:
xs.underline = 1
xs.underline_x = cur_x_s
xs.underlineColor = f.textColor
elif xs.underline:
if not f.underline:
xs.underline = 0
xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
xs.underlineColor = None
elif xs.textColor != xs.underlineColor:
xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
xs.underlineColor = xs.textColor
xs.underline_x = cur_x_s
# Strike
if not xs.strike and f.strike:
xs.strike = 1
xs.strike_x = cur_x_s
xs.strikeColor = f.textColor
# XXX Modified for XHTML2PDF
xs.strikeFontSize = f.fontSize
elif xs.strike:
if not f.strike:
xs.strike = 0
# XXX Modified for XHTML2PDF
xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
xs.strikeColor = None
xs.strikeFontSize = None
elif xs.textColor != xs.strikeColor:
xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
xs.strikeColor = xs.textColor
xs.strikeFontSize = f.fontSize
xs.strike_x = cur_x_s
if f.link and not xs.link:
if not xs.link:
xs.link = f.link
xs.link_x = cur_x_s
xs.linkColor = xs.textColor
elif xs.link:
if not f.link:
xs.links.append((xs.link_x, cur_x_s, xs.link, xs.linkColor))
xs.link = None
xs.linkColor = None
elif f.link != xs.link or xs.textColor != xs.linkColor:
xs.links.append((xs.link_x, cur_x_s, xs.link, xs.linkColor))
xs.link = f.link
xs.link_x = cur_x_s
xs.linkColor = xs.textColor
txtlen = tx._canvas.stringWidth(text, tx._fontname, tx._fontsize)
cur_x += txtlen
nSpaces += text.count(' ')
cur_x_s = cur_x + (nSpaces - 1) * ws
# XXX Modified for XHTML2PDF
# Underline
if xs.underline:
xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
# XXX Modified for XHTML2PDF
# Backcolor
if hasattr(f, "backColor"):
if xs.backgroundColor is not None:
xs.backgrounds.append((xs.background_x, cur_x_s, xs.backgroundColor, xs.backgroundFontSize))
# XXX Modified for XHTML2PDF
# Strike
if xs.strike:
xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
if xs.link:
xs.links.append((xs.link_x, cur_x_s, xs.link, xs.linkColor))
if tx._x0 != x0:
setXPos(tx, x0 - tx._x0)
def _leftDrawParaLineX( tx, offset, line, last=0):
setXPos(tx, offset)
_putFragLine(offset, tx, line)
setXPos(tx, -offset)
def _centerDrawParaLineX( tx, offset, line, last=0):
m = offset + 0.5 * line.extraSpace
setXPos(tx, m)
_putFragLine(m, tx, line)
setXPos(tx, -m)
def _rightDrawParaLineX( tx, offset, line, last=0):
m = offset + line.extraSpace
setXPos(tx, m)
_putFragLine(m, tx, line)
setXPos(tx, -m)
def _justifyDrawParaLineX( tx, offset, line, last=0):
setXPos(tx, offset)
extraSpace = line.extraSpace
nSpaces = line.wordCount - 1
if last or not nSpaces or abs(extraSpace) <= 1e-8 or line.lineBreak:
_putFragLine(offset, tx, line) # no space modification
else:
tx.setWordSpace(extraSpace / float(nSpaces))
_putFragLine(offset, tx, line)
tx.setWordSpace(0)
setXPos(tx, -offset)
def _sameFrag(f, g):
"""
returns 1 if two ParaFrags map out the same
"""
if (hasattr(f, 'cbDefn') or hasattr(g, 'cbDefn')
or hasattr(f, 'lineBreak') or hasattr(g, 'lineBreak')): return 0
for a in ('fontName', 'fontSize', 'textColor', 'backColor', 'rise', 'underline', 'strike', 'link'):
if getattr(f, a, None) != getattr(g, a, None): return 0
return 1
def _getFragWords(frags):
"""
given a Parafrag list return a list of fragwords
[[size, (f00,w00), ..., (f0n,w0n)],....,[size, (fm0,wm0), ..., (f0n,wmn)]]
each pair f,w represents a style and some string
each sublist represents a word
"""
R = []
W = []
n = 0
hangingStrip = False
for f in frags:
text = f.text
if text != '':
if hangingStrip:
hangingStrip = False
text = text.lstrip()
S = split(text)
if S == []:
S = ['']
if W != [] and text[0] in whitespace:
W.insert(0, n)
R.append(W)
W = []
n = 0
for w in S[:-1]:
W.append((f, w))
n += stringWidth(w, f.fontName, f.fontSize)
W.insert(0, n)
R.append(W)
W = []
n = 0
w = S[-1]
W.append((f, w))
n += stringWidth(w, f.fontName, f.fontSize)
if text and text[-1] in whitespace:
W.insert(0, n)
R.append(W)
W = []
n = 0
elif hasattr(f, 'cbDefn'):
w = getattr(f.cbDefn, 'width', 0)
if w:
if W != []:
W.insert(0, n)
R.append(W)
W = []
n = 0
R.append([w, (f, '')])
else:
W.append((f, ''))
elif hasattr(f, 'lineBreak'):
#pass the frag through. The line breaker will scan for it.
if W != []:
W.insert(0, n)
R.append(W)
W = []
n = 0
R.append([0, (f, '')])
hangingStrip = True
if W != []:
W.insert(0, n)
R.append(W)
return R
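# Rough sketch (assuming a single frag f whose text is "to be"): the result is
# [[w1, (f, 'to')], [w2, (f, 'be')]], where w1 and w2 are the stringWidth of
# 'to' and 'be' in f's font.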
def _split_blParaSimple(blPara, start, stop):
f = blPara.clone()
for a in ('lines', 'kind', 'text'):
if hasattr(f, a): delattr(f, a)
f.words = []
for l in blPara.lines[start:stop]:
for w in l[1]:
f.words.append(w)
return [f]
def _split_blParaHard(blPara, start, stop):
f = []
lines = blPara.lines[start:stop]
for l in lines:
for w in l.words:
f.append(w)
if l is not lines[-1]:
i = len(f) - 1
while i >= 0 and hasattr(f[i], 'cbDefn') and not getattr(f[i].cbDefn, 'width', 0): i -= 1
if i >= 0:
g = f[i]
if not g.text:
g.text = ' '
elif g.text[-1] != ' ':
g.text += ' '
return f
def _drawBullet(canvas, offset, cur_y, bulletText, style):
"""
    draw a bullet; text could be a simple string or a frag list
"""
tx2 = canvas.beginText(style.bulletIndent, cur_y + getattr(style, "bulletOffsetY", 0))
tx2.setFont(style.bulletFontName, style.bulletFontSize)
tx2.setFillColor(hasattr(style, 'bulletColor') and style.bulletColor or style.textColor)
if isinstance(bulletText, basestring):
tx2.textOut(bulletText)
else:
for f in bulletText:
if hasattr(f, "image"):
image = f.image
width = image.drawWidth
height = image.drawHeight
gap = style.bulletFontSize * 0.25
img = image.getImage()
# print style.bulletIndent, offset, width
canvas.drawImage(
img,
style.leftIndent - width - gap,
cur_y + getattr(style, "bulletOffsetY", 0),
width,
height)
else:
tx2.setFont(f.fontName, f.fontSize)
tx2.setFillColor(f.textColor)
tx2.textOut(f.text)
canvas.drawText(tx2)
#AR making definition lists a bit less ugly
#bulletEnd = tx2.getX()
bulletEnd = tx2.getX() + style.bulletFontSize * 0.6
offset = max(offset, bulletEnd - style.leftIndent)
return offset
def _handleBulletWidth(bulletText, style, maxWidths):
"""
    work out bullet width and adjust maxWidths[0] if necessary
"""
if bulletText:
if isinstance(bulletText, basestring):
bulletWidth = stringWidth(bulletText, style.bulletFontName, style.bulletFontSize)
else:
#it's a list of fragments
bulletWidth = 0
for f in bulletText:
bulletWidth = bulletWidth + stringWidth(f.text, f.fontName, f.fontSize)
bulletRight = style.bulletIndent + bulletWidth + 0.6 * style.bulletFontSize
indent = style.leftIndent + style.firstLineIndent
if bulletRight > indent:
#..then it overruns, and we have less space available on line 1
maxWidths[0] -= (bulletRight - indent)
def splitLines0(frags, widths):
"""
given a list of ParaFrags we return a list of ParaLines
each ParaLine has
1) ExtraSpace
2) blankCount
3) [textDefns....]
each text definition is a (ParaFrag, start, limit) triplet
"""
#initialise the algorithm
lines = []
lineNum = 0
maxW = widths[lineNum]
i = -1
l = len(frags)
lim = start = 0
while 1:
#find a non whitespace character
while i < l:
while start < lim and text[start] == ' ': start += 1
if start == lim:
i += 1
if i == l: break
start = 0
f = frags[i]
text = f.text
lim = len(text)
else:
break # we found one
if start == lim: break # if we didn't find one we are done
#start of a line
g = (None, None, None)
line = []
cLen = 0
nSpaces = 0
while cLen < maxW:
j = text.find(' ', start)
if j < 0:
                j = lim
w = stringWidth(text[start:j], f.fontName, f.fontSize)
cLen += w
if cLen > maxW and line != []:
cLen = cLen - w
#this is the end of the line
while g.text[lim] == ' ':
lim -= 1
nSpaces -= 1
break
if j < 0:
j = lim
if g[0] is f:
g[2] = j #extend
else:
g = (f, start, j)
line.append(g)
if j == lim:
i += 1
def _do_under_line(i, t_off, ws, tx, lm=-0.125):
y = tx.XtraState.cur_y - i * tx.XtraState.style.leading + lm * tx.XtraState.f.fontSize
textlen = tx._canvas.stringWidth(join(tx.XtraState.lines[i][1]), tx._fontname, tx._fontsize)
tx._canvas.line(t_off, y, t_off + textlen + ws, y)
_scheme_re = re.compile('^[a-zA-Z][-+a-zA-Z0-9]+$')
def _doLink(tx, link, rect):
if isinstance(link, unicode):
link = link.encode('utf8')
parts = link.split(':', 1)
scheme = len(parts) == 2 and parts[0].lower() or ''
if _scheme_re.match(scheme) and scheme != 'document':
kind = scheme.lower() == 'pdf' and 'GoToR' or 'URI'
if kind == 'GoToR': link = parts[1]
tx._canvas.linkURL(link, rect, relative=1, kind=kind)
else:
if link[0] == '#':
link = link[1:]
scheme = ''
tx._canvas.linkRect("", scheme != 'document' and link or parts[1], rect, relative=1)
def _do_link_line(i, t_off, ws, tx):
xs = tx.XtraState
leading = xs.style.leading
y = xs.cur_y - i * leading - xs.f.fontSize / 8.0 # 8.0 factor copied from para.py
text = join(xs.lines[i][1])
textlen = tx._canvas.stringWidth(text, tx._fontname, tx._fontsize)
_doLink(tx, xs.link, (t_off, y, t_off + textlen + ws, y + leading))
# XXX Modified for XHTML2PDF
def _do_post_text(tx):
"""
Try to find out what the variables mean:
    tx A structure containing more information about the paragraph ???
    leading Height of lines
    ff 1/8 of the font size
    y0 The "baseline" position ???
y 1/8 below the baseline
"""
xs = tx.XtraState
leading = xs.style.leading
autoLeading = xs.autoLeading
f = xs.f
if autoLeading == 'max':
# leading = max(leading, f.fontSize)
leading = max(leading, LEADING_FACTOR * f.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * f.fontSize
ff = 0.125 * f.fontSize
y0 = xs.cur_y
y = y0 - ff
# Background
for x1, x2, c, fs in xs.backgrounds:
inlineFF = fs * 0.125
gap = inlineFF * 1.25
tx._canvas.setFillColor(c)
tx._canvas.rect(x1, y - gap, x2 - x1, fs + 1, fill=1, stroke=0)
xs.backgrounds = []
xs.background = 0
xs.backgroundColor = None
xs.backgroundFontSize = None
# Underline
yUnderline = y0 - 1.5 * ff
tx._canvas.setLineWidth(ff * 0.75)
csc = None
for x1, x2, c in xs.underlines:
if c != csc:
tx._canvas.setStrokeColor(c)
csc = c
tx._canvas.line(x1, yUnderline, x2, yUnderline)
xs.underlines = []
xs.underline = 0
xs.underlineColor = None
# Strike
for x1, x2, c, fs in xs.strikes:
inlineFF = fs * 0.125
ys = y0 + 2 * inlineFF
if c != csc:
tx._canvas.setStrokeColor(c)
csc = c
tx._canvas.setLineWidth(inlineFF * 0.75)
tx._canvas.line(x1, ys, x2, ys)
xs.strikes = []
xs.strike = 0
xs.strikeColor = None
yl = y + leading
for x1, x2, link, c in xs.links:
# No automatic underlining for links, never!
_doLink(tx, link, (x1, y, x2, yl))
xs.links = []
xs.link = None
xs.linkColor = None
xs.cur_y -= leading
def textTransformFrags(frags, style):
tt = style.textTransform
if tt:
tt = tt.lower()
if tt == 'lowercase':
tt = unicode.lower
elif tt == 'uppercase':
tt = unicode.upper
elif tt == 'capitalize':
tt = unicode.title
elif tt == 'none':
return
else:
raise ValueError('ParaStyle.textTransform value %r is invalid' % style.textTransform)
n = len(frags)
if n == 1:
#single fragment the easy case
frags[0].text = tt(frags[0].text.decode('utf8')).encode('utf8')
elif tt is unicode.title:
pb = True
for f in frags:
t = f.text
if not t: continue
u = t.decode('utf8')
if u.startswith(u' ') or pb:
u = tt(u)
else:
i = u.find(u' ')
if i >= 0:
u = u[:i] + tt(u[i:])
pb = u.endswith(u' ')
f.text = u.encode('utf8')
else:
for f in frags:
t = f.text
if not t: continue
f.text = tt(t.decode('utf8')).encode('utf8')
class cjkU(unicode):
"""
simple class to hold the frag corresponding to a str
"""
def __new__(cls, value, frag, encoding):
self = unicode.__new__(cls, value)
self._frag = frag
if hasattr(frag, 'cbDefn'):
w = getattr(frag.cbDefn, 'width', 0)
self._width = w
else:
self._width = stringWidth(value, frag.fontName, frag.fontSize)
return self
frag = property(lambda self: self._frag)
width = property(lambda self: self._width)
def makeCJKParaLine(U, extraSpace, calcBounds):
words = []
CW = []
f0 = FragLine()
maxSize = maxAscent = minDescent = 0
for u in U:
f = u.frag
fontSize = f.fontSize
if calcBounds:
cbDefn = getattr(f, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
if not _sameFrag(f0, f):
f0 = f0.clone()
f0.text = u''.join(CW)
words.append(f0)
CW = []
f0 = f
CW.append(u)
if CW:
f0 = f0.clone()
f0.text = u''.join(CW)
words.append(f0)
return FragLine(kind=1, extraSpace=extraSpace, wordCount=1, words=words[1:], fontSize=maxSize, ascent=maxAscent,
descent=minDescent)
def cjkFragSplit(frags, maxWidths, calcBounds, encoding='utf8'):
"""
This attempts to be wordSplit for frags using the dumb algorithm
"""
from reportlab.rl_config import _FUZZ
U = [] # get a list of single glyphs with their widths etc etc
for f in frags:
text = f.text
if not isinstance(text, unicode):
text = text.decode(encoding)
if text:
U.extend([cjkU(t, f, encoding) for t in text])
else:
U.append(cjkU(text, f, encoding))
lines = []
widthUsed = lineStartPos = 0
maxWidth = maxWidths[0]
for i, u in enumerate(U):
w = u.width
widthUsed += w
lineBreak = hasattr(u.frag, 'lineBreak')
endLine = (widthUsed > maxWidth + _FUZZ and widthUsed > 0) or lineBreak
if endLine:
if lineBreak: continue
extraSpace = maxWidth - widthUsed + w
#This is the most important of the Japanese typography rules.
#if next character cannot start a line, wrap it up to this line so it hangs
#in the right margin. We won't do two or more though - that's unlikely and
#would result in growing ugliness.
nextChar = U[i]
if nextChar in ALL_CANNOT_START:
extraSpace -= w
i += 1
lines.append(makeCJKParaLine(U[lineStartPos:i], extraSpace, calcBounds))
try:
maxWidth = maxWidths[len(lines)]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
lineStartPos = i
widthUsed = w
i -= 1
#any characters left?
if widthUsed > 0:
lines.append(makeCJKParaLine(U[lineStartPos:], maxWidth - widthUsed, calcBounds))
return ParaLines(kind=1, lines=lines)
class Paragraph(Flowable):
"""
Paragraph(text, style, bulletText=None, caseSensitive=1)
text a string of stuff to go into the paragraph.
style is a style definition as in reportlab.lib.styles.
    bulletText is an optional bullet definition.
caseSensitive set this to 0 if you want the markup tags and their attributes to be case-insensitive.
This class is a flowable that can format a block of text
into a paragraph with a given style.
The paragraph Text can contain XML-like markup including the tags:
<b> ... </b> - bold
<i> ... </i> - italics
<u> ... </u> - underline
<strike> ... </strike> - strike through
<super> ... </super> - superscript
<sub> ... </sub> - subscript
<font name=fontfamily/fontname color=colorname size=float>
<onDraw name=callable label="a label">
<link>link text</link>
attributes of links
size/fontSize=num
name/face/fontName=name
fg/textColor/color=color
backcolor/backColor/bgcolor=color
dest/destination/target/href/link=target
<a>anchor text</a>
attributes of anchors
fontSize=num
fontName=name
fg/textColor/color=color
backcolor/backColor/bgcolor=color
href=href
<a name="anchorpoint"/>
<unichar name="unicode character name"/>
<unichar value="unicode code point"/>
<img src="path" width="1in" height="1in" valign="bottom"/>
The whole may be surrounded by <para> </para> tags
The <b> and <i> tags will work for the built-in fonts (Helvetica
/Times / Courier). For other fonts you need to register a family
of 4 fonts using reportlab.pdfbase.pdfmetrics.registerFont; then
use the addMapping function to tell the library that these 4 fonts
form a family e.g.
from reportlab.lib.fonts import addMapping
addMapping('Vera', 0, 0, 'Vera') #normal
addMapping('Vera', 0, 1, 'Vera-Italic') #italic
addMapping('Vera', 1, 0, 'Vera-Bold') #bold
addMapping('Vera', 1, 1, 'Vera-BoldItalic') #italic and bold
It will also be able to handle any MathML specified Greek characters.
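    A minimal usage sketch (the sample stylesheet is only an example; any
    ParagraphStyle will do, and "canv" stands for an already open reportlab canvas):
        from reportlab.lib.styles import getSampleStyleSheet
        style = getSampleStyleSheet()['BodyText']
        p = Paragraph('Plain, <b>bold</b> and <font color="red">red</font> text.', style)
        w, h = p.wrap(6 * 72, 9.7 * 72)  # available width and height in points
        p.drawOn(canv, 72, 600)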
"""
def __init__(self, text, style, bulletText=None, frags=None, caseSensitive=1, encoding='utf8'):
self.caseSensitive = caseSensitive
self.encoding = encoding
self._setup(text, style, bulletText, frags, cleanBlockQuotedText)
def __repr__(self):
n = self.__class__.__name__
L = [n + "("]
keys = self.__dict__.keys()
for k in keys:
v = getattr(self, k)
rk = repr(k)
rv = repr(v)
rk = " " + rk.replace("\n", "\n ")
            rv = " " + rv.replace("\n", "\n ")
L.append(rk)
L.append(rv)
L.append(") #" + n)
return '\n'.join(L)
def _setup(self, text, style, bulletText, frags, cleaner):
if frags is None:
text = cleaner(text)
_parser.caseSensitive = self.caseSensitive
style, frags, bulletTextFrags = _parser.parse(text, style)
if frags is None:
raise ValueError("xml parser error (%s) in paragraph beginning\n'%s'" \
% (_parser.errors[0], text[:min(30, len(text))]))
textTransformFrags(frags, style)
if bulletTextFrags: bulletText = bulletTextFrags
#AR hack
self.text = text
self.frags = frags
self.style = style
self.bulletText = bulletText
self.debug = PARAGRAPH_DEBUG # turn this on to see a pretty one with all the margins etc.
def wrap(self, availWidth, availHeight):
if self.debug:
print id(self), "wrap"
try:
print repr(self.getPlainText()[:80])
except:
print "???"
# work out widths array for breaking
self.width = availWidth
style = self.style
leftIndent = style.leftIndent
first_line_width = availWidth - (leftIndent + style.firstLineIndent) - style.rightIndent
later_widths = availWidth - leftIndent - style.rightIndent
if style.wordWrap == 'CJK':
#use Asian text wrap algorithm to break characters
blPara = self.breakLinesCJK([first_line_width, later_widths])
else:
blPara = self.breakLines([first_line_width, later_widths])
self.blPara = blPara
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
leading = style.leading
if blPara.kind == 1 and autoLeading not in ('', 'off'):
height = 0
if autoLeading == 'max':
for l in blPara.lines:
height += max(l.ascent - l.descent, leading)
elif autoLeading == 'min':
for l in blPara.lines:
height += l.ascent - l.descent
else:
raise ValueError('invalid autoLeading value %r' % autoLeading)
else:
if autoLeading == 'max':
leading = max(leading, LEADING_FACTOR * style.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * style.fontSize
height = len(blPara.lines) * leading
self.height = height
return self.width, height
def minWidth(self):
"""
Attempt to determine a minimum sensible width
"""
frags = self.frags
nFrags = len(frags)
if not nFrags: return 0
if nFrags == 1:
f = frags[0]
fS = f.fontSize
fN = f.fontName
words = hasattr(f, 'text') and split(f.text, ' ') or f.words
func = lambda w, fS=fS, fN=fN: stringWidth(w, fN, fS)
else:
words = _getFragWords(frags)
func = lambda x: x[0]
return max(map(func, words))
def _get_split_blParaFunc(self):
return self.blPara.kind == 0 and _split_blParaSimple or _split_blParaHard
def split(self, availWidth, availHeight):
if self.debug:
print id(self), "split"
if len(self.frags) <= 0: return []
#the split information is all inside self.blPara
if not hasattr(self, 'blPara'):
self.wrap(availWidth, availHeight)
blPara = self.blPara
style = self.style
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
leading = style.leading
lines = blPara.lines
if blPara.kind == 1 and autoLeading not in ('', 'off'):
s = height = 0
if autoLeading == 'max':
for i, l in enumerate(blPara.lines):
h = max(l.ascent - l.descent, leading)
n = height + h
if n > availHeight + 1e-8:
break
height = n
s = i + 1
elif autoLeading == 'min':
for i, l in enumerate(blPara.lines):
n = height + l.ascent - l.descent
if n > availHeight + 1e-8:
break
height = n
s = i + 1
else:
raise ValueError('invalid autoLeading value %r' % autoLeading)
else:
l = leading
if autoLeading == 'max':
l = max(leading, LEADING_FACTOR * style.fontSize)
elif autoLeading == 'min':
l = LEADING_FACTOR * style.fontSize
s = int(availHeight / l)
height = s * l
n = len(lines)
        allowWidows = getattr(self, 'allowWidows', getattr(style, 'allowWidows', 1))
        allowOrphans = getattr(self, 'allowOrphans', getattr(style, 'allowOrphans', 0))
if not allowOrphans:
if s <= 1: # orphan?
del self.blPara
return []
if n <= s: return [self]
if not allowWidows:
if n == s + 1: # widow?
if (allowOrphans and n == 3) or n > 3:
s -= 1 # give the widow some company
else:
del self.blPara # no room for adjustment; force the whole para onwards
return []
func = self._get_split_blParaFunc()
P1 = self.__class__(None, style, bulletText=self.bulletText, frags=func(blPara, 0, s))
#this is a major hack
P1.blPara = ParaLines(kind=1, lines=blPara.lines[0:s], aH=availHeight, aW=availWidth)
P1._JustifyLast = 1
P1._splitpara = 1
P1.height = height
P1.width = availWidth
if style.firstLineIndent != 0:
style = deepcopy(style)
style.firstLineIndent = 0
P2 = self.__class__(None, style, bulletText=None, frags=func(blPara, s, n))
for a in ('autoLeading', # possible attributes that might be directly on self.
):
if hasattr(self, a):
setattr(P1, a, getattr(self, a))
setattr(P2, a, getattr(self, a))
return [P1, P2]
def draw(self):
#call another method for historical reasons. Besides, I
#suspect I will be playing with alternate drawing routines
#so not doing it here makes it easier to switch.
self.drawPara(self.debug)
def breakLines(self, width):
"""
Returns a broken line structure. There are two cases
A) For the simple case of a single formatting input fragment the output is
A fragment specifier with
- kind = 0
- fontName, fontSize, leading, textColor
- lines= A list of lines
Each line has two items.
1. unused width in points
2. word list
B) When there is more than one input formatting fragment the output is
A fragment specifier with
- kind = 1
- lines= A list of fragments each having fields
- extraspace (needed for justified)
- fontSize
- words=word list
each word is itself a fragment with
various settings
This structure can be used to easily draw paragraphs with the various alignments.
You can supply either a single width or a list of widths; the latter will have its
last item repeated until necessary. A 2-element list is useful when there is a
different first line indent; a longer list could be created to facilitate custom wraps
around irregular objects.
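        As an illustration only (attribute values below are made up), the two
        shapes look roughly like:
            kind == 0: frag.clone(kind=0, fontSize=10, ascent=..., descent=...,
                       lines=[(unused_width_in_points, ['list', 'of', 'words']), ...])
            kind == 1: ParaLines(kind=1, lines=[FragLine(extraSpace=..., wordCount=n,
                       words=[frag, ...], fontSize=..., ascent=..., descent=...), ...])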
"""
if self.debug:
print id(self), "breakLines"
if not isinstance(width, (tuple, list)):
maxWidths = [width]
else:
maxWidths = width
lines = []
lineno = 0
style = self.style
#for bullets, work out width and ensure we wrap the right amount onto line one
_handleBulletWidth(self.bulletText, style, maxWidths)
maxWidth = maxWidths[0]
self.height = 0
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
calcBounds = autoLeading not in ('', 'off')
frags = self.frags
nFrags = len(frags)
if nFrags == 1 and not hasattr(frags[0], 'cbDefn'):
f = frags[0]
fontSize = f.fontSize
fontName = f.fontName
ascent, descent = getAscentDescent(fontName, fontSize)
words = hasattr(f, 'text') and split(f.text, ' ') or f.words
spaceWidth = stringWidth(' ', fontName, fontSize, self.encoding)
cLine = []
currentWidth = -spaceWidth # hack to get around extra space for word 1
for word in words:
#this underscores my feeling that Unicode throughout would be easier!
wordWidth = stringWidth(word, fontName, fontSize, self.encoding)
newWidth = currentWidth + spaceWidth + wordWidth
if newWidth <= maxWidth or not len(cLine):
# fit one more on this line
cLine.append(word)
currentWidth = newWidth
else:
if currentWidth > self.width: self.width = currentWidth
#end of line
lines.append((maxWidth - currentWidth, cLine))
cLine = [word]
currentWidth = wordWidth
lineno += 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
#deal with any leftovers on the final line
if cLine != []:
if currentWidth > self.width: self.width = currentWidth
lines.append((maxWidth - currentWidth, cLine))
return f.clone(kind=0, lines=lines, ascent=ascent, descent=descent, fontSize=fontSize)
elif nFrags <= 0:
return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
textColor=style.textColor, ascent=style.fontSize, descent=-0.2 * style.fontSize,
lines=[])
else:
if hasattr(self, 'blPara') and getattr(self, '_splitpara', 0):
#NB this is an utter hack that awaits the proper information
#preserving splitting algorithm
return self.blPara
n = 0
words = []
for w in _getFragWords(frags):
f = w[-1][0]
fontName = f.fontName
fontSize = f.fontSize
spaceWidth = stringWidth(' ', fontName, fontSize)
if not words:
currentWidth = -spaceWidth # hack to get around extra space for word 1
maxSize = fontSize
maxAscent, minDescent = getAscentDescent(fontName, fontSize)
wordWidth = w[0]
f = w[1][0]
if wordWidth > 0:
newWidth = currentWidth + spaceWidth + wordWidth
else:
newWidth = currentWidth
#test to see if this frag is a line break. If it is we will only act on it
#if the current width is non-negative or the previous thing was a deliberate lineBreak
lineBreak = hasattr(f, 'lineBreak')
endLine = (newWidth > maxWidth and n > 0) or lineBreak
if not endLine:
if lineBreak: continue #throw it away
nText = w[1][1]
if nText: n += 1
fontSize = f.fontSize
if calcBounds:
cbDefn = getattr(f, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
if not words:
g = f.clone()
words = [g]
g.text = nText
elif not _sameFrag(g, f):
if currentWidth > 0 and ((nText != '' and nText[0] != ' ') or hasattr(f, 'cbDefn')):
if hasattr(g, 'cbDefn'):
i = len(words) - 1
while i >= 0:
wi = words[i]
cbDefn = getattr(wi, 'cbDefn', None)
if cbDefn:
if not getattr(cbDefn, 'width', 0):
i -= 1
continue
if not wi.text.endswith(' '):
wi.text += ' '
break
else:
if not g.text.endswith(' '):
g.text += ' '
g = f.clone()
words.append(g)
g.text = nText
else:
if nText != '' and nText[0] != ' ':
g.text += ' ' + nText
for i in w[2:]:
g = i[0].clone()
g.text = i[1]
words.append(g)
fontSize = g.fontSize
if calcBounds:
cbDefn = getattr(g, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
currentWidth = newWidth
else: # either it won't fit, or it's a lineBreak tag
if lineBreak:
g = f.clone()
words.append(g)
if currentWidth > self.width: self.width = currentWidth
#end of line
lines.append(FragLine(extraSpace=maxWidth - currentWidth, wordCount=n,
lineBreak=lineBreak, words=words, fontSize=maxSize, ascent=maxAscent,
descent=minDescent))
#start new line
lineno += 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
if lineBreak:
n = 0
words = []
continue
currentWidth = wordWidth
n = 1
g = f.clone()
maxSize = g.fontSize
if calcBounds:
cbDefn = getattr(g, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
minDescent, maxAscent = imgVRange(cbDefn.height, cbDefn.valign, maxSize)
else:
maxAscent, minDescent = getAscentDescent(g.fontName, maxSize)
else:
maxAscent, minDescent = getAscentDescent(g.fontName, maxSize)
words = [g]
g.text = w[1][1]
for i in w[2:]:
g = i[0].clone()
g.text = i[1]
words.append(g)
fontSize = g.fontSize
if calcBounds:
cbDefn = getattr(g, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
#deal with any leftovers on the final line
if words != []:
if currentWidth > self.width: self.width = currentWidth
lines.append(ParaLines(extraSpace=(maxWidth - currentWidth), wordCount=n,
words=words, fontSize=maxSize, ascent=maxAscent, descent=minDescent))
return ParaLines(kind=1, lines=lines)
return lines
def breakLinesCJK(self, width):
"""Initially, the dumbest possible wrapping algorithm.
Cannot handle font variations."""
if self.debug:
print id(self), "breakLinesCJK"
if not isinstance(width, (list, tuple)):
maxWidths = [width]
else:
maxWidths = width
style = self.style
#for bullets, work out width and ensure we wrap the right amount onto line one
_handleBulletWidth(self.bulletText, style, maxWidths)
if len(self.frags) > 1:
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
calcBounds = autoLeading not in ('', 'off')
return cjkFragSplit(self.frags, maxWidths, calcBounds, self.encoding)
elif not len(self.frags):
return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
textColor=style.textColor, lines=[], ascent=style.fontSize, descent=-0.2 * style.fontSize)
f = self.frags[0]
if 1 and hasattr(self, 'blPara') and getattr(self, '_splitpara', 0):
#NB this is an utter hack that awaits the proper information
#preserving splitting algorithm
return f.clone(kind=0, lines=self.blPara.lines)
lines = []
lineno = 0
self.height = 0
f = self.frags[0]
if hasattr(f, 'text'):
text = f.text
else:
text = ''.join(getattr(f, 'words', []))
from reportlab.lib.textsplit import wordSplit
lines = wordSplit(text, maxWidths[0], f.fontName, f.fontSize)
#the paragraph drawing routine assumes multiple frags per line, so we need an
#extra list like this
# [space, [text]]
#
wrappedLines = [(sp, [line]) for (sp, line) in lines]
return f.clone(kind=0, lines=wrappedLines, ascent=f.fontSize, descent=-0.2 * f.fontSize)
def beginText(self, x, y):
return self.canv.beginText(x, y)
def drawPara(self, debug=0):
"""Draws a paragraph according to the given style.
Returns the final y position at the bottom. Not safe for
paragraphs without spaces e.g. Japanese; wrapping
algorithm will go infinite."""
if self.debug:
print id(self), "drawPara", self.blPara.kind
#stash the key facts locally for speed
canvas = self.canv
style = self.style
blPara = self.blPara
lines = blPara.lines
leading = style.leading
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
#work out the origin for line 1
leftIndent = style.leftIndent
cur_x = leftIndent
if debug:
bw = 0.5
bc = Color(1, 1, 0)
bg = Color(0.9, 0.9, 0.9)
else:
bw = getattr(style, 'borderWidth', None)
bc = getattr(style, 'borderColor', None)
bg = style.backColor
#if has a background or border, draw it
if bg or (bc and bw):
canvas.saveState()
op = canvas.rect
kwds = dict(fill=0, stroke=0)
if bc and bw:
canvas.setStrokeColor(bc)
canvas.setLineWidth(bw)
kwds['stroke'] = 1
br = getattr(style, 'borderRadius', 0)
if br and not debug:
op = canvas.roundRect
kwds['radius'] = br
if bg:
canvas.setFillColor(bg)
kwds['fill'] = 1
bp = getattr(style, 'borderPadding', 0)
op(leftIndent - bp,
-bp,
self.width - (leftIndent + style.rightIndent) + 2 * bp,
self.height + 2 * bp,
**kwds)
canvas.restoreState()
nLines = len(lines)
bulletText = self.bulletText
if nLines > 0:
_offsets = getattr(self, '_offsets', [0])
_offsets += (nLines - len(_offsets)) * [_offsets[-1]]
canvas.saveState()
alignment = style.alignment
offset = style.firstLineIndent + _offsets[0]
lim = nLines - 1
noJustifyLast = not (hasattr(self, '_JustifyLast') and self._JustifyLast)
if blPara.kind == 0:
if alignment == TA_LEFT:
dpl = _leftDrawParaLine
elif alignment == TA_CENTER:
dpl = _centerDrawParaLine
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLine
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLine
f = blPara
cur_y = self.height - getattr(f, 'ascent', f.fontSize) # TODO fix XPreformatted to remove this hack
if bulletText:
offset = _drawBullet(canvas, offset, cur_y, bulletText, style)
#set up the font etc.
canvas.setFillColor(f.textColor)
tx = self.beginText(cur_x, cur_y)
if autoLeading == 'max':
leading = max(leading, LEADING_FACTOR * f.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * f.fontSize
#now the font for the rest of the paragraph
tx.setFont(f.fontName, f.fontSize, leading)
ws = getattr(tx, '_wordSpace', 0)
t_off = dpl(tx, offset, ws, lines[0][1], noJustifyLast and nLines == 1)
if f.underline or f.link or f.strike:
xs = tx.XtraState = ABag()
xs.cur_y = cur_y
xs.f = f
xs.style = style
xs.lines = lines
xs.underlines = []
xs.underlineColor = None
# XXX Modified for XHTML2PDF
xs.backgrounds = []
xs.backgroundColor = None
xs.backgroundFontSize = None
xs.strikes = []
xs.strikeColor = None
# XXX Modified for XHTML2PDF
xs.strikeFontSize = None
xs.links = []
xs.link = f.link
canvas.setStrokeColor(f.textColor)
dx = t_off + leftIndent
if dpl != _justifyDrawParaLine: ws = 0
# XXX Never underline!
underline = f.underline
strike = f.strike
link = f.link
if underline:
_do_under_line(0, dx, ws, tx)
if strike:
_do_under_line(0, dx, ws, tx, lm=0.125)
if link: _do_link_line(0, dx, ws, tx)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in xrange(1, nLines):
ws = lines[i][0]
t_off = dpl(tx, _offsets[i], ws, lines[i][1], noJustifyLast and i == lim)
if dpl != _justifyDrawParaLine: ws = 0
if underline: _do_under_line(i, t_off + leftIndent, ws, tx)
if strike: _do_under_line(i, t_off + leftIndent, ws, tx, lm=0.125)
if link: _do_link_line(i, t_off + leftIndent, ws, tx)
else:
for i in xrange(1, nLines):
dpl(tx, _offsets[i], lines[i][0], lines[i][1], noJustifyLast and i == lim)
else:
f = lines[0]
cur_y = self.height - getattr(f, 'ascent', f.fontSize) # TODO fix XPreformatted to remove this hack
# default?
dpl = _leftDrawParaLineX
if bulletText:
oo = offset
offset = _drawBullet(canvas, offset, cur_y, bulletText, style)
if alignment == TA_LEFT:
dpl = _leftDrawParaLineX
elif alignment == TA_CENTER:
dpl = _centerDrawParaLineX
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLineX
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLineX
else:
raise ValueError("bad align %s" % repr(alignment))
#set up the font etc.
tx = self.beginText(cur_x, cur_y)
xs = tx.XtraState = ABag()
xs.textColor = None
# XXX Modified for XHTML2PDF
xs.backColor = None
xs.rise = 0
xs.underline = 0
xs.underlines = []
xs.underlineColor = None
# XXX Modified for XHTML2PDF
xs.background = 0
xs.backgrounds = []
xs.backgroundColor = None
xs.backgroundFontSize = None
xs.strike = 0
xs.strikes = []
xs.strikeColor = None
# XXX Modified for XHTML2PDF
xs.strikeFontSize = None
xs.links = []
xs.link = None
xs.leading = style.leading
xs.leftIndent = leftIndent
tx._leading = None
tx._olb = None
xs.cur_y = cur_y
xs.f = f
xs.style = style
xs.autoLeading = autoLeading
tx._fontname, tx._fontsize = None, None
dpl(tx, offset, lines[0], noJustifyLast and nLines == 1)
_do_post_text(tx)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in xrange(1, nLines):
f = lines[i]
dpl(tx, _offsets[i], f, noJustifyLast and i == lim)
_do_post_text(tx)
canvas.drawText(tx)
canvas.restoreState()
def getPlainText(self, identify=None):
"""
Convenience function for templates which want access
to the raw text, without XML tags.
"""
frags = getattr(self, 'frags', None)
if frags:
plains = []
for frag in frags:
if hasattr(frag, 'text'):
plains.append(frag.text)
return join(plains, '')
elif identify:
text = getattr(self, 'text', None)
if text is None: text = repr(self)
return text
else:
return ''
def getActualLineWidths0(self):
"""
Convenience function; tells you how wide each line
actually is. For justified styles, this will be
the same as the wrap width; for others it might be
useful for seeing if paragraphs will fit in spaces.
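        As an illustration, a paragraph wrapped with wrap(200, 400) might return
        something like [187.3, 142.9], one entry per wrapped line (values made up).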
"""
assert hasattr(self, 'width'), "Cannot call this method before wrap()"
if self.blPara.kind:
func = lambda frag, w=self.width: w - frag.extraSpace
else:
func = lambda frag, w=self.width: w - frag[0]
return map(func, self.blPara.lines)
if __name__ == '__main__': # NORUNTESTS
def dumpParagraphLines(P):
print 'dumpParagraphLines(<Paragraph @ %d>)' % id(P)
lines = P.blPara.lines
for l, line in enumerate(lines):
line = lines[l]
if hasattr(line, 'words'):
words = line.words
else:
words = line[1]
nwords = len(words)
print 'line%d: %d(%s)\n ' % (l, nwords, str(getattr(line, 'wordCount', 'Unknown'))),
for w in xrange(nwords):
print "%d:'%s'" % (w, getattr(words[w], 'text', words[w])),
print
def fragDump(w):
R = ["'%s'" % w[1]]
for a in ('fontName', 'fontSize', 'textColor', 'rise', 'underline', 'strike', 'link', 'cbDefn', 'lineBreak'):
if hasattr(w[0], a):
R.append('%s=%r' % (a, getattr(w[0], a)))
return ', '.join(R)
def dumpParagraphFrags(P):
print 'dumpParagraphFrags(<Paragraph @ %d>) minWidth() = %.2f' % (id(P), P.minWidth())
frags = P.frags
n = len(frags)
for l in xrange(n):
print "frag%d: '%s' %s" % (
                l, frags[l].text, ' '.join(['%s=%s' % (k, getattr(frags[l], k)) for k in frags[l].__dict__ if k != 'text']))
l = 0
cum = 0
for W in _getFragWords(frags):
cum += W[0]
print "fragword%d: cum=%3d size=%d" % (l, cum, W[0]),
for w in W[1:]:
print '(%s)' % fragDump(w),
print
l += 1
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import cm
import sys
TESTS = sys.argv[1:]
if TESTS == []:
TESTS = ['4']
def flagged(i, TESTS=TESTS):
return 'all' in TESTS or '*' in TESTS or str(i) in TESTS
styleSheet = getSampleStyleSheet()
B = styleSheet['BodyText']
style = ParagraphStyle("discussiontext", parent=B)
style.fontName = 'Helvetica'
if flagged(1):
text = '''The <font name=courier color=green>CMYK</font> or subtractive method follows the way a printer
mixes three pigments (cyan, magenta, and yellow) to form colors.
Because mixing chemicals is more difficult than combining light there
is a fourth parameter for darkness. For example a chemical
combination of the <font name=courier color=green>CMY</font> pigments generally never makes a perfect
black -- instead producing a muddy color -- so, to get black printers
don't use the <font name=courier color=green>CMY</font> pigments but use a direct black ink. Because
<font name=courier color=green>CMYK</font> maps more directly to the way printer hardware works it may
be the case that &| & | colors specified in <font name=courier color=green>CMYK</font> will provide better fidelity
and better control when printed.
'''
P = Paragraph(text, style)
dumpParagraphFrags(P)
aW, aH = 456.0, 42.8
w, h = P.wrap(aW, aH)
dumpParagraphLines(P)
S = P.split(aW, aH)
for s in S:
s.wrap(aW, aH)
dumpParagraphLines(s)
aH = 500
if flagged(2):
P = Paragraph("""Price<super><font color="red">*</font></super>""", styleSheet['Normal'])
dumpParagraphFrags(P)
w, h = P.wrap(24, 200)
dumpParagraphLines(P)
if flagged(3):
text = """Dieses Kapitel bietet eine schnelle <b><font color=red>Programme :: starten</font></b>
<onDraw name=myIndex label="Programme :: starten">
<b><font color=red>Eingabeaufforderung :: (>>>)</font></b>
<onDraw name=myIndex label="Eingabeaufforderung :: (>>>)">
<b><font color=red>>>> (Eingabeaufforderung)</font></b>
<onDraw name=myIndex label=">>> (Eingabeaufforderung)">
Einführung in Python <b><font color=red>Python :: Einführung</font></b>
<onDraw name=myIndex label="Python :: Einführung">.
Das Ziel ist, die grundlegenden Eigenschaften von Python darzustellen, ohne
sich zu sehr in speziellen Regeln oder Details zu verstricken. Dazu behandelt
dieses Kapitel kurz die wesentlichen Konzepte wie Variablen, Ausdrücke,
Kontrollfluss, Funktionen sowie Ein- und Ausgabe. Es erhebt nicht den Anspruch,
umfassend zu sein."""
P = Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(4):
text = '''Die eingebaute Funktion <font name=Courier>range(i, j [, stride])</font><onDraw name=myIndex label="eingebaute Funktionen::range()"><onDraw name=myIndex label="range() (Funktion)"><onDraw name=myIndex label="Funktionen::range()"> erzeugt eine Liste von Ganzzahlen und füllt sie mit Werten <font name=Courier>k</font>, für die gilt: <font name=Courier>i <= k < j</font>. Man kann auch eine optionale Schrittweite angeben. Die eingebaute Funktion <font name=Courier>xrange()</font><onDraw name=myIndex label="eingebaute Funktionen::xrange()"><onDraw name=myIndex label="xrange() (Funktion)"><onDraw name=myIndex label="Funktionen::xrange()"> erfüllt einen ähnlichen Zweck, gibt aber eine unveränderliche Sequenz vom Typ <font name=Courier>XRangeType</font><onDraw name=myIndex label="XRangeType"> zurück. Anstatt alle Werte in der Liste abzuspeichern, berechnet diese Liste ihre Werte, wann immer sie angefordert werden. Das ist sehr viel speicherschonender, wenn mit sehr langen Listen von Ganzzahlen gearbeitet wird. <font name=Courier>XRangeType</font> kennt eine einzige Methode, <font name=Courier>s.tolist()</font><onDraw name=myIndex label="XRangeType::tolist() (Methode)"><onDraw name=myIndex label="s.tolist() (Methode)"><onDraw name=myIndex label="Methoden::s.tolist()">, die seine Werte in eine Liste umwandelt.'''
aW = 420
aH = 64.4
P = Paragraph(text, B)
dumpParagraphFrags(P)
w, h = P.wrap(aW, aH)
print 'After initial wrap', w, h
dumpParagraphLines(P)
S = P.split(aW, aH)
dumpParagraphFrags(S[0])
w0, h0 = S[0].wrap(aW, aH)
print 'After split wrap', w0, h0
dumpParagraphLines(S[0])
if flagged(5):
text = '<para> %s <![CDATA[</font></b>& %s < >]]></para>' % (chr(163), chr(163))
P = Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(6):
for text in [
'''Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''',
'''Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''',
'''Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''',
]:
P = Paragraph(text, styleSheet['Normal'], caseSensitive=0)
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(7):
text = """<para align="CENTER" fontSize="24" leading="30"><b>Generated by:</b>Dilbert</para>"""
P = Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(8):
text = """- bullet 0<br/>- bullet 1<br/>- bullet 2<br/>- bullet 3<br/>- bullet 4<br/>- bullet 5"""
P = Paragraph(text, styleSheet['Normal'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
S = P.split(6 * 72, h / 2.0)
print len(S)
dumpParagraphLines(S[0])
dumpParagraphLines(S[1])
if flagged(9):
text = """Furthermore, the fundamental error of
regarding <img src="../docs/images/testimg.gif" width="3" height="7"/> functional notions as
categorial delimits a general
convention regarding the forms of the<br/>
grammar. I suggested that these results
would follow from the assumption that"""
P = Paragraph(text, ParagraphStyle('aaa', parent=styleSheet['Normal'], align=TA_JUSTIFY))
dumpParagraphFrags(P)
w, h = P.wrap(6 * cm - 12, 9.7 * 72)
dumpParagraphLines(P)
if flagged(10):
text = """a b c\xc2\xa0d e f"""
P = Paragraph(text, ParagraphStyle('aaa', parent=styleSheet['Normal'], align=TA_JUSTIFY))
dumpParagraphFrags(P)
w, h = P.wrap(6 * cm - 12, 9.7 * 72)
dumpParagraphLines(P)
| [
"noreply@github.com"
] | noreply@github.com |
8bfa5c02a3089abb03156a6609bfed1a989474e9 | d5f8ca3c13f681d147b7614f1902df7ba34e06f9 | /Graduate/model/densenet.py | 38359413ab29892a7c8f412c5fc1741039a65696 | [] | no_license | hhjung1202/OwnAdaptation | 29a6c0a603ab9233baf293096fb9e7e956647a10 | 50805730254419f090f4854387be79648a01fbb4 | refs/heads/master | 2021-06-25T22:31:15.437642 | 2020-11-26T18:19:55 | 2020-11-26T18:19:55 | 176,670,379 | 1 | 0 | null | 2020-06-11T07:35:55 | 2019-03-20T06:36:19 | Python | UTF-8 | Python | false | false | 7,429 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from torch import Tensor
import itertools
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class _Gate_selection(nn.Sequential):
phase = 2
def __init__(self, num_input_features, growth_rate, count, reduction=4):
super(_Gate_selection, self).__init__()
self.actual = (count+1) // 2
LongTensor = torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor
self.init = LongTensor([i for i in range(num_input_features)]).view(1, -1)
s = num_input_features
arr = []
for j in range(count):
arr += [[i for i in range(s, s + growth_rate)]]
s+=growth_rate
self.arr = LongTensor(arr)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
channels = num_input_features + growth_rate * count
self.fc1 = nn.Linear(channels, channels//reduction)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(channels//reduction, count)
self.sigmoid = nn.Sigmoid()
self.flat = Flatten()
def forward(self, x, x_norm):
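        # Squeeze-and-excitation style gating: x_norm is globally average-pooled
        # and scored by fc1/fc2 with a sigmoid, one score per dense block;
        # self.actual block indices are then picked from the sorted scores,
        # shuffled, and their channels (plus the initial input channels) are
        # gathered out of x.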
b, _, w, h = x_norm.size()
        out = self.avg_pool(x_norm) # batch, concatenated channels, w, h
out = self.flat(out)
out = self.relu(self.fc1(out))
out = self.sigmoid(self.fc2(out))
_, sort = out.sort()
        indices = sort[:,:self.actual] # batch, sort
        indices = indices[:, torch.randperm(indices.size(1))] # shuffle
select = self.init.repeat(b,1)
select = torch.cat([select, self.arr[indices].view(b,-1)], 1)
select = select.view(select.size(0), -1, 1, 1).repeat(1,1,w,h)
x = x.gather(1, select)
return x
class _Bottleneck(nn.Sequential):
def __init__(self, num_input_features, growth_rate, count=1):
super(_Bottleneck, self).__init__()
self.norm1 = nn.BatchNorm2d(num_input_features)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(num_input_features, 4 * growth_rate,
kernel_size=1, stride=1, bias=False)
self.norm2 = nn.BatchNorm2d(4 * growth_rate)
self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)
self.count = count
def forward(self, x):
if isinstance(x, Tensor):
x = [x]
out = torch.cat(x,1)
out = self.norm1(out)
out = self.relu(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out
class _Basic(nn.Sequential):
    def __init__(self, num_input_features, growth_rate, count=1):
super(_Basic, self).__init__()
self.norm1 = nn.BatchNorm2d(num_input_features)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(num_input_features, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)
self.count = count
def forward(self, x):
if isinstance(x, Tensor):
x = [x]
out = torch.cat(x,1)
out = self.norm1(out)
out = self.relu(out)
out = self.conv1(out)
return out
class _DenseLayer(nn.Module):
def __init__(self, num_input_features, growth_rate, num_layers, Block):
super(_DenseLayer, self).__init__()
self.num_layers = num_layers
self.init_block = Block(num_input_features, growth_rate)
for i in range(1, num_layers):
j = (i-1)//2 + 1
setattr(self, 'layer{}'.format(i), Block(num_input_features + growth_rate * j, growth_rate))
setattr(self, 'norm{}'.format(i), nn.BatchNorm2d(num_input_features + growth_rate * (i+1)))
setattr(self, 'gate{}'.format(i), _Gate_selection(num_input_features, growth_rate, i+1, reduction=4))
def forward(self, x):
out = self.init_block(x)
x = [x] + [out]
out = torch.cat(x,1)
for i in range(1, self.num_layers):
out = getattr(self, 'layer{}'.format(i))(out)
x += [out]
x_cat = torch.cat(x,1)
x_norm = getattr(self, 'norm{}'.format(i))(x_cat)
out = getattr(self, 'gate{}'.format(i))(x_cat, x_norm)
return x_cat
class _Transition(nn.Sequential):
def __init__(self, num_input_features, tr_features):
super(_Transition, self).__init__()
self.norm = nn.BatchNorm2d(tr_features)
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(tr_features, num_input_features // 2,
kernel_size=1, stride=1, bias=False)
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
# out = torch.cat(x,1)
out = self.norm(x)
out = self.relu(out)
out = self.conv(out)
out = self.pool(out)
return out
class DenseNet(nn.Module):
def __init__(self, growth_rate=12,
num_init_features=24, num_classes=10, is_bottleneck=True, layer=28):
super(DenseNet, self).__init__()
        if layer == 28:
            block_config=[4,4,4]
        elif layer == 40:
            block_config=[6,6,6]
        elif layer == 52:
            block_config=[8,8,8]
        elif layer == 64:
            block_config=[10,10,10]
if is_bottleneck:
Block = _Bottleneck
else:
Block = _Basic
block_config = [2*x for x in block_config]
self.features = nn.Sequential()
self.features.add_module('conv0', nn.Conv2d(3, num_init_features, kernel_size=3, stride=1, padding=1, bias=False))
num_features = num_init_features
for i in range(len(block_config)):
self.features.add_module('layer%d' % (i + 1), _DenseLayer(num_features, growth_rate, block_config[i], Block))
tr_features = num_features + block_config[i] * growth_rate
num_features = num_features + block_config[i] * growth_rate // 2
if i != len(block_config) - 1:
self.features.add_module('transition%d' % (i + 1), _Transition(num_features, tr_features))
num_features = num_features // 2
# Final batch norm
self.norm = nn.BatchNorm2d(tr_features)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.AvgPool2d(kernel_size=8, stride=1)
self.fc = nn.Linear(tr_features, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
# Linear layer
# Official init from torch repo.
def forward(self, x):
out = self.features(x)
# out = torch.cat(out,1)
out = self.norm(out)
out = self.relu(out)
out = self.pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
if __name__=='__main__':
x = torch.randn(4,3,32,32)
model = DenseNet(growth_rate=12, num_init_features=24, num_classes=10, is_bottleneck=True, layer=40)
y = model(x)
print(y.size()) | [
"hhjung1202@naver.com"
] | hhjung1202@naver.com |
0d98db9ec83456db136f54a759d5de5a9a1ccb42 | c42b08296e47e113ea66d8d14b383abccfbce409 | /myhashtry.py | 877c1cafe1784c183cfe3f85b83929bd081b06e3 | [] | no_license | unmutilated/code | 49750a92ec855158740f456b3b1d3dd34890ca88 | 8961e5cf394aecdf71d70cc6b2ff03f35de14db5 | refs/heads/master | 2022-05-24T13:14:37.318698 | 2020-04-27T20:11:08 | 2020-04-27T20:11:08 | 259,436,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | import sys
import hashlib
Output = []
def ReadFile():
file0 = open("CRY_Lab_02_B_hashes.txt", "r")
    lines = file0.readlines()
file0.close()
s = set()
for data in lines:
s.add(data.strip())
print("Read in {0} lines from the MD5 hash file".format(len(lines)))
return s
def SaveFile():
file1 = open("Output.txt","w")
file1.writelines(Output)
    file1.close()
def HashFind():
hashset = ReadFile()
alph = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\'()*+,-./:;<=>?@"
count = 0
for element in range(0, len(alph)):
m = alph[element]
print(element) #for debuggig
print(len(alph)) #for debugging
h = hashlib.md5(m.encode()).hexdigest()
if h in hashset:
Output.append("{0} Found a hash: {1} hashes to {2}\n".format(count, m, h))
count = count +1
if count >= 1000:
print("All Done")
SaveFile()
sys.exit()
else:
sys.exit()
if __name__ == "__main__":
while True:
userchoice = input("to hash press h [Enter to quit]: ").upper()
if userchoice.startswith("H"):
HashFind()
else:
sys.exit()
| [
"noreply@github.com"
] | noreply@github.com |
4cb3844e79b7b04d524f902a1436ea166712750d | 7bc1d2a995ce6488c7dd20909a6f9443d6d8ced8 | /admin.py | f9970ac554b7883eb5ab7ee1f153581bbdd2be7d | [] | no_license | strategy2231/django_learn | dd4f7d1bd77157b893a8ea2d8355e980898687f5 | 9b9544c24d42892acef53943eb707bc5b8ca48c3 | refs/heads/master | 2021-01-12T16:01:43.756219 | 2016-10-25T18:45:48 | 2016-10-25T18:45:48 | 71,918,737 | 0 | 0 | null | 2016-10-25T18:40:31 | 2016-10-25T16:50:45 | Python | UTF-8 | Python | false | false | 612 | py |
# Register your models here.
from django.contrib import admin
from restaurants.models import Restaurant, Food,Comment
class RestaurantAdmin(admin.ModelAdmin):
list_display = ('name', 'phone_number', 'address','date')
search_fields = ('name',)
class FoodAdmin(admin.ModelAdmin):
list_display = ('name', 'restaurant', 'price','is_spicy','comment','date')
list_filter = ('is_spicy',)
#fields = ('price','restaurant')
search_fields = ('name',)
ordering = ('-price',)
admin.site.register(Restaurant,RestaurantAdmin)
admin.site.register(Food,FoodAdmin)
admin.site.register(Comment) | [
"noreply@github.com"
] | noreply@github.com |
82792a3be9979e79865b11f08d068150204766e1 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /tests/components/select/test_device_condition.py | 7c1dc443e5626cdb246bbc9a3f633cbd756d466c | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 8,288 | py | """The tests for Select device conditions."""
from __future__ import annotations
import pytest
import voluptuous_serialize
from homeassistant.components import automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.select import DOMAIN
from homeassistant.components.select.device_condition import (
async_get_condition_capabilities,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import (
config_validation as cv,
device_registry,
entity_registry,
)
from homeassistant.helpers.entity import EntityCategory
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass: HomeAssistant) -> device_registry.DeviceRegistry:
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass: HomeAssistant) -> entity_registry.EntityRegistry:
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass: HomeAssistant) -> list[ServiceCall]:
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(
hass: HomeAssistant,
device_reg: device_registry.DeviceRegistry,
entity_reg: entity_registry.EntityRegistry,
) -> None:
"""Test we get the expected conditions from a select."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "selected_option",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": False},
}
]
conditions = await async_get_device_automations(
hass, DeviceAutomationType.CONDITION, device_entry.id
)
assert_lists_same(conditions, expected_conditions)
@pytest.mark.parametrize(
"hidden_by,entity_category",
(
(entity_registry.RegistryEntryHider.INTEGRATION, None),
(entity_registry.RegistryEntryHider.USER, None),
(None, EntityCategory.CONFIG),
(None, EntityCategory.DIAGNOSTIC),
),
)
async def test_get_conditions_hidden_auxiliary(
hass,
device_reg,
entity_reg,
hidden_by,
entity_category,
):
"""Test we get the expected conditions from a hidden or auxiliary entity."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
entity_category=entity_category,
hidden_by=hidden_by,
)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": condition,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": True},
}
for condition in ["selected_option"]
]
conditions = await async_get_device_automations(
hass, DeviceAutomationType.CONDITION, device_entry.id
)
assert_lists_same(conditions, expected_conditions)
async def test_if_selected_option(
hass: HomeAssistant, calls: list[ServiceCall]
) -> None:
"""Test for selected_option conditions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "select.entity",
"type": "selected_option",
"option": "option1",
}
],
"action": {
"service": "test.automation",
"data": {
"result": "option1 - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "select.entity",
"type": "selected_option",
"option": "option2",
}
],
"action": {
"service": "test.automation",
"data": {
"result": "option2 - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
# Test with non existing entity
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(
"select.entity", "option1", {"options": ["option1", "option2"]}
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["result"] == "option1 - event - test_event1"
hass.states.async_set(
"select.entity", "option2", {"options": ["option1", "option2"]}
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["result"] == "option2 - event - test_event2"
async def test_get_condition_capabilities(hass: HomeAssistant) -> None:
"""Test we get the expected capabilities from a select condition."""
config = {
"platform": "device",
"domain": DOMAIN,
"type": "selected_option",
"entity_id": "select.test",
"option": "option1",
}
# Test when entity doesn't exists
capabilities = await async_get_condition_capabilities(hass, config)
assert capabilities
assert "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "option",
"required": True,
"type": "select",
"options": [],
},
{
"name": "for",
"optional": True,
"type": "positive_time_period_dict",
},
]
# Mock an entity
hass.states.async_set("select.test", "option1", {"options": ["option1", "option2"]})
# Test if we get the right capabilities now
capabilities = await async_get_condition_capabilities(hass, config)
assert capabilities
assert "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "option",
"required": True,
"type": "select",
"options": [("option1", "option1"), ("option2", "option2")],
},
{
"name": "for",
"optional": True,
"type": "positive_time_period_dict",
},
]
| [
"noreply@github.com"
] | noreply@github.com |
d4703ba2bdb76a23ad5f3eef4f0eb86443e92219 | 93dd16432fcb4b42670f208edf81b2eb29f40d41 | /pycaesarcipher.py | 980eed0fa1ec667cce8da2834d93cf03891ce125 | [
"MIT"
] | permissive | shyams1993/pycaesarcipher | d067f4fda7acdb5f70687d5262a6fbc39d5e3790 | a396f165cc9a103950d060c94e25f7f344e7b257 | refs/heads/master | 2022-06-27T17:28:48.417994 | 2020-05-07T10:05:25 | 2020-05-07T10:05:25 | 261,873,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | class pycaesarcipher():
'''
    DOCSTRING: This class contains the encipher and decipher functions for one of the simplest substitution ciphers - "Caesar's Cipher"
'''
def __init__(self):
return None
def caesar_encipher(self,word,shiftkey):
'''
DOCSTRING: Function to encipher a given string using caesar cipher.
\nINPUT: Any string and shiftkey.
\nLOGIC: To encrypt, it uses the basic formula : (character + shiftkey)
\nOUTPUT: The Enciphered string result.
        \nUSAGE: First import the pycaesarcipher package; then create an instance of the class and assign it to a variable.
        \nSyntax: variable_name = pycaesarcipher()
        \nThen create another variable to call either the caesar_encipher() method or the caesar_decipher() method using two positional arguments: target word/variable, shiftkey
\nSyntax: another_variable = variable_name.caesar_encipher("string",integer)
\n\nThis logic uses ASCII code representation to convert the strings to integers. You can use any string, but this method will convert the string to lowercase and then encipher to maintain uniformity.
'''
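        # Worked example with shiftkey=3: 'y' (ASCII 121) becomes 121 + 3 = 124,
        # which is past 'z' (122), so it wraps to (124 - 122) + 96 = 98, i.e. 'b'.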
word = word.lower()
ciphertext = []
for w in range(len(word)):
x = (ord(word[w]) + shiftkey)
if x > 122:
y = (x-122)+96
ciphertext.append(chr(y))
elif ord(word[w]) == 32:
y = 32
ciphertext.append(chr(y))
else:
ciphertext.append(chr(x))
word = ''.join([str(s) for s in ciphertext])
return word
def caesar_decipher(self,word,shiftkey):
'''
DOCSTRING: Function to decipher a given string using caesar cipher.
\nINPUT: Any string and shiftkey.
\nLOGIC: To decipher, it uses the basic formula : (character - shiftkey)
\nOUTPUT: The deciphered string result.
        \nUSAGE: First import the pycaesarcipher package; then create an instance of the class and assign it to a variable.
        \nSyntax: variable_name = pycaesarcipher()
        \nThen create another variable to call either the caesar_encipher() method or the caesar_decipher() method using two positional arguments: target word/variable, shiftkey
\nSyntax: another_variable = variable_name.caesar_decipher("string",integer)
\n\nThis logic uses ASCII code representation to convert the strings to integers. You can use any string, but this method will convert the string to lowercase and then decipher to maintain uniformity.
'''
word = word.lower()
plaintext = []
for w in range(len(word)):
x = (ord(word[w]) - shiftkey)
if x>=70 and x < 97:
y = (x-96)+122
plaintext.append(chr(y))
elif ord(word[w]) == 32:
plaintext.append(chr(32))
else:
plaintext.append(chr(x))
word = ''.join([str(s) for s in plaintext])
return word | [
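if __name__ == "__main__":
    # Minimal usage sketch following the docstring syntax; the plaintext and
    # shiftkey below are arbitrary example values.
    cipher = pycaesarcipher()
    secret = cipher.caesar_encipher("attack at dawn", 3)
    print(secret)                               # "dwwdfn dw gdzq"
    print(cipher.caesar_decipher(secret, 3))    # "attack at dawn"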
"noreply@github.com"
] | noreply@github.com |
a31cb5f185c80ea397b6d84e1e2a1d488a88fd68 | a383c318c17b382bc3acad86b106584123ec5cd5 | /tifa/models/product_attr.py | fb9b11ea5d0303b510746f3e2d342138c1d3f67e | [
"MIT"
] | permissive | Jormungendr/tifa | 86f20aa8ca28548a5861c6dcd54ab12840aa0b0c | f703fd27f54000e7d51f06d2456d09cc79e0ab72 | refs/heads/master | 2023-07-13T08:21:26.464652 | 2021-08-24T14:19:52 | 2021-08-24T14:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,506 | py | import sqlalchemy as sa
from sqlalchemy.orm import relationship
from tifa.globals import Model
from tifa.models.attr import Attribute, AttributeValue
from tifa.models.product import ProductType, Product, ProductVariant
class AttributeProduct(Model):
__tablename__ = "attribute_product"
__table_args__ = (sa.UniqueConstraint("attribute_id", "product_type_id"),)
id = sa.Column(sa.Integer, primary_key=True)
attribute_id = sa.Column(
sa.ForeignKey("attribute.id"),
nullable=False,
)
attribute = relationship(Attribute)
product_type_id = sa.Column(
sa.ForeignKey("product_type.id"),
nullable=False,
)
product_type = relationship(ProductType)
sort_order = sa.Column(sa.Integer, index=True)
class AssignedProductAttribute(Model):
__tablename__ = "assigned_product_attribute"
__table_args__ = (sa.UniqueConstraint("product_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
product_id = sa.Column(sa.ForeignKey("product.id"), nullable=False)
product = relationship(Product)
assignment_id = sa.Column(
sa.ForeignKey("attribute_product.id"),
nullable=False,
)
assignment = relationship(AttributeProduct)
class AssignedProductAttributeValue(Model):
__tablename__ = "assigned_product_attribute_value"
__table_args__ = (sa.UniqueConstraint("value_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
sort_order = sa.Column(sa.Integer, index=True)
assignment_id = sa.Column(
sa.ForeignKey("assigned_product_attribute.id"),
nullable=False,
)
assignment = relationship(AssignedProductAttribute)
value_id = sa.Column(
sa.ForeignKey("attribute_value.id"),
nullable=False,
)
value = relationship(AttributeValue)
class AttributeVariant(Model):
__tablename__ = "attribute_variant"
__table_args__ = (sa.UniqueConstraint("attribute_id", "product_type_id"),)
id = sa.Column(sa.Integer, primary_key=True)
attribute_id = sa.Column(
sa.ForeignKey("attribute.id"),
nullable=False,
)
product_type_id = sa.Column(
sa.ForeignKey("product_type.id"),
nullable=False,
)
sort_order = sa.Column(sa.Integer, index=True)
attribute = relationship(Attribute)
product_type = relationship(ProductType)
class AssignedVariantAttribute(Model):
__tablename__ = "assigned_variant_attribute"
__table_args__ = (sa.UniqueConstraint("variant_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
variant_id = sa.Column(
sa.ForeignKey("product_variant.id"),
nullable=False,
)
assignment_id = sa.Column(
sa.ForeignKey("attribute_variant.id"),
nullable=False,
)
assignment = relationship(AttributeVariant)
variant = relationship(ProductVariant)
class AssignedVariantAttributeValue(Model):
__tablename__ = "assigned_variant_attribute_value"
__table_args__ = (sa.UniqueConstraint("value_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
sort_order = sa.Column(sa.Integer, index=True)
assignment_id = sa.Column(
sa.ForeignKey(
"assigned_variant_attribute.id",
),
nullable=False,
)
assignment = relationship(AssignedVariantAttribute)
value_id = sa.Column(
sa.ForeignKey("attribute_value.id"),
nullable=False,
)
value = relationship(AttributeValue)
| [
"noreply@github.com"
] | noreply@github.com |
8479fc36a34cd92829460ba09dac9233003f21e2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_145/588.py | bc85913e20b14805e33519ef4c6568305d07637f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import math
def read(f):
n = int(f.readline().strip())
for i in xrange(n):
p, q = map(int, f.readline().strip().split('/'))
yield p, q
def main(f):
for i, (p, q) in enumerate(read(f)):
if 2 ** int(math.log(q) / math.log(2)) != q:
print("Case #{0}: impossible".format(i+1))
else:
n = int(math.ceil((math.log(q) - math.log(p)) / math.log(2)))
print("Case #{0}: {1}".format(i+1, n))
_input = """
5
1/2
3/4
1/4
2/23
123/31488
""".strip()
_output = """
Case #1: 1
Case #2: 1
Case #3: 2
Case #4: impossible
Case #5: 8
""".strip()
def test_main(compare=False):
import sys
from difflib import unified_diff
from StringIO import StringIO
if compare:
stdout = sys.stdout
sys.stdout = StringIO()
try:
main(StringIO(_input))
result = sys.stdout.getvalue().strip()
finally:
sys.stdout = stdout
print(result)
for line in unified_diff(result.splitlines(), _output.splitlines(),
'Output', 'Expect', lineterm=''):
print(line)
if result == _output:
print("OK")
else:
print("NG")
else:
main(StringIO(_input))
if __name__ == '__main__':
test = False
compare = False
if test:
test_main(compare)
else:
import sys
if len(sys.argv) > 1:
f = open(sys.argv[1])
main(f)
f.close()
else:
main(sys.stdin)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
3aea4843be237c4dcdce35ea871082ef159c6872 | b9029f7e08bb93c435290e9e01dba3507714bafc | /tasks.py | a64b8ddab455bd356781035556f67836cb43532a | [
"BSD-3-Clause"
] | permissive | njwardhan/colour | 3a4bf7994e25f02e15aa16bc03d35d7f6cc61a50 | 60679360c3990bc549b5f947bfeb621383e18b5e | refs/heads/master | 2022-09-29T06:17:36.380542 | 2020-01-25T05:10:15 | 2020-01-25T05:10:15 | 253,715,920 | 0 | 0 | null | 2020-04-07T07:14:32 | 2020-04-07T07:14:31 | null | UTF-8 | Python | false | false | 13,629 | py | # -*- coding: utf-8 -*-
"""
Invoke - Tasks
==============
"""
from __future__ import unicode_literals
import sys
try:
import biblib.bib
except ImportError:
pass
import fnmatch
import os
import re
import toml
import uuid
from invoke import task
import colour
from colour.utilities import message_box
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'APPLICATION_NAME', 'APPLICATION_VERSION', 'PYTHON_PACKAGE_NAME',
'PYPI_PACKAGE_NAME', 'BIBLIOGRAPHY_NAME', 'clean', 'formatting', 'tests',
'quality', 'examples', 'preflight', 'docs', 'todo', 'requirements',
'build', 'virtualise', 'tag', 'release', 'sha256'
]
APPLICATION_NAME = colour.__application_name__
APPLICATION_VERSION = colour.__version__
PYTHON_PACKAGE_NAME = colour.__name__
PYPI_PACKAGE_NAME = 'colour-science'
BIBLIOGRAPHY_NAME = 'BIBLIOGRAPHY.bib'
@task
def clean(ctx, docs=True, bytecode=False):
"""
Cleans the project.
Parameters
----------
ctx : invoke.context.Context
Context.
docs : bool, optional
Whether to clean the *docs* directory.
bytecode : bool, optional
Whether to clean the bytecode files, e.g. *.pyc* files.
Returns
-------
bool
Task success.
"""
message_box('Cleaning project...')
patterns = ['build', '*.egg-info', 'dist']
if docs:
patterns.append('docs/_build')
patterns.append('docs/generated')
if bytecode:
patterns.append('**/*.pyc')
for pattern in patterns:
ctx.run("rm -rf {}".format(pattern))
@task
def formatting(ctx, yapf=False, asciify=True, bibtex=True):
"""
    Formats the codebase with *Yapf*, converts unicode characters to ASCII and
    cleans up the "BibTeX" file.
Parameters
----------
ctx : invoke.context.Context
Context.
yapf : bool, optional
Whether to format the codebase with *Yapf*.
asciify : bool, optional
Whether to convert unicode characters to ASCII.
bibtex : bool, optional
Whether to cleanup the *BibTeX* file.
Returns
-------
bool
Task success.
"""
if yapf:
message_box('Formatting codebase with "Yapf"...')
ctx.run('yapf -p -i -r --exclude \'.git\' .')
if asciify:
message_box('Converting unicode characters to ASCII...')
with ctx.cd('utilities'):
ctx.run('./unicode_to_ascii.py')
if bibtex and sys.version_info[:2] >= (3, 2):
message_box('Cleaning up "BibTeX" file...')
bibtex_path = BIBLIOGRAPHY_NAME
with open(bibtex_path) as bibtex_file:
bibtex = biblib.bib.Parser().parse(
bibtex_file.read()).get_entries()
for entry in sorted(bibtex.values(), key=lambda x: x.key):
try:
del entry['file']
except KeyError:
pass
for key, value in entry.items():
entry[key] = re.sub('(?<!\\\\)\\&', '\\&', value)
with open(bibtex_path, 'w') as bibtex_file:
for entry in bibtex.values():
bibtex_file.write(entry.to_bib())
bibtex_file.write('\n')
@task
def tests(ctx, nose=True):
"""
Runs the unit tests with *Nose* or *Pytest*.
Parameters
----------
ctx : invoke.context.Context
Context.
nose : bool, optional
Whether to use *Nose* or *Pytest*.
Returns
-------
bool
Task success.
"""
if nose:
message_box('Running "Nosetests"...')
ctx.run(
'nosetests --with-doctest --with-coverage --cover-package={0} {0}'.
format(PYTHON_PACKAGE_NAME),
env={'MPLBACKEND': 'AGG'})
else:
message_box('Running "Pytest"...')
ctx.run(
'py.test --disable-warnings --doctest-modules '
'--ignore={0}/examples {0}'.format(PYTHON_PACKAGE_NAME),
env={'MPLBACKEND': 'AGG'})
@task
def quality(ctx, flake8=True, rstlint=True):
"""
Checks the codebase with *Flake8* and lints various *restructuredText*
files with *rst-lint*.
Parameters
----------
ctx : invoke.context.Context
Context.
flake8 : bool, optional
Whether to check the codebase with *Flake8*.
rstlint : bool, optional
Whether to lint various *restructuredText* files with *rst-lint*.
Returns
-------
bool
Task success.
"""
if flake8:
message_box('Checking codebase with "Flake8"...')
ctx.run('flake8 {0} --exclude=examples'.format(PYTHON_PACKAGE_NAME))
if rstlint:
message_box('Linting "README.rst" file...')
ctx.run('rst-lint README.rst')
@task
def examples(ctx, plots=False):
"""
Runs the examples.
Parameters
----------
ctx : invoke.context.Context
Context.
plots : bool, optional
Whether to skip or only run the plotting examples: This a mutually
exclusive switch.
Returns
-------
bool
Task success.
"""
message_box('Running examples...')
for root, _dirnames, filenames in os.walk(
os.path.join(PYTHON_PACKAGE_NAME, 'examples')):
for filename in fnmatch.filter(filenames, '*.py'):
if not plots and ('plotting' in root or
'examples_interpolation' in filename or
'examples_contrast' in filename):
continue
if plots and ('plotting' not in root and
'examples_interpolation' not in filename and
'examples_contrast' not in filename):
continue
ctx.run('python {0}'.format(os.path.join(root, filename)))
@task(formatting, tests, quality, examples)
def preflight(ctx):
"""
Performs the preflight tasks, i.e. *formatting*, *tests*, *quality*, and
*examples*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Finishing "Preflight"...')
@task
def docs(ctx, plots=True, html=True, pdf=True):
"""
Builds the documentation.
Parameters
----------
ctx : invoke.context.Context
Context.
plots : bool, optional
Whether to generate the documentation plots.
html : bool, optional
Whether to build the *HTML* documentation.
pdf : bool, optional
Whether to build the *PDF* documentation.
Returns
-------
bool
Task success.
"""
if plots:
with ctx.cd('utilities'):
message_box('Generating plots...')
ctx.run('./generate_plots.py')
with ctx.prefix('export COLOUR_SCIENCE_DOCUMENTATION_BUILD=True'):
with ctx.cd('docs'):
if html:
message_box('Building "HTML" documentation...')
ctx.run('make html')
if pdf:
message_box('Building "PDF" documentation...')
ctx.run('make latexpdf')
@task
def todo(ctx):
"""
Export the TODO items.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Exporting "TODO" items...')
with ctx.cd('utilities'):
ctx.run('./export_todo.py')
@task
def requirements(ctx):
"""
Export the *requirements.txt* file.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Exporting "requirements.txt" file...')
ctx.run('poetry run pip freeze | '
'egrep -v "github.com/colour-science|enum34" '
'> requirements.txt')
@task(clean, preflight, docs, todo, requirements)
def build(ctx):
"""
Builds the project and runs dependency tasks, i.e. *docs*, *todo*, and
*preflight*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Building...')
pyproject_content = toml.load('pyproject.toml')
pyproject_content['tool']['poetry']['name'] = PYPI_PACKAGE_NAME
pyproject_content['tool']['poetry']['packages'] = [{
'include': PYTHON_PACKAGE_NAME,
'from': '.'
}]
with open('pyproject.toml', 'w') as pyproject_file:
toml.dump(pyproject_content, pyproject_file)
ctx.run('poetry build')
ctx.run('git checkout -- pyproject.toml')
with ctx.cd('dist'):
ctx.run('tar -xvf {0}-{1}.tar.gz'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
ctx.run('cp {0}-{1}/setup.py ../'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
ctx.run('rm -rf {0}-{1}'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
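    # Rewrite the poetry-generated setup.py: capture the setup_kwargs dict with a
    # regex, turn each 'key': value pair into a keyword argument, and point
    # long_description at README.rst.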
with open('setup.py') as setup_file:
source = setup_file.read()
setup_kwargs = []
def sub_callable(match):
setup_kwargs.append(match)
return ''
template = """
setup({0}
)
"""
source = re.sub(
'setup_kwargs = {(.*)}.*setup\\(\\*\\*setup_kwargs\\)',
sub_callable,
source,
flags=re.DOTALL)[:-2]
setup_kwargs = setup_kwargs[0].group(1).splitlines()
for i, line in enumerate(setup_kwargs):
setup_kwargs[i] = re.sub('^\\s*(\'(\\w+)\':\\s?)', ' \\2=', line)
if setup_kwargs[i].strip().startswith('long_description'):
setup_kwargs[i] = (
' long_description=open(\'README.rst\').read(),')
source += template.format('\n'.join(setup_kwargs))
with open('setup.py', 'w') as setup_file:
setup_file.write(source)
@task
def virtualise(ctx, tests=True):
"""
Create a virtual environment for the project build.
Parameters
----------
ctx : invoke.context.Context
Context.
tests : bool, optional
Whether to run tests on the virtual environment.
Returns
-------
bool
Task success.
"""
unique_name = '{0}-{1}'.format(PYPI_PACKAGE_NAME, uuid.uuid1())
with ctx.cd('dist'):
ctx.run('tar -xvf {0}-{1}.tar.gz'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
ctx.run('mv {0}-{1} {2}'.format(PYPI_PACKAGE_NAME, APPLICATION_VERSION,
unique_name))
with ctx.cd(unique_name):
ctx.run('poetry env use 3')
ctx.run('poetry install --extras "optional plotting"')
ctx.run('source $(poetry env info -p)/bin/activate')
ctx.run('python -c "import imageio;'
'imageio.plugins.freeimage.download()"')
if tests:
ctx.run('poetry run nosetests', env={'MPLBACKEND': 'AGG'})
@task
def tag(ctx):
"""
Tags the repository according to defined version using *git-flow*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Tagging...')
result = ctx.run('git rev-parse --abbrev-ref HEAD', hide='both')
assert result.stdout.strip() == 'develop', (
'Are you still on a feature or master branch?')
with open(os.path.join(PYTHON_PACKAGE_NAME, '__init__.py')) as file_handle:
file_content = file_handle.read()
major_version = re.search("__major_version__\\s+=\\s+'(.*)'",
file_content).group(1)
minor_version = re.search("__minor_version__\\s+=\\s+'(.*)'",
file_content).group(1)
change_version = re.search("__change_version__\\s+=\\s+'(.*)'",
file_content).group(1)
version = '.'.join((major_version, minor_version, change_version))
result = ctx.run('git ls-remote --tags upstream', hide='both')
remote_tags = result.stdout.strip().split('\n')
tags = set()
for remote_tag in remote_tags:
tags.add(
remote_tag.split('refs/tags/')[1].replace('refs/tags/', '^{}'))
tags = sorted(list(tags))
assert 'v{0}'.format(version) not in tags, (
'A "{0}" "v{1}" tag already exists in remote repository!'.format(
PYTHON_PACKAGE_NAME, version))
ctx.run('git flow release start v{0}'.format(version))
ctx.run('git flow release finish v{0}'.format(version))
@task(clean, build)
def release(ctx):
"""
Releases the project to *Pypi* with *Twine*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Releasing...')
with ctx.cd('dist'):
ctx.run('twine upload *.tar.gz')
ctx.run('twine upload *.whl')
@task
def sha256(ctx):
"""
Computes the project *Pypi* package *sha256* with *OpenSSL*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Computing "sha256"...')
with ctx.cd('dist'):
ctx.run('openssl sha256 {0}-*.tar.gz'.format(PYPI_PACKAGE_NAME))
| [
"thomas.mansencal@gmail.com"
] | thomas.mansencal@gmail.com |
140b356fa408e4eb413cb2c100895ff01e14c112 | 264cbdc7c2b4091179ba5fbdbb15005f6ac58b9f | /Algos/C51/examples/python/c51_ddqn.py | bb6455d9b4ae5b306ac48462cd633e024bd33c62 | [] | no_license | geeko66/PA2018-2019-KA | e25b49dd71ad4b5b2f3a00624147a9b24151c3d8 | 186d127608c8ea754a6e64836b0347d32cf37da6 | refs/heads/master | 2020-04-15T21:46:42.503444 | 2019-01-16T11:12:12 | 2019-01-16T11:12:12 | 165,046,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,185 | py | #!/usr/bin/env python
from __future__ import print_function
import skimage as skimage
from skimage import transform, color, exposure
from skimage.viewer import ImageViewer
import random
from random import choice
import numpy as np
from collections import deque
import time
import math
import pickle
import json
from keras.models import model_from_json
from keras.models import Sequential, load_model, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Dense, Flatten, merge, MaxPooling2D, Input, AveragePooling2D, Lambda, Merge, Activation, Embedding
from keras.optimizers import SGD, Adam, rmsprop
from keras import backend as K
from keras.utils import np_utils
from vizdoom import DoomGame, ScreenResolution
from vizdoom import *
import itertools as it
from time import sleep
import tensorflow as tf
from networks import Networks
import sys
# Not needed for the bonseyes's project
def preprocessImg(img, size):
img = np.rollaxis(img, 0, 3) # It becomes (640, 480, 3)
img = skimage.transform.resize(img,size)
img = skimage.color.rgb2gray(img)
return img
class C51Agent:
def __init__(self, state_size, action_size, num_atoms):
# get size of state and action
self.state_size = state_size
self.action_size = action_size
# these is hyper parameters for the DQN
self.gamma = 0.99
self.learning_rate = 0.0001
self.epsilon = 1.0
self.initial_epsilon = 1.0
self.final_epsilon = 0.0001
self.batch_size = 32
self.observe = 2000
self.explore = 50000
self.frame_per_action = 4
self.update_target_freq = 3000
self.timestep_per_train = 100 # Number of timesteps between training interval
# Initialize Atoms
self.num_atoms = num_atoms # 51 for C51
self.v_max = 30 # Max possible score for Defend the center is 26 - 0.1*26 = 23.4
self.v_min = -10 # -0.1*26 - 1 = -3.6
self.delta_z = (self.v_max - self.v_min) / float(self.num_atoms - 1)
self.z = [self.v_min + i * self.delta_z for i in range(self.num_atoms)]
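        # self.z is the fixed support of the return distribution: num_atoms evenly
        # spaced values on [v_min, v_max]; C51 learns a probability mass per atom.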
# Create replay memory using deque
self.memory = deque()
self.max_memory = 50000 # number of previous transitions to remember
# Models for value distribution
self.model = None
self.target_model = None
# Performance Statistics
self.stats_window_size= 50 # window size for computing rolling statistics
self.mavg_score = [] # Moving Average of Survival Time
self.var_score = [] # Variance of Survival Time
self.mavg_ammo_left = [] # Moving Average of Ammo used
self.mavg_kill_counts = [] # Moving Average of Kill Counts
def update_target_model(self):
"""
After some time interval update the target model to be same with model
"""
self.target_model.set_weights(self.model.get_weights())
def get_action(self, state):
"""
Get action from model using epsilon-greedy policy
"""
if np.random.rand() <= self.epsilon:
#print("----------Random Action----------")
action_idx = random.randrange(self.action_size)
else:
action_idx = self.get_optimal_action(state)
return action_idx
def get_optimal_action(self, state):
"""Get optimal action for a state
"""
z = self.model.predict(state) # Return a list [1x51, 1x51, 1x51]
z_concat = np.vstack(z)
q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1)
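        # q holds E[Z(s, a)] for each action: every atom of the fixed support is
        # weighted by its predicted probability mass and summed.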
# Pick action with the biggest Q value
action_idx = np.argmax(q)
return action_idx
def shape_reward(self, r_t, misc, prev_misc, t):
"""
Reward design:
Will be the inverted time in Bonseyes (x = -x) because
the time is the thing we want to minimize, therrefore we
maximize the invert time
"""
# Check any kill count
if misc[0] > prev_misc[0]:
r_t = r_t + 1
if misc[1] < prev_misc[1]: # Use ammo
r_t = r_t - 0.1
if misc[2] < prev_misc[2]: # Loss HEALTH
r_t = r_t - 0.1
return r_t
# save sample <s,a,r,s'> to the replay memory
def replay_memory(self, s_t, action_idx, r_t, s_t1, is_terminated, t):
"""
Used for the replay experience
"""
self.memory.append((s_t, action_idx, r_t, s_t1, is_terminated))
if self.epsilon > self.final_epsilon and t > self.observe:
self.epsilon -= (self.initial_epsilon - self.final_epsilon) / self.explore
if len(self.memory) > self.max_memory:
self.memory.popleft()
# Update the target model to be same with model
if t % self.update_target_freq == 0:
self.update_target_model()
# pick samples randomly from replay memory (with batch_size)
def train_replay(self):
"""
Notes: Update this part to prioritize the experience replay
following the other code. To see!!!
"""
num_samples = min(self.batch_size * self.timestep_per_train, len(self.memory))
replay_samples = random.sample(self.memory, num_samples)
state_inputs = np.zeros(((num_samples,) + self.state_size))
next_states = np.zeros(((num_samples,) + self.state_size))
m_prob = [np.zeros((num_samples, self.num_atoms)) for i in range(action_size)]
action, reward, done = [], [], []
for i in range(num_samples):
state_inputs[i,:,:,:] = replay_samples[i][0]
action.append(replay_samples[i][1])
reward.append(replay_samples[i][2])
next_states[i,:,:,:] = replay_samples[i][3]
done.append(replay_samples[i][4])
z = self.model.predict(next_states) # Return a list [32x51, 32x51, 32x51]
z_ = self.target_model.predict(next_states) # Return a list [32x51, 32x51, 32x51]
# Get Optimal Actions for the next states (from distribution z)
optimal_action_idxs = []
z_concat = np.vstack(z)
q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1) # length (num_atoms x num_actions)
q = q.reshape((num_samples, action_size), order='F')
optimal_action_idxs = np.argmax(q, axis=1)
# Project Next State Value Distribution (of optimal action) to Current State
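        # Distributional Bellman update: Tz = r + gamma * z is clipped to
        # [v_min, v_max] and its probability mass is split between the two nearest
        # atoms (m_l, m_u) so the target distribution stays on the fixed support.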
for i in range(num_samples):
if done[i]: # Terminal State
# Distribution collapses to a single point
Tz = min(self.v_max, max(self.v_min, reward[i]))
bj = (Tz - self.v_min) / self.delta_z
m_l, m_u = math.floor(bj), math.ceil(bj)
m_prob[action[i]][i][int(m_l)] += (m_u - bj)
m_prob[action[i]][i][int(m_u)] += (bj - m_l)
else:
for j in range(self.num_atoms):
Tz = min(self.v_max, max(self.v_min, reward[i] + self.gamma * self.z[j]))
bj = (Tz - self.v_min) / self.delta_z
m_l, m_u = math.floor(bj), math.ceil(bj)
m_prob[action[i]][i][int(m_l)] += z_[optimal_action_idxs[i]][i][j] * (m_u - bj)
m_prob[action[i]][i][int(m_u)] += z_[optimal_action_idxs[i]][i][j] * (bj - m_l)
loss = self.model.fit(state_inputs, m_prob, batch_size=self.batch_size, epochs=1, verbose=0)
return loss.history['loss']
# load the saved model
def load_model(self, name):
self.model.load_weights(name)
# save the model which is under training
def save_model(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
print("System path")
print(sys.path)
# Avoid Tensorflow eats up GPU memory
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
game = DoomGame()
# game.load_config("..\..\scenarios\defend_the_center.cfg")
game.load_config("/Users/tesla/Downloads/ViZDoom-master/scenarios/defend_the_center.cfg")
game.set_sound_enabled(True)
game.set_screen_resolution(ScreenResolution.RES_640X480)
game.set_window_visible(False)
game.set_mode(Mode.PLAYER)
game.init()
game.new_episode("./episode_rec/ep1.lmp")
game_state = game.get_state()
misc = game_state.game_variables # [KILLCOUNT, AMMO, HEALTH]
prev_misc = misc
action_size = game.get_available_buttons_size()
img_rows , img_cols = 64, 64
# Convert image into Black and white
img_channels = 4 # We stack 4 frames
# C51
num_atoms = 51
state_size = (img_rows, img_cols, img_channels)
agent = C51Agent(state_size, action_size, num_atoms)
agent.model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)
agent.target_model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)
x_t = game_state.screen_buffer # 480 x 640
x_t = preprocessImg(x_t, size=(img_rows, img_cols))
s_t = np.stack(([x_t]*4), axis=2) # It becomes 64x64x4
s_t = np.expand_dims(s_t, axis=0) # 1x64x64x4
is_terminated = game.is_episode_finished()
# Start training
epsilon = agent.initial_epsilon
GAME = 0
t = 0
max_life = 0 # Maximum episode life (Proxy for agent performance)
life = 0
# Buffer to compute rolling statistics
tot_reward_buffer, life_buffer, ammo_buffer, kills_buffer, mavg_score, \
var_score, mavg_ammo_left, mavg_kill_counts, \
mavg_tot_rewards = [], [], [], [], [], [], [], [], []
losses_buffer, epsilon_buffer, stats_store = [], [], []
episode_co = 1
while not game.is_episode_finished():
loss = 0
r_t = 0
a_t = np.zeros([action_size])
# Epsilon Greedy
action_idx = agent.get_action(s_t)
a_t[action_idx] = 1
a_t = a_t.astype(int)
game.set_action(a_t.tolist())
skiprate = agent.frame_per_action
game.advance_action(skiprate)
game_state = game.get_state() # Observe again after we take the action
is_terminated = game.is_episode_finished()
r_t = game.get_last_reward() #each frame we get reward of 0.1, so 4 frames will be 0.4
if (is_terminated):
if (life > max_life):
max_life = life
GAME += 1
life_buffer.append(life)
ammo_buffer.append(misc[1])
kills_buffer.append(misc[0])
print("Episode Finish ", misc)
game.new_episode("./episode_rec/ep" + str(episode_co) + "_rec.lmp")
episode_co += 1
game_state = game.get_state()
misc = game_state.game_variables
x_t1 = game_state.screen_buffer
x_t1 = game_state.screen_buffer
misc = game_state.game_variables
x_t1 = preprocessImg(x_t1, size=(img_rows, img_cols))
x_t1 = np.reshape(x_t1, (1, img_rows, img_cols, 1))
s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3)
r_t = agent.shape_reward(r_t, misc, prev_misc, t)
if (is_terminated):
life = 0
else:
life += 1
#update the cache
prev_misc = misc
# save the sample <s, a, r, s'> to the replay memory and decrease epsilon
agent.replay_memory(s_t, action_idx, r_t, s_t1, is_terminated, t)
# Do the training
if t > agent.observe and t % agent.timestep_per_train == 0:
loss = agent.train_replay()
losses_buffer.append({'loss': loss, 'episode': GAME})
s_t = s_t1
t += 1
# save progress every 10000 iterations
if t % 10000 == 0:
print("Now we save model")
agent.model.save_weights("./models/c51_ddqn.h5", overwrite=True)
# print info
state = ""
if t <= agent.observe:
state = "observe"
elif t > agent.observe and t <= agent.observe + agent.explore:
state = "explore"
else:
state = "train"
if is_terminated:
print("TIME", t, "/ GAME", GAME, "/ STATE", state, \
"/ EPSILON", agent.epsilon, "/ ACTION", action_idx, "/ REWARD", r_t, \
"/ LIFE", max_life, "/ LOSS", loss)
epsilon_buffer.append(agent.epsilon)
tot_reward_buffer.append(r_t)
# Save Agent's Performance Statistics
if GAME % agent.stats_window_size == 0 and t > agent.observe:
print("Update Rolling Statistics")
agent.mavg_score.append(np.mean(np.array(life_buffer)))
agent.var_score.append(np.var(np.array(life_buffer)))
agent.mavg_ammo_left.append(np.mean(np.array(ammo_buffer)))
agent.mavg_kill_counts.append(np.mean(np.array(kills_buffer)))
mavg_tot_rewards.append(np.mean(np.array(tot_reward_buffer)))
# Reset rolling stats buffer
life_buffer, ammo_buffer, kills_buffer = [], [], []
# Write Rolling Statistics to file
with open("./c51_ddqn_stats.txt", "w") as stats_file:
stats_file.write('Game: ' + str(GAME) + '\n')
stats_file.write('Max Score: ' + str(max_life) + '\n')
stats_file.write('mavg_score: ' + str(agent.mavg_score) + '\n')
stats_file.write('var_score: ' + str(agent.var_score) + '\n')
stats_file.write('mavg_ammo_left: ' + str(agent.mavg_ammo_left) + '\n')
stats_file.write('mavg_kill_counts: ' + str(agent.mavg_kill_counts) + '\n')
stats_file.write('mavg_rewards: ' + str(mavg_tot_rewards) + "\n")
with open("./ddqn_pr_steps_stats" + str(GAME) + ".pickle",
'wb') as handle:
pickle.dump(stats_store.append(
{'game': GAME, 'max_score': max_life,
'mavg_score': agent.mavg_score,
'var_score': agent.var_score,
'mavg_ammo_left': agent.mavg_ammo_left,
'mavg_kill_counts': agent.mavg_kill_counts,
'mavg_tot_rewards': mavg_tot_rewards}), handle,
protocol=pickle.HIGHEST_PROTOCOL)
with open("./buffer_dic_data" + str(GAME) + ".pickle", 'wb') as handle:
pickle.dump(stats_store.append({'life_buffer': life_buffer,
'ammo_buffer': ammo_buffer,
'kills_buffer': kills_buffer,
'tot_reward_buffer': tot_reward_buffer,
'losses': losses_buffer,
'epsilon': epsilon_buffer}),
handle, protocol=pickle.HIGHEST_PROTOCOL) | [
"noreply@github.com"
] | noreply@github.com |
729aafbd622a90e8bebf023ef2424d3fcf61b70c | afea9757be324c8def68955a12be11d71ce6ad35 | /willyanealves/services/migrations/0014_auto_20201209_1623.py | aa5563d97e9d3dbc154b4da10bedc96ae1265e5e | [] | no_license | bergpb/willyane-alves | c713cac3ec3a68005f3b8145985693d2477ba706 | 8b2b9922ba35bf2043f2345228f03d80dbd01098 | refs/heads/master | 2023-02-10T19:57:50.893172 | 2021-01-11T16:17:14 | 2021-01-11T16:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # Generated by Django 3.1.2 on 2020-12-09 19:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('stock', '0001_initial'),
('services', '0013_remove_kititem_price'),
]
operations = [
migrations.AlterField(
model_name='kititem',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stockitem', to='stock.stock'),
),
]
| [
"jocsadm@gmail.com"
] | jocsadm@gmail.com |
041cf40053b8f029ba5b1f64754d2048cbb70f5e | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res_bw/scripts/common/lib/idlelib/grepdialog.py | 05f4b74a7d37f75455c785428aa681b07d431a4b | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,154 | py | # 2015.11.10 21:36:11 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/idlelib/GrepDialog.py
import os
import fnmatch
import sys
from Tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
def grep(text, io = None, flist = None):
root = text._root()
engine = SearchEngine.get(root)
if not hasattr(engine, '_grepdialog'):
engine._grepdialog = GrepDialog(root, engine, flist)
dialog = engine._grepdialog
searchphrase = text.get('sel.first', 'sel.last')
dialog.open(text, searchphrase, io)
class GrepDialog(SearchDialogBase):
title = 'Find in Files Dialog'
icon = 'Grep'
needwrapbutton = 0
def __init__(self, root, engine, flist):
SearchDialogBase.__init__(self, root, engine)
self.flist = flist
self.globvar = StringVar(root)
self.recvar = BooleanVar(root)
def open(self, text, searchphrase, io = None):
SearchDialogBase.open(self, text, searchphrase)
if io:
path = io.filename or ''
else:
path = ''
dir, base = os.path.split(path)
head, tail = os.path.splitext(base)
if not tail:
tail = '.py'
self.globvar.set(os.path.join(dir, '*' + tail))
def create_entries(self):
SearchDialogBase.create_entries(self)
self.globent = self.make_entry('In files:', self.globvar)
def create_other_buttons(self):
f = self.make_frame()
btn = Checkbutton(f, anchor='w', variable=self.recvar, text='Recurse down subdirectories')
btn.pack(side='top', fill='both')
btn.select()
def create_command_buttons(self):
SearchDialogBase.create_command_buttons(self)
self.make_button('Search Files', self.default_command, 1)
def default_command(self, event = None):
prog = self.engine.getprog()
if not prog:
return
path = self.globvar.get()
if not path:
self.top.bell()
return
from idlelib.OutputWindow import OutputWindow
save = sys.stdout
try:
sys.stdout = OutputWindow(self.flist)
self.grep_it(prog, path)
finally:
sys.stdout = save
def grep_it(self, prog, path):
dir, base = os.path.split(path)
list = self.findfiles(dir, base, self.recvar.get())
list.sort()
self.close()
pat = self.engine.getpat()
print 'Searching %r in %s ...' % (pat, path)
hits = 0
for fn in list:
try:
with open(fn) as f:
for lineno, line in enumerate(f, 1):
if line[-1:] == '\n':
line = line[:-1]
if prog.search(line):
sys.stdout.write('%s: %s: %s\n' % (fn, lineno, line))
hits += 1
except IOError as msg:
print msg
print 'Hits found: %s\n(Hint: right-click to open locations.)' % hits if hits else 'No hits.'
def findfiles(self, dir, base, rec):
try:
names = os.listdir(dir or os.curdir)
except os.error as msg:
print msg
return []
list = []
subdirs = []
for name in names:
fn = os.path.join(dir, name)
if os.path.isdir(fn):
subdirs.append(fn)
elif fnmatch.fnmatch(name, base):
list.append(fn)
if rec:
for subdir in subdirs:
list.extend(self.findfiles(subdir, base, rec))
return list
def close(self, event = None):
if self.top:
self.top.grab_release()
self.top.withdraw()
if __name__ == '__main__':
import unittest
unittest.main('idlelib.idle_test.test_grep', verbosity=2, exit=False)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\idlelib\grepdialog.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:36:11 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
489be89dfb47f43097ad446f460e1cbd05328464 | 2cfe527e8a5d9c44aa0f83574b1016ec35755446 | /PyFunnels/PyF_theharvester.py | 4b3c10eeaa1b0b57eb4a4a85d46a07744ac7e1e2 | [
"MIT"
] | permissive | polling-repo-continua/PyFunnels | e3d7a6a89d0369914f5b7ca160c16ea9ebe025c6 | f8089c3c39248eb1ef97f2681c43f76f55a07900 | refs/heads/master | 2022-02-14T12:07:09.866528 | 2019-08-13T17:52:07 | 2019-08-13T17:52:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | import xml.etree.ElementTree as ET
class PyFtheHarvester:
CAPABILITIES = ['domains', 'ips', 'emails']
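    # Parses a theHarvester XML report: <host> elements provide hostname and ip
    # children, <email> elements provide addresses; each extractor below
    # de-duplicates results into its corresponding list.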
def __init__(self,
file,
list_domains = [],
list_ips = [],
list_emails = []
):
self.file = file
self.list_domains = list_domains
self.list_ips = list_ips
self.list_emails = list_emails
self.tree = ET.parse(self.file)
self.root = self.tree.getroot()
def domains(self):
for d in self.root.findall('host'):
domain = d.find('hostname').text
if domain not in self.list_domains:
self.list_domains.append(domain)
def ips(self):
for i in self.root.findall('host'):
ip = i.find('ip').text
if ip not in self.list_ips:
self.list_ips.append(ip)
def emails(self):
for e in self.root.findall('email'):
email = e.text
if email not in self.list_emails:
self.list_emails.append(email) | [
"noreply@github.com"
] | noreply@github.com |
c91563eee6c60960746a34671256bdc380a91e08 | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/test/programytest/storage/stores/nosql/mongo/store/test_sets.py | b4a1ce00829727f91194650b0127c7d2bb059299 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,711 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programytest.storage.asserts.store.assert_sets import SetStoreAsserts
from programy.storage.stores.nosql.mongo.store.sets import MongoSetsStore
from programy.storage.stores.nosql.mongo.engine import MongoStorageEngine
from programy.storage.stores.nosql.mongo.config import MongoStorageConfiguration
import programytest.storage.engines as Engines
class MongoSetsStoreTests(SetStoreAsserts):
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_initialise(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assertEqual(store.storage_engine, engine)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_set_storage(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_set_storage(store)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_upload_from_text(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_from_text(store)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_upload_from_text_file(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_from_text_file(store)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_upload_text_files_from_directory_no_subdir(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_text_files_from_directory_no_subdir(store)
@unittest.skip("CSV not supported yet")
def test_upload_from_csv_file(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_from_csv_file(store)
@unittest.skip("CSV not supported yet")
def test_upload_csv_files_from_directory_with_subdir(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_csv_files_from_directory_with_subdir(store)
| [
"cliff@cotobadesign.com"
] | cliff@cotobadesign.com |
e7b6ed30d1d3b6ae95bd07204d6d545021943528 | a3ffecad8d176142f0f9b7504503365b8e64bd69 | /turtle2/n2.py | 2bd41ffc2fb2ecbcdad4ab57df34e1a505316357 | [] | no_license | dumb-anchovy/mipt_python_1sem | 517a497d879be1f32530c023af2a9481430c024f | 76d4f378ff74345ac3107d42ce16a68cc5d2e46f | refs/heads/main | 2023-08-27T16:48:18.210559 | 2021-11-02T11:25:17 | 2021-11-02T11:25:17 | 410,534,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | import turtle as t
a0 = [0, 0, 40, 0, 0, -80, -40, 0, 0, 80, 0, 0]
a1 = [0, -40, 40, 40, 0, -80, -40, 80]
a2 = [0, 0, 40, 0, 0, -40, -40, -40, 40, 0, -40, 80]
a3 = [0, 0, 40, 0, -40, -40, 40, 0, -40, -40, 0, 80]
a4 = [0, 0, 0, -40, 40, 0, 0, -40, 0, 80, -40, 0]
a5 = [40, 0, -40, 0, 0, -40, 40, 0, 0, -40, -40, 0, 0, 80]
a6 = [40, 0, -40, -40, 0, -40, 40, 0, 0, 40, -40, 0, 0, 40]
a7 = [0, 0, 40, 0, -40, -40, 0, -40, 0, 80]
a8 = [0, 0, 40, 0, 0, -40, -40, 0, 0, -40, 40, 0, 0, 40, -40, 0, 0, 40, 0, 0]
a9 = [0, -80, 40, 40, 0, 40, -40, 0, 0, -40, 40, 0, -40, 40]
al = [a0, a1, a2, a3, a4, a5, a6, a7, a8, a9]
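# Each a<digit> list encodes one digit as relative (dx, dy) pen moves; ch() lifts
# the pen for the first and last move of the list and draws along the moves in
# between, so the digits in k below are traced one after another.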
def ch(a):
x = t.xcor()
y = t.ycor()
for n in range(0, len(a), 2):
if (n == 0) or (n == len(a) - 2):
x += a[n]
y += a[n + 1]
t.penup()
t.goto(x, y)
t.pendown()
else:
x += a[n]
y += a[n + 1]
t.goto(x, y)
x = -370
y = 0
t.penup()
t.goto(x, y)
t.pendown()
#141700
k = [1, 4, 1, 7, 0, 0]
for j in k:
ch(al[j])
x = t.xcor()
y = t.ycor()
t.penup()
t.goto(x + 80, y)
t.pendown()
t.exitonclick()
| [
"noreply@github.com"
] | noreply@github.com |
c941709fbed0b9fa452dac0e4e3ea4916d99de51 | 3b630e8ffae16049b09ea90b3d4af4e2c7b9483b | /firstphy.py | 35ea0b20e4778b407114c119c477c625d43f2d8e | [] | no_license | shafifx/myhub | fe91a2d46c0ba7f7d58057e1d05aecc067989fc9 | a3939fe4743a80535af1334f1f7fc78f28482745 | refs/heads/main | 2023-06-06T22:34:09.271540 | 2021-07-08T16:17:53 | 2021-07-08T16:17:53 | 383,184,433 | 0 | 0 | null | 2021-07-08T16:17:53 | 2021-07-05T15:21:38 | Python | UTF-8 | Python | false | false | 43 | py | hry pythonhttps://github.com/shafifx/myhub
| [
"noreply@github.com"
] | noreply@github.com |
78f31a9c174255d188697506e1941c866f62891c | 8f949493064b77dd3f19ceeed1e86382ace176d6 | /posts/urls.py | 3f113ad6817989d01a71ca2970489a00507bc58f | [] | no_license | sudhanshu-jha/simplesocial | 44a19a1b1051dcc8577de5d87660a5b890b829d1 | 6d40293be75703d5498025150acf9e91bae6f77c | refs/heads/master | 2020-04-17T07:41:54.207867 | 2019-01-18T10:24:14 | 2019-01-18T10:24:14 | 135,698,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | from django.conf.urls import url
from . import views
app_name = "posts"
urlpatterns = [
url(r"^$", views.PostList.as_view(), name="all"),
url(r"new/$", views.CreatePost.as_view(), name="create"),
url(r"by/(?P<username>[-\w]+)/$", views.UserPosts.as_view(), name="for_user"),
url(
r"by/(?P<username>[-\w]+)/(?P<pk>\d+)/$",
views.PostDetail.as_view(),
name="single",
),
url(r"delete/(?P<pk>\d+)/$", views.DeletePost.as_view(), name="delete"),
]
| [
"noreply@github.com"
] | noreply@github.com |
c82b677441afb16074f0386638f5da0f86f9303e | 56a8d1f72b005bd52560c3804541be729876aa9f | /rotation.py | 2f05ebde4daf3525b7c39a173e8cbb402cf3dc59 | [] | no_license | drrobotk/pycodilitytests | e5e13c9dd683207290e598e577d73555c0ef29ed | acb5a8ad52135fa327fb97d7c42f95ae23cb3389 | refs/heads/master | 2021-04-14T03:16:33.397722 | 2020-03-22T15:23:57 | 2020-03-22T15:23:57 | 249,203,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A, K):
k = 0
# write your code in Python 3.6
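    # Rotate A to the right K times by repeatedly moving the last element to the
    # front; this is O(N*K), and rotating once by K % len(A) with slicing would
    # avoid the loop for large K.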
if len(A) != 0:
for i in range(K):
k = A[len(A)-1]
A.pop()
A.insert(0,k)
return A
if __name__ == '__main__':
A = []
K = 3
result = solution(A, K)
print(result) | [
"noreply@github.com"
] | noreply@github.com |
0f0a43f2a910cb3bd27dccab958083608f47a592 | 0258e0c9595406ceb3de32067aff776bc2a58fa8 | /06_p12.py | a649f413d98bebdcef131856db0da2a3d6949b5d | [] | no_license | akromibn37/python_code | 72c016c361b3ba2e04c83e1d1a703171b0bd8819 | 41d1a09f8ec8696e37ad83c1a0cb6506c7f0f4f6 | refs/heads/master | 2020-03-21T22:57:25.111642 | 2018-06-29T14:14:33 | 2018-06-29T14:14:33 | 139,157,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | data = input().strip()
l = []
for x in range(len(data)):
l.append(data[x])
num = int(input().strip())
out = ""
i = 0
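# Apply `num` editing commands to the character list:
#   "in ch pos" inserts ch before index pos, "out pos" removes the character at pos,
#   "swap i j" exchanges the characters at indices i and j; the string is printed after each command.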
while i<num:
out = ""
command = [e for e in input().split()]
if command[0] == "in":
l.insert(int(command[2]),command[1])
elif command[0] == "out":
l.pop(int(command[1]))
elif command[0] == "swap":
x = l[int(command[1])]
y = l[int(command[2])]
l[int(command[1])] = y
l[int(command[2])] = x
for j in range(len(l)):
out += l[j]
print(out)
i+=1
| [
"apple@Apples-MacBook-Pro.local"
] | apple@Apples-MacBook-Pro.local |
8ab81a05046b4fbe1d20f70062f9411fee994e8d | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/pyr_Tcrop255_pad20_jit15/Sob_k17_s001/pyr_4s/L4/step10_a.py | 75773149c2e2458db22e88582b00384156b134b7 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,921 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                      ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")               ### split the path so we can find which level kong_model sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")         ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys                                                      ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer  ### the -1 converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] was meant to strip the step1x_ prefix; meaningful names read fine without stripping, so this became 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] was meant to strip the mask_ prefix; mask_ only existed because a python module name cannot start with a digit, and the automatic ordering turned out to be acceptable, so this became 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)  ### e.g. template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_4side_L4 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the folder name one level above result_dir! A nested exp_dir is fine too~
For example, with exp_dir = "6_mask_unet/a_name_you_chose", the result_dirs all live under:
    6_mask_unet/a_name_you_chose/result_a
    6_mask_unet/a_name_you_chose/result_b
    6_mask_unet/a_name_you_chose/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_MC
use_loss_obj = [G_sobel_k17_loss_info_builder.set_loss_target("UNet_Mask").copy()]  ### the z, y, x order follows the mapping in step07_b_0b_Multi_UNet
#############################################################
### build an empty Exp_builder so resul_analyze can draw blank placeholder figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
# 1 3 6 10 15 21 28 36 45 55
# side1 OK 1
ch032_1side_1__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# side2 OK 4
ch032_1side_2__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# side3 OK 10
ch032_1side_3__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 "10" 15 21 28 36 45 55
# side4 OK 20
ch032_1side_4__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 10 "15" 21 28 36 45 55
# side5 OK 35
ch032_1side_5__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_5.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Press F5 directly, or run "python step10_b1_exp_obj_load_and_train_and_test.py" with nothing after it! That way execution never reaches the code below, which is meant for step10_b_subprocess.py.
ch032_1side_1__2side_1__3side_1_4side_1.build().run()
# print('no argument')
sys.exit()
    ### The code below is for step10_b_subprocess.py; it is equivalent to typing "python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()" at the cmd prompt.
eval(sys.argv[1])
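    ### Hypothetical illustration (an assumption, not code from this repo): step10_b_subprocess.py
    ### presumably launches this script once per experiment and passes the expression to evaluate
    ### as the first command-line argument, roughly like this:
    ###
    ###     import subprocess
    ###     subprocess.run(["python", "step10_b1_exp_obj_load_and_train_and_test.py",
    ###                     "ch032_1side_1__2side_1__3side_1_4side_1.build().run()"])
    ###
    ### The eval(sys.argv[1]) call above then builds and runs exactly that experiment.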
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
a39cbb706ac3420712b45eb050eae01efddba13e | 1e3f458b297b349eb875aebab254e05cdad2458e | /guessno.py | 6d6b1cee5d25c4ad5b2e5dd171bb21ffbf8c8694 | [] | no_license | mezeru/Python_Coding | 899169e162d01a2a1f6f043e45f3b07dc68e1001 | 99941431025b5c35731903dabb6c9e6106f59fcc | refs/heads/master | 2023-07-04T11:51:28.174018 | 2021-08-06T20:05:58 | 2021-08-06T20:05:58 | 255,226,334 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import random
def rnum():
return (random.randint(0,10))
fnum=rnum()
cou = 0
while True:
    print("Guess the number (0 to 10):")
cou=cou+1
G=int(input())
if fnum == G :
        print("You guessed right in " + str(cou) + " guess(es)")
break
elif fnum > G:
print("You guessed LOW")
continue
elif fnum < G:
print("You guessed High")
continue
else:
continue
| [
"noreply@github.com"
] | noreply@github.com |
a3832070b1ec7002d6f2dd0a9f5bd280d29a3962 | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/tensorflow/python/keras/layers/cudnn_recurrent 2.py | 96ae66c775e623fff4738688d4f11005c5261b33 | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:52c49577848819c4116b99c29c11e765e7a2d686e7ccb4dc7b84454bdf31510f
size 20854
| [
"business030301@gmail.com"
] | business030301@gmail.com |
170eb1cf38678e8baf10258b548535244e7f2996 | 12df1e58fe493c4a929e6d54a938f9b357964701 | /Day-5 Closest Value in BST.py | 68d748a07e7b9618aa7fce5bd8d1b7190170c74e | [] | no_license | Anshul-Dagar/100-Day-Coding-Challenge | 132dadc50b572428c7e33ceda329770d8766965a | 33f10cc6357d4ca3fa8a16cc954f6559f39e73bb | refs/heads/main | 2023-02-12T04:04:12.389433 | 2021-01-09T13:56:36 | 2021-01-09T13:56:36 | 326,140,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 19:30:49 2021
@author: ironman
"""
class BST:
def __init__(self,value):
self.value=value
self.left=None
self.right=None
def insert(self,value):
currentnode=self
while True:
if value<currentnode.value:
if currentnode.left is None:
currentnode.left=BST(value)
break
else:
currentnode=currentnode.left
else:
if currentnode.right is None:
currentnode.right=BST(value)
break
else:
currentnode=currentnode.right
return self
def contain(self,value):
currentnode=self
while currentnode is not None:
if value<currentnode.value:
currentnode=currentnode.left
elif value>currentnode.value:
currentnode=currentnode.right
else:
return True
return False
def findClosestValueInBst(target,tree):
return findClosestValueInBstHelper(target,tree,float("inf"))
def findClosestValueInBstHelper(target,tree,closest):
currentnode=tree
while currentnode is not None:
if abs(target-closest)>abs(target-currentnode.value):
closest=currentnode.value
if target>currentnode.value:
currentnode=currentnode.right
elif target<currentnode.value:
currentnode=currentnode.left
else:
break
return closest
tree=BST(10)
tree.insert(5)
tree.insert(15)
tree.insert(2)
tree.insert(5)
tree.insert(1)
ans=findClosestValueInBst(9,tree)
print(ans) | [
"noreply@github.com"
] | noreply@github.com |
4f763a66d6c6077358c6dadee57b52bddcadf918 | 2a9572e6f1cfb329a12d6835071483ec89ec6538 | /flask_test/flask_blog.py | 1ceec00beaf74f9b274f237f1860dfff21615f7f | [] | no_license | Cezar04/petproject | c8a4c810a8b05d0645dc36601539034dc35be6b5 | 9093d2435f779235db5f9e79417395e4dd13e8b0 | refs/heads/master | 2022-11-13T11:12:29.785362 | 2020-07-03T11:14:25 | 2020-07-03T11:14:25 | 276,875,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | from flask import Flask, render_template, url_for, flash, redirect, request
from forms import registration_form, login_form, post_form
import data_manager
app = Flask(__name__)
app.config['SECRET_KEY'] = 'haker'
posts = [
{"author":"Gigel",
"title": "blog post 1",
"content":"First post content",
"date_posted": "marite 200001"},
{"author":"Gina gaina",
"title": "blog post 2",
"content":"First post content",
"date_posted": "marite 202"}
]
@app.route('/')
@app.route('/home')
def home():
return render_template('home.html', posts=posts)
@app.route('/about')
def about():
return render_template("about.html")
@app.route('/register', methods=["GET", "POST"])
def register():
form = registration_form()
if form.validate_on_submit():
flash(f"Account created for {form.username.data}!", "success")
return redirect(url_for("home"))
return render_template("register.html", title="Register", form=form)
@app.route('/login', methods=["GET", "POST"])
def login():
form = login_form()
if form.validate_on_submit():
if form.email.data == "admin@blog.com" and form.password.data == "1234":
flash('You are logged in!', 'success')
return redirect(url_for('home'))
else:
flash("Login failed, check username and password", 'danger')
return render_template("login.html", title="Login", form=form)
@app.route('/post/new', methods=["GET", "POST"])
def new_post():
form = post_form()
if form.validate_on_submit():
# post = posts(title=form.title.data, author=form.content.data, content=form.content.data)
flash("Post Created", "success")
return redirect(url_for("home"))
return render_template("create_post.html", title="New Post", form=form)
if __name__ == "__main__":
app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
6e40ec6f6b3b14aa33b9e1e5a07f218ba7ee36e0 | 00d2f3fde2c3d9e03a1babc958e35285d5798352 | /removedependent.py | 626bf7416873208dd75191cd10f065def3a4c318 | [] | no_license | N-S-Krishnan/Database-GUI-Elmasri-and-Navathe- | 14043e90c2e25e6c5ab080cc5efe985731479b93 | f8a60edad75505ad0587f3a3562cfc14cc0d018f | refs/heads/main | 2023-04-22T07:34:54.141788 | 2021-04-26T01:07:05 | 2021-04-26T01:07:05 | 361,572,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,474 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'removedependent.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QDialog, QMessageBox, QTableWidgetItem
import mysql.connector
from PyQt5.QtCore import QRegExp
from PyQt5.QtGui import QRegExpValidator
class Ui_RemoveDependent(object):
passedssn = -1
deldepname = ""
db = None # mysql connection
def __init__(self, obj):
self.passedssn = obj.textEdit.text()
def setupUi(self, RemoveDependent):
RemoveDependent.setObjectName("RemoveDependent")
RemoveDependent.resize(700, 505)
self.buttonBox = QtWidgets.QDialogButtonBox(RemoveDependent)
self.buttonBox.setGeometry(QtCore.QRect(310, 420, 321, 31))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayoutWidget = QtWidgets.QWidget(RemoveDependent)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 80, 641, 201))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.tabdependents = QtWidgets.QTableWidget(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabdependents.sizePolicy().hasHeightForWidth())
self.tabdependents.setSizePolicy(sizePolicy)
self.tabdependents.setMinimumSize(QtCore.QSize(639, 0))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.tabdependents.setFont(font)
self.tabdependents.setAutoFillBackground(True)
self.tabdependents.setGridStyle(QtCore.Qt.SolidLine)
self.tabdependents.setRowCount(10)
self.tabdependents.setColumnCount(4)
self.tabdependents.setObjectName("tabdependents")
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(3, item)
self.tabdependents.horizontalHeader().setSortIndicatorShown(False)
self.verticalLayout.addWidget(self.tabdependents)
self.label_2 = QtWidgets.QLabel(RemoveDependent)
self.label_2.setGeometry(QtCore.QRect(30, 360, 161, 16))
self.label_2.setObjectName("label_2")
self.empssn = QtWidgets.QLineEdit(RemoveDependent)
self.empssn.setGeometry(QtCore.QRect(90, 20, 101, 31))
self.empssn.setObjectName("empssn")
self.gobutton = QtWidgets.QPushButton(RemoveDependent)
self.gobutton.setGeometry(QtCore.QRect(40, 420, 93, 28))
self.gobutton.setObjectName("gobutton")
self.dname = QtWidgets.QTextEdit(RemoveDependent)
self.dname.setGeometry(QtCore.QRect(230, 350, 271, 31))
self.dname.setObjectName("dname")
self.label = QtWidgets.QLabel(RemoveDependent)
self.label.setGeometry(QtCore.QRect(30, 20, 71, 21))
self.label.setObjectName("label")
self.empname = QtWidgets.QLabel(RemoveDependent)
self.empname.setGeometry(QtCore.QRect(240, 20, 71, 21))
self.empname.setObjectName("empname")
self.empname_2 = QtWidgets.QTextEdit(RemoveDependent)
self.empname_2.setGeometry(QtCore.QRect(310, 20, 261, 31))
self.empname_2.setObjectName("empname_2")
self.label_6 = QtWidgets.QLabel(RemoveDependent)
self.label_6.setGeometry(QtCore.QRect(30, 310, 121, 16))
self.label_6.setObjectName("label_6")
self.depcount = QtWidgets.QTextEdit(RemoveDependent)
self.depcount.setGeometry(QtCore.QRect(210, 300, 31, 31))
self.depcount.setObjectName("depcount")
self.retranslateUi(RemoveDependent)
self.buttonBox.rejected.connect(RemoveDependent.reject)
QtCore.QMetaObject.connectSlotsByName(RemoveDependent)
self.empssn.setText(self.passedssn)
self.empssn.setDisabled(True)
self.select_data()
self.tabdependents.clicked.connect(self.select_depname)
self.gobutton.clicked.connect(self.processdelete)
def retranslateUi(self, RemoveDependent):
_translate = QtCore.QCoreApplication.translate
RemoveDependent.setWindowTitle(_translate("RemoveDependent", "RemoveDependent"))
self.tabdependents.setSortingEnabled(True)
item = self.tabdependents.horizontalHeaderItem(0)
item.setText(_translate("RemoveDependent", "Name"))
item = self.tabdependents.horizontalHeaderItem(1)
item.setText(_translate("RemoveDependent", "Sex"))
item = self.tabdependents.horizontalHeaderItem(2)
item.setText(_translate("RemoveDependent", "Date of Birth"))
item = self.tabdependents.horizontalHeaderItem(3)
item.setText(_translate("RemoveDependent", "Relationship"))
self.label_2.setText(_translate("RemoveDependent", "Name to Delete:"))
self.gobutton.setText(_translate("RemoveDependent", "Delete"))
self.label.setText(_translate("RemoveDependent", "Emp SSN"))
self.empname.setText(_translate("RemoveDependent", "Emp Name"))
self.label_6.setText(_translate("RemoveDependent", "Dependent Count"))
def select_data(self):
# Retrieve data on existing dependents that correspond to an ssn value given
try:
#print("select dependents")
self.db = mysql.connector.connect(option_files='mydb.conf')
cursor = self.db.cursor()
cursor.execute("select concat(fname, ' ', minit,' ', lname) empname from employee where ssn =" + str(self.passedssn) +
" for update ")
for row in cursor:
self.empname_2.setText(row[0])
#print(row)
self.empname_2.setDisabled(True)
nrows = cursor.rowcount
#print('nrows', nrows)
if nrows <= 0 :
msg = QtWidgets.QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("No employee with ssn "+ str(self.passedssn))
msg.setWindowTitle("Add Dependent")
msg.exec()
self.reject()
cursor.execute("select dependent_name, sex, bdate, relationship from dependent where essn =" + str(self.passedssn) +
" for update ")
result = cursor.fetchall()
nrows = cursor.rowcount
self.depcount.setText(str(nrows))
self.depcount.setDisabled(True)
self.tabdependents.setRowCount(0)
for rnum, rdata in enumerate(result):
self.tabdependents.insertRow(rnum)
for colnum, cdata in enumerate(rdata):
self.tabdependents.setItem(rnum, colnum,QTableWidgetItem(str(cdata)))
#self.tabdependents.setDisabled(True)
except mysql.connector.Error as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("SQL Error "+ str(e.msg))
msg.setWindowTitle("Add Dependent")
msg.exec()
def select_depname(self, item):
cellContent = item.data()
#print(cellContent) # test
#sf = "You clicked on {} {}".format(item.row(), item.column())
#print(sf)
myrow = item.row()
mycol = item.column()
if mycol == 0:
self.dname.setText(cellContent)
self.deldepname = cellContent
def processdelete(self, item):
if self.dname != "":
self.db = mysql.connector.connect(option_files='mydb.conf')
cursor = self.db.cursor()
# The number of variables we pass to the delete query is small enough where we can place them directly into
# the string that forms the sql query
cursor.execute("delete from dependent where essn =" + str(self.passedssn)+" and dependent_name = '"+self.deldepname+"'")
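            # A hedged alternative sketch (an assumption, not the original author's code): the same
            # delete can use mysql.connector's parameter binding, which avoids hand-built quoting of
            # the dependent name:
            #
            #     cursor.execute(
            #         "delete from dependent where essn = %s and dependent_name = %s",
            #         (self.passedssn, self.deldepname),
            #     )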
self.db.commit()
msg = QtWidgets.QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("Deleted dependent with essn "+ str(self.passedssn) + " dep name '" + self.deldepname +"'")
msg.setWindowTitle("Delete Dependent")
msg.exec()
self.dname.setText("")
self.select_data()
    def reject(self):
        # Commit any pending database work, then let the dialog close.
        # (Assumes this Ui class is mixed into a QDialog subclass, as the original
        # QDialog.reject(self) call implies.)
        if self.db is not None:
            self.db.commit()
        QDialog.reject(self)
| [
"noreply@github.com"
] | noreply@github.com |
de3fe45a87e82c646b0708bb94ef18a5f539f842 | 4d675034878c4b6510e1b45b856cc0a71af7f886 | /mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py | 06c1de2b9010fef13bd2322bbd3352d82a1f3e2f | [
"Apache-2.0",
"BSD-2-Clause-Views",
"MIT",
"BSD-2-Clause"
] | permissive | shinya7y/UniverseNet | 101ebc2ad8f15482ee45ea8d6561aa338a0fa49e | 3652b18c7ce68122dae7a32670624727d50e0914 | refs/heads/master | 2023-07-22T08:25:42.646911 | 2023-07-08T18:09:34 | 2023-07-08T18:09:34 | 263,555,721 | 407 | 58 | Apache-2.0 | 2023-01-27T01:13:31 | 2020-05-13T07:23:43 | Python | UTF-8 | Python | false | false | 4,482 | py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
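
# A hedged usage sketch (assumed values, not from this repo): in practice this head is built
# through mmdet's registry from a config file; the lines below only illustrate the test_cfg keys
# the code above reads (mask_thr_binary, mask_overlap, stuff_area_limit).
#
#     from mmcv import ConfigDict
#     test_cfg = ConfigDict(mask_thr_binary=0.5, mask_overlap=0.5, stuff_area_limit=4096)
#     head = HeuristicFusionHead(num_things_classes=80, num_stuff_classes=53, test_cfg=test_cfg)
#     pan_results = head.simple_test(det_bboxes, det_labels, mask_preds, seg_preds)  # (H, W)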
| [
"noreply@github.com"
] | noreply@github.com |
bded7a0abc4bf1dc4955561f7e0715bcba19006f | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /codeforces/cf326-350/cf334/b.py | 3d79209e1a77d7ad5f7c126cf1c70b802e0ece89 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | # -*- coding: utf-8 -*-
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll
mod = 10**9 + 7
class UnionFind:
def __init__(self, size):
self.rank = [0] * size
self.par = range(size)
self.g_num = size
def find(self, x):
if x == self.par[x]: return x
self.par[x] = self.find(self.par[x])
return self.par[x]
def same(self, x, y):
return self.find(x) == self.find(y)
def unite(self, x, y):
x, y = self.find(x), self.find(y)
if x == y: return
self.g_num -= 1
if (self.rank[x] > self.rank[y]):
self.par[y] = x
else:
self.par[x] = y
if (self.rank[x] == self.rank[y]): self.rank[y] += 1
def group_num(self):
return self.g_num
#prime = [1] * 1000005
#prime[0] = prime[1] = 0
#for i in xrange(int(1000005**0.5) + 1):
# if prime[i]:
# prime[2*i::i] = [0] * len(prime[2*i::i])
p, k = map(int, raw_input().split())
if k == 0:
print pow(p, p - 1, mod)
exit()
uf = UnionFind(p)
cnt = 0
for x in xrange(p):
if x == k*x % p:
if k > 1:
cnt += 1
else:
uf.unite(x, k*x % p)
ans = pow(p, uf.group_num() - cnt, mod)
print ans
| [
"roiti46@gmail.com"
] | roiti46@gmail.com |
f4777bda143cb4bb504692f3c4f72056032d0fb3 | ce7c501af175bcf7834d2f2b896bb6b7f8527bce | /main.py | 290f602c537818c5f2dc519cb94786b326e956aa | [] | no_license | Harikrishnan6336/Python_Learn_From_Home | b167657c8a8661dbb87e4c9263f9ab2555af4426 | 7d2567e11e6c45a44627108b194cbbd74c963cd7 | refs/heads/master | 2021-03-30T09:45:01.294468 | 2020-03-17T19:54:10 | 2020-03-17T19:54:10 | 248,039,844 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,640 | py | #This code is meant to be submitted to Python Learn From Home program by TinkerHub
class Tech:
info = {}
#The key of the dictionary is the name of the participant.
#And the value is a list comprising of [stack, designation, available time]
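    # For example (illustrative values only, not taken from the program):
    #     info = {"Asha": ["Python", 1, 60],    # mentor with 60 minutes available
    #             "Ravi": ["Web", 2, None]}     # learner; no available time recorded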
# Adds the tech stack of the participant
def addStacks(self,name):
        stack = input("\nThe available Stacks are : Python, GO, Web, UI/UX, Flutter \nEnter a stack you are expert at/interested in [Case sensitive] : ")
self.info[name] = [None, None, None]
if name in self.info:
self.info[name][0] = stack
return
# Sets a participant as a mentor or a learner
# 1 denotes Mentor and 2 denotes Learner
def setMentorOrLearner(self,name):
desig = int(input("\nAre you a \n1.Mentor\n2.Learner\n\nEnter your choice : "))
if name in self.info:
self.info[name][1] = desig
return
# Sets the available time for a mentor
def setAvailableTime(self,name):
if self.info[name][1] == 1 :
available_time=int(input("\nEnter available time(in minutes) : "))
if name in self.info:
self.info[name][2] = available_time
return
#Gives the mentors satisfying the given specifications
def getMentor(self,stack,time):
flag = 0
print("\nThe available mentors are : ")
for mentor in self.info:
if self.info[mentor][0] == stack and self.info[mentor][2] >= time:
print("{} ".format(mentor))
flag = 1
if flag == 0:
print("None")
return
obj = Tech()
go = True
while go:
# A menu-driven program
print("\nWELCOME Tech learner/mentor")
print("\nMENU")
print("\n[1].Enter the details of a participant")
print("[2].Check the availablity of mentors")
print("[3].EXIT")
choice = int(input("\nEnter your choice : "))
if(choice == 1):
name = input("\nEnter your name : ")
obj.addStacks(name)
obj.setMentorOrLearner(name)
obj.setAvailableTime(name)
elif(choice == 2):
stack=input("\nThe available Stacks are : Python, GO, Web, UI/UX, Flutter,\nEnter a stack you are interested in learning [Case sensitive] : ")
time=int(input("Enter the required time you need mentoring for : "))
obj.getMentor(stack,time)
elif(choice == 3):
print("\nExiting \nThank You")
break
else:
print("INVALID CHOICE!!!")
flag = input("\nDo you want to continue (Y/N)? ")
if(flag == 'n' or flag == 'N'):
print("\nExiting \nThank You")
go = False
| [
"noreply@github.com"
] | noreply@github.com |
098998f8d95c610204722f8f0990286191492db1 | e9a0efee2089b1c3bf843633c7b226638bc09e0d | /DataStructures and Algorithms/Ammortization onArrays/CaesarCipher.py | 5bb577ea9668f61442f19fefda679a1816f4a8c4 | [
"MIT"
] | permissive | abhishekratnam/Datastructuresandalgorithmsinpython | 41226cf41d288e24dbe9cd9643650151cb2a1037 | 9339319f441755797f4d2818ac9cf742a63ab5ea | refs/heads/master | 2020-04-15T03:16:24.337787 | 2019-02-01T23:47:52 | 2019-02-01T23:47:52 | 164,342,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | class CaesarCipher:
"""Class for doing encryption and decryption using a Caesar Cipher."""
def __init__(self,shift):
"""Construct Caesar Cipher using given integer shift for rotation."""
encoder = [None] * 26
decoder = [None] * 26
for k in range(26):
encoder[k] = chr((k + shift)%26 + ord('A'))
decoder[k] = chr((k - shift)%26 + ord('A'))
self._forward = ''.join(encoder)
self._backward = ''.join(decoder)
def encrypt(self, message):
"""Return string representing encripted message."""
return self._transform(message, self._forward)
def decrypt(self, secret):
"""Returns the decrypted message with given secret."""
return self._transform(secret, self._backward)
def _transform(self, original, code):
"""Utility to perform transformation based on given code string."""
msg = list(original)
for k in range(len(msg)):
if msg[k].isupper():
j = ord(msg[k]) - ord('A')
msg[k] = code[j]
return ''.join(msg)
if __name__ == '__main__':
cipher = CaesarCipher(3)
message = "THE EAGLE IS IN PLAY; MEET AT JOE'S."
coded = cipher.encrypt(message)
print('Secret:', coded)
answer = cipher.decrypt(coded)
print('Message: ', answer)
| [
"noreply@github.com"
] | noreply@github.com |
0e28bd12d0e6eb69f2357634329e50e95b087d15 | 8444ea5cd42c09a7061b481fcb8135f72201d57e | /FileMiner/setup.py | 34c5deb70de7b44f9bacfa68b8bc8558705ba4a0 | [
"MIT"
] | permissive | Igerald/FileMiner-PythonPackageConcept | 9c7489bd5b4f75da713756c3a296cc5f6cd6c7d3 | 77ab9884a0e3848613fa75a5a143072cd3e6122c | refs/heads/master | 2020-09-20T13:13:53.682202 | 2019-11-27T18:26:47 | 2019-11-27T18:26:47 | 224,491,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | import setuptools
with open("README.md",'r') as f:
long_text = f.read()
setuptools.setup(
name = "FileMiner",
version = "1.0.0",
author = "Isaiah Gerald",
author_email = "e0dasci@gmail.com",
description = "pkg-template-description",
long_description = long_text,
long_description_content_type = "text/markdown",
url = "https://github.com/pypa/sampleproject",
packages = setuptools.find_packages(),
classifiers = ["Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",],
)
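
# A hedged usage note (an assumption, not part of the original project): with this setup.py in
# place, the package can typically be installed locally with "pip install ." or packaged with
# "python setup.py sdist bdist_wheel".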
| [
"noreply@github.com"
] | noreply@github.com |
8eefdcd0f560f9474b98e085a4292b064e7dce77 | 65329299fca8dcf2e204132624d9b0f8f8f39af7 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/__init__.py | 21732f34697d6d2ac9444bb3316752278e827cf6 | [
"Apache-2.0"
] | permissive | darylturner/napalm-yang | bf30420e22d8926efdc0705165ed0441545cdacf | b14946b884ad2019b896ee151285900c89653f44 | refs/heads/master | 2021-05-14T12:17:37.424659 | 2017-11-17T07:32:49 | 2017-11-17T07:32:49 | 116,404,171 | 0 | 0 | null | 2018-01-05T16:21:37 | 2018-01-05T16:21:36 | null | UTF-8 | Python | false | false | 11,048 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import subTLVs_
class subTLVs(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv6-reachability/prefixes/prefix/subTLVs. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS prefix sub-TLVs.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__subTLVs',)
_yang_name = 'subTLVs'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__subTLVs = YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'levels', u'level', u'link-state-database', u'lsp', u'tlvs', u'tlv', u'mt-ipv6-reachability', u'prefixes', u'prefix', u'subTLVs']
def _get_subTLVs(self):
"""
Getter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs (list)
YANG Description: List of subTLV types in the LSDB for the specified TLV.
"""
return self.__subTLVs
def _set_subTLVs(self, v, load=False):
"""
Setter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_subTLVs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subTLVs() directly.
YANG Description: List of subTLV types in the LSDB for the specified TLV.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """subTLVs must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
})
self.__subTLVs = t
if hasattr(self, '_set'):
self._set()
def _unset_subTLVs(self):
self.__subTLVs = YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
subTLVs = __builtin__.property(_get_subTLVs)
_pyangbind_elements = {'subTLVs': subTLVs, }
import subTLVs_
class subTLVs(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv6-reachability/prefixes/prefix/subTLVs. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS prefix sub-TLVs.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__subTLVs',)
_yang_name = 'subTLVs'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__subTLVs = YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'levels', u'level', u'link-state-database', u'lsp', u'tlvs', u'tlv', u'mt-ipv6-reachability', u'prefixes', u'prefix', u'subTLVs']
def _get_subTLVs(self):
"""
Getter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs (list)
YANG Description: List of subTLV types in the LSDB for the specified TLV.
"""
return self.__subTLVs
def _set_subTLVs(self, v, load=False):
"""
Setter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_subTLVs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subTLVs() directly.
YANG Description: List of subTLV types in the LSDB for the specified TLV.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """subTLVs must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
})
self.__subTLVs = t
if hasattr(self, '_set'):
self._set()
def _unset_subTLVs(self):
self.__subTLVs = YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
subTLVs = __builtin__.property(_get_subTLVs)
_pyangbind_elements = {'subTLVs': subTLVs, }
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |