text
stringlengths
38
1.54M
# Plot per-antenna gain amplitudes from a saved calibration solution,
# masking known-bad antennas and RFI-contaminated frequency channels.
import numpy as np, matplotlib.pyplot as plt

d = np.load('2457548.45923.npz')

# Antennas that are not physically present / usable in this data set.
NOT_REAL_ANTS="0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 23, 24, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 82, 83, 84, 85, 86, 87, 90, 91, 92, 93, 94, 95, 98, 99, 100, 101, 102, 103, 106, 107, 108, 109, 110, 111"
ex_ants = [int(a) for a in NOT_REAL_ANTS.split(', ')]
keep_ants = [a for a in range(d['gains'].shape[2]) if a not in ex_ants]
# Also drop three individually flagged antennas.
good_ants = [a for a in keep_ants if a not in [81, 22, 43]]

# Channel mask: band edges, ORBCOMM, and assorted narrow-band channels.
low = list(range(0, 102))
orbcomm = list(range(375, 390))
others = (list(range(695, 705)) + list(range(759, 761)) + list(range(768, 771))
          + list(range(830, 832)) + list(range(850, 852)) + list(range(921, 1023)))
others += [510, 511, 512, 850, 851, 852, 853, 854]
msk = low + orbcomm + others

freqs = np.linspace(100, 200, num=1024)
good_chans = [c for c in range(1024) if c not in msk]
# Binary spectrum mask: 1 on kept channels, 0 on masked ones.
msk_spec = np.zeros((1024))
for chan in good_chans:
    msk_spec[chan] = 1

f, ax = plt.subplots()
# Shade two reference sub-bands.
ax.fill_between(freqs[202:301], 0, 3, facecolor='k', alpha=0.2)
ax.fill_between(freqs[581:681], 0, 3, facecolor='k', alpha=0.2)
for idx, ant in enumerate(good_ants):
    if ant == 80:
        continue
    # Solid lines for the first nine antennas, dashed afterwards.
    ls = '-' if idx <= 8 else '--'
    msk_gain = np.ma.masked_where(np.abs(d['gains'][0, :, ant]) * msk_spec == 0.,
                                  d['gains'][0, :, ant])
    #ax.plot(np.fft.fftshift(np.fft.fftfreq(1024,np.diff(freqs)[0]*1e6))*1e9,
    #        np.abs(np.fft.fftshift(np.fft.ifft(msk_gain))),
    #        ls,label=str(a),lw=2)
    ax.plot(freqs, np.abs(msk_gain), ls, label=str(ant), lw=2)
ax.set_ylim(0, 2.75)
#ax.set_ylim(-np.pi,np.pi)
#ax.set_xlim(100,200)
#plt.legend()
#plt.xlabel('Frequency [MHz]',size=12)
plt.ylabel('Amplitude [arb.]', size=12)
plt.grid()
plt.show()
""" This problem was asked by Amazon. Run-length encoding is a fast and simple method of encoding strings. The basic idea is to represent repeated successive characters as a single count and character. For example, the string "AAAABBBCCDAA" would be encoded as "4A3B2C1D2A". Implement run-length encoding and decoding. You can assume the string to be encoded have no digits and consists solely of alphabetic characters. You can assume the string to be decoded is valid. """ import unittest def encode(s: str) -> str: if not s: return '' counter = 1 char = s[1] encoded = '' for c in s[1:]: if c is char: counter += 1 else: encoded += f'{counter}{char}' char = c counter = 1 encoded += f'{counter}{char}' return encoded def decode(s: str) -> str: if not s: return '' counter = int(s[0]) decoded = counter * s[1] for i, c in enumerate(s[2:]): if i % 2 is 0: counter = int(c) else: decoded += counter * c return decoded class TestSolution(unittest.TestCase): def test_given_encode(self) -> None: self.assertEqual(encode('AAAABBBCCDAA'), '4A3B2C1D2A') def test_given_decode(self) -> None: self.assertEqual(decode('4A3B2C1D2A'), 'AAAABBBCCDAA') if __name__ == '__main__': unittest.main()
"""Codewars kata: replace every letter with its 1-based position in the
alphabet ('a' = 1, 'b' = 2, ...); any non-letter character is dropped.

The original row interleaved prose ("soln:", "or", "other coders soln:")
with five competing redefinitions of the same function and was not valid
Python; this keeps the single canonical dictionary-based solution.
"""
from string import ascii_lowercase

# Map each lowercase letter to its 1-based position, pre-rendered as a string.
LETTERS = {letter: str(index)
           for index, letter in enumerate(ascii_lowercase, start=1)}


def alphabet_position(text):
    """Return the alphabet positions of the letters in *text*, space-separated.

    :param text: arbitrary string; case-insensitive, non-letters ignored
    :return: e.g. 'abc' -> '1 2 3'; '' when *text* has no letters
    """
    return ' '.join(LETTERS[ch] for ch in text.lower() if ch in LETTERS)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# (C) w:fa:User:Reza1615, 2020
# (C) w:fa:User:Huji, 2020
# Distributed under the terms of the CC-BY-SA 3.0
#
# Create today's timed-deletion category page on Persian Wikipedia
# if it does not already exist.
import pywikibot
from persiantools import digits
from datetime import datetime

# Persian month names, indexed by (Gregorian month - 1).
month_names = [
    "ژانویه", "فوریه", "مارس", "آوریل",
    "مه", "ژوئن", "ژوئیه", "اوت",
    "سپتامبر", "اکتبر", "نوامبر", "دسامبر"
]

summary = "ربات:ساخت رده حذف زمان‌دار"
content = "{{جا:الگو:شروع حذف زمان‌دار}}"

# Today's date, with day and year rendered in Persian digits.
today = datetime.now()
day = digits.en_to_fa(str(today.day))
month = month_names[today.month - 1]
year = digits.en_to_fa(str(today.year))

title = 'رده:صفحه‌های حذف زمان‌دار در %s %s %s' % (day, month, year)

page = pywikibot.Page(pywikibot.getSite("fa"), title)
if not page.exists():
    page.put(content, summary)
# Evaluate a trained resnet/inception classifier on the ImageNet validation
# set, splitting each batch across all visible GPUs and reporting running
# top-1 / top-5 accuracy (TensorFlow 1.x graph/session style).
import os.path
import shutil
import sys
import numpy as np
import gc
import tensorflow as tf

from nets import nets
from data import data
from runs import preprocessing
import pickle


def evaluate_network(opt):
    """Restore the checkpoint named by opt and run one pass over the
    validation set; returns (top-1 accuracy, top-5 accuracy)."""
    # Initialize dataset and creates TF records if they do not exist
    dataset = data.ImagenetDataset(opt)

    # Repeatable datasets for training
    val_dataset = dataset.create_dataset(set_name='val', repeat=True)

    # Handles to switch datasets
    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(
        handle, val_dataset.output_types, val_dataset.output_shapes)
    val_iterator = val_dataset.make_initializable_iterator()
    ################################################################################################

    ################################################################################################
    # Declare DNN
    ################################################################################################

    # Get data from dataset dataset
    image, label = iterator.get_next()

    # Enumerate the GPUs available on this machine.
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']

    # Split the batch into one shard per GPU.
    # NOTE(review): zip(tf.split(...)) yields 1-tuples, relying on
    # tf.reshape flattening them — confirm this is intentional.
    if opt.dnn.name == 'resnet':
        image_split = [tf.reshape(t, [-1, data._DEFAULT_IMAGE_SIZE, data._DEFAULT_IMAGE_SIZE, 3])
                       for t in zip(tf.split(image, len(gpus)))]
    elif opt.dnn.name == 'inception':
        image = tf.cast(image, tf.float32)
        image_split = [tf.reshape(t, [-1, 299, 299, 3]) for t in zip(tf.split(image, len(gpus)))]
    label_split = [tf.reshape(t, [-1]) for t in zip(tf.split(label, len(gpus)))]

    # Call DNN: build one tower per GPU, sharing variables.
    logits_list = []
    for idx_gpu, gpu in enumerate(gpus):
        with tf.device(gpu):
            with tf.name_scope('gpu_' + str(idx_gpu)) as scope:
                # Per-image preprocessing function for this architecture.
                to_call = getattr(preprocessing, opt.dnn.name)
                u_images = []
                # NOTE(review): num= gets a float under Python 3 division;
                # tf.unstack expects an int — confirm.
                for _image in tf.unstack(image_split[idx_gpu], num=opt.hyper.batch_size / len(gpus), axis=0):
                    im_tmp = to_call(_image, opt)
                    u_images.append(im_tmp)
                _images = tf.stack(u_images)

                # Network-building function for this architecture.
                to_call = getattr(nets, opt.dnn.name)
                logit, activations = to_call(_images, opt)

                tf.get_variable_scope().reuse_variables()
                logits_list.append(logit)

    # Concatenate tower outputs back into one [batch, 1001] tensor.
    logits = tf.reshape(tf.stack(logits_list, axis=0), [-1, 1001])
    pred_label = tf.argmax(logits, axis=1)

    acc_1 = tf.nn.in_top_k(predictions=logits, targets=label, k=1, name='top_1_op')
    acc_5 = tf.nn.in_top_k(predictions=logits, targets=label, k=5, name='top_5_op')
    ################################################################################################

    config = tf.ConfigProto(allow_soft_placement=True)#inter_op_parallelism_threads=80,
    #intra_op_parallelism_threads=80,
    #
    #config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        print("RESTORE")
        print(opt.log_dir_base + opt.name)

        # Restore weights; inception restores the EMA shadow variables.
        if opt.dnn.name == 'resnet':
            saver = tf.train.Saver(max_to_keep=opt.max_to_keep_checkpoints)
            saver.restore(sess, tf.train.latest_checkpoint(opt.log_dir_base + opt.name + '/'))
        elif opt.dnn.name == 'inception':
            variable_averages = tf.train.ExponentialMovingAverage(0.9999)
            variables_to_restore = variable_averages.variables_to_restore()
            saver = tf.train.Saver(variables_to_restore)
            # saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(opt.log_dir_base + opt.name + '/'))

        '''
        ckpt = tf.train.get_checkpoint_state(opt.log_dir_base + opt.name + '/')
        if ckpt and ckpt.model_checkpoint_path:
            if os.path.isabs(ckpt.model_checkpoint_path):
                # Restores from checkpoint with absolute path.
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                # Restores from checkpoint with relative path.
                saver.restore(sess, os.path.join(opt.log_dir_base + opt.name + '/',
                                                 ckpt.model_checkpoint_path))
        '''

        val_handle_full = sess.run(val_iterator.string_handle())

        # Run one pass over a batch of the validation dataset.
        sess.run(val_iterator.initializer)
        acc_tmp_1 = 0.0
        acc_tmp_5 = 0.0
        total = 0
        total_iter = int(dataset.num_total_images / opt.hyper.batch_size) + 1
        for num_iter in range(total_iter):
            acc_val_1, acc_val_5 = sess.run([acc_1, acc_5],
                                            feed_dict={handle: val_handle_full})
            # Accumulate per-example hits to compute running accuracies.
            for i in range(len(acc_val_1)):
                total += 1
                acc_tmp_1 += acc_val_1[i]
                acc_tmp_5 += acc_val_5[i]
            print('iteration:', str(num_iter) + '/' + str(total_iter-1),
                  'running_top-1:', acc_tmp_1 / float(total),
                  'running_top-5:', acc_tmp_5 / float(total))
            sys.stdout.flush()

        if total > 0:
            ret_acc = acc_tmp_1 / float(total)
            ret_acc_5 = acc_tmp_5 / float(total)

        sys.stdout.flush()
        sess.close()

    # NOTE(review): ret_acc/ret_acc_5 are unbound when total == 0.
    return ret_acc, ret_acc_5


def run(opt):
    """Entry point: print the experiment header, evaluate, report accuracies."""
    ################################################################################################
    # Read experiment to run
    ################################################################################################

    # Skip execution if instructed in experiment
    if opt.skip:
        print("SKIP")
        quit()

    print('name:', opt.name)
    print('factor:', opt.dnn.factor)
    print('batch size:', opt.hyper.batch_size)
    ################################################################################################

    ################################################################################################
    # Define training and validation datasets through Dataset API
    ################################################################################################
    record_acc, record_acc_5 = evaluate_network(opt)
    tf.reset_default_graph()
    gc.collect()

    print("top-1:", record_acc)
    print("top-5: ", record_acc_5)
    sys.stdout.flush()
    print(":)")
#!/bin/python3

import math
import os
import random
import re
import sys


class SinglyLinkedListNode:
    """Node of a singly linked list."""

    def __init__(self, node_data):
        self.data = node_data
        self.next = None


class SinglyLinkedList:
    """Minimal singly linked list holding only a head pointer."""

    def __init__(self):
        self.head = None


def print_singly_linked_list(node, sep, fptr):
    """Write the list values to *fptr*, separated by *sep*."""
    while node:
        fptr.write(str(node.data))
        node = node.next
        if node:
            fptr.write(sep)


# Complete the insertNodeAtTail function below.
def insertNodeAtTail(head, data):
    """Append *data* at the tail and return the (possibly new) head.

    Fix: the original fell through after ``head = new_node``, briefly making
    the new node point at itself before undoing it; return early instead and
    use ``is None`` rather than ``== None``.
    """
    new_node = SinglyLinkedListNode(data)
    if head is None:
        return new_node
    tail = head
    while tail.next is not None:
        tail = tail.next
    tail.next = new_node
    return head


if __name__ == '__main__':
    pass  # NOTE(review): HackerRank driver body truncated in the source view
"""Build a one-year hourly internal-heat-gain profile.

Occupants contribute Qday watts between t1 and t2 o'clock (weekday envelope
from a ~99%-duty weekly square wave) and nightQ watts otherwise. The result
``Qinternal`` is a (days*24, 1) column vector, one value per hour.

Fixes: removed the unused ``matplotlib.pyplot`` import (all plotting below
is commented out) and corrected the misleading "in seconds" comment — the
roll shift is in samples, and one sample is one hour.
"""
from scipy import signal
import numpy as np  # linear algebra

DeltaQ = 150            # internal heat gain difference between day and night [W]
Qday = 400              # day internal heat gain [W]
nightQ = Qday - DeltaQ  # night internal heat gain [W]
t1 = 8                  # presence from [hour]
t2 = 23                 # presence until [hour]
days_hours = 24         # number of hours in one day (start hour 0)
days = 365              # number of simulation days
periods = 24 * 3600 * days      # simulated span in seconds
pulse_width = (t2 - t1) / 24    # daily duty cycle (fraction of a day)
phase_delay = t1                # shift in samples (= hours) so the pulse starts at t1

# One sample per hour over the whole year, on a 0..1 axis for signal.square
# (+1 sample because the axis starts at 0; the extra row is dropped at the end).
t = np.linspace(0, 1, (days_hours * days) + 1, endpoint=False)

# Daily occupancy pulse: 1 during presence hours, 0 otherwise.
pulseday = signal.square(2 * np.pi * days * t, duty=pulse_width)
pulseday = np.clip(pulseday, 0, 1)
pulseday = np.roll(pulseday, phase_delay)

# ______ weekly pulse generator ______________
week = days / 7
pulse_w = 0.99
pulse_week = signal.square(2 * np.pi * week * t, duty=pulse_w)
pulse_week = np.clip(pulse_week, 0, 1)

# Simulation time axis in seconds (kept for plotting).
time_t = np.linspace(0, periods, (days_hours * days) + 1)

# Internal heat gain as a column vector.
Qinternal = (nightQ + pulseday * DeltaQ * pulse_week)[:, np.newaxis]

# Plot 48 hours (requires matplotlib):
# import matplotlib.pyplot as plt
# plt.plot(time_t[0:48], Qinternal[0:48])
# plt.ylabel('Internal heat gain (W)')
# plt.xlabel('time (sec)')
# plt.legend(loc=2)

# Drop the extra final sample so the series is exactly days*24 rows long.
Qinternal = np.delete(Qinternal, -1, 0)
#!usr/bin/python
# coding:utf-8
from numpy import *

# Collaborative-filtering helpers over a user-item rating matrix.
# For userSimiliar/recommend, rows of dataMat are users and columns are items.


def eulidSim(inA, inB):
    """Euclidean-distance similarity mapped into (0, 1].

    Fix: the original called linalg.norm(inA, inB), passing inB as the
    ``ord`` argument instead of measuring the distance between the vectors.
    """
    return 1.0 / (1.0 + linalg.norm(inA - inB))


def pearsSim(inA, inB):
    """Pearson-correlation similarity mapped from [-1, 1] to [0, 1]."""
    if len(inA) < 3:
        # Too few samples for a meaningful correlation.
        return 1.0
    return 0.5 + 0.5 * corrcoef(inA, inB, rowvar=0)[0][1]


def cosSim(inA, inB):
    """Cosine similarity mapped from [-1, 1] to [0, 1].

    Fix: the original computed float(inA * inB.T), which only works for row
    vectors; multiply(...).sum() handles row and column vectors alike, so
    the same measure works for userSimiliar (rows) and standEst (columns).
    """
    num = float(multiply(inA, inB).sum())
    denom = linalg.norm(inA) * linalg.norm(inB)
    return 0.5 + 0.5 * (num / denom)


def avg(intA):
    """Mean of the non-zero entries of a (row) matrix."""
    return float(mean(intA[intA > 0], 1)[0][0])


def userSimiliar(dataMat, user, simMeas, N=5):
    """User-based recommendations for *user*'s unrated items.

    :param dataMat: user-item rating matrix (rows = users, columns = items)
    :param user: row index of the target user
    :param simMeas: similarity function of two row vectors
    :param N: maximum number of (item, score) pairs to return
    """
    userNum = shape(dataMat)[0]            # number of users
    usersSim = zeros((1, userNum))         # similarity of each user to *user*
    # Column indices of the items *user* has not rated.
    unratedItems = nonzero(dataMat[user, :].A == 0)[1]
    unratedItemGrad = dict()               # similarity-weighted rating sums
    unratedItemNum = dict()                # accumulated similarity weights
    for item in unratedItems:
        unratedItemNum[item] = 0
        unratedItemGrad[item] = 0
    for i in range(userNum):
        if i == user:
            continue
        # Similarity between user i and the target user.
        usersSim[0][i] = simMeas(dataMat[user, :], dataMat[i, :])
        # Items rated by user i (or either) but not by the target user.
        unratedItem = list(set(nonzero(logical_or(dataMat[user, :] > 0, dataMat[i, :] > 0))[1])
                           - set(nonzero(dataMat[user, :])[1]))
        if len(unratedItem) == 0:
            continue  # user i rated nothing the target user has not
        for item in unratedItem:
            unratedItemGrad[item] = unratedItemGrad[item] + usersSim[0][i] * dataMat[i, item]
            unratedItemNum[item] = unratedItemNum[item] + usersSim[0][i]
    for item in unratedItems:
        if unratedItemNum[item] == 0:
            continue
        unratedItemGrad[item] = unratedItemGrad[item] / unratedItemNum[item]
    recommand = sorted(unratedItemGrad.items(), key=lambda jj: jj[1], reverse=True)
    # Fix: guard against fewer than N candidates (the original raised IndexError).
    return [recommand[j] for j in range(min(N, len(recommand)))]


def standEst(dataMat, user, simMeas, item):
    """Item-based estimate of *user*'s rating for *item*.

    For every item j the user has rated, weigh that rating by the similarity
    between the columns of j and *item* over the users who rated both.
    """
    n = shape(dataMat)[1]
    simTotal = 0.0
    ratSimTotal = 0.0
    for j in range(n):
        userRating = dataMat[user, j]
        if userRating == 0:
            continue
        # Users who rated both *item* and j (logical_and of the two columns).
        overLap = nonzero(logical_and(dataMat[:, item].A > 0, dataMat[:, j].A > 0))[0]
        if len(overLap) == 0:
            similarity = 0
        else:
            # Similarity of the two item columns restricted to those users.
            similarity = simMeas(dataMat[overLap, item], dataMat[overLap, j])
        simTotal += similarity
        ratSimTotal += similarity * userRating
    if simTotal == 0:
        return 0
    return ratSimTotal / simTotal


def recommend(dataMat, user, N=3, simMeas=cosSim, estMethod=standEst):
    """Top-N item-based recommendations for *user* as (item, score) pairs.

    Fix: the original iterated over the whole nonzero() tuple instead of the
    column-index array, so it never enumerated the individual unrated items
    (and the 'rated everything' check could never trigger).
    """
    unratedItems = nonzero(dataMat[user, :].A == 0)[1]
    if len(unratedItems) == 0:
        return 'you rated everything'
    itemScores = []
    for item in unratedItems:
        estimatedScore = estMethod(dataMat, user, simMeas, item)
        itemScores.append((item, estimatedScore))
    # Sort by predicted score, best first.
    return sorted(itemScores, key=lambda jj: jj[1], reverse=True)[:N]


def simBetweenUsers(dataMat, users, simMeas):
    """Pairwise similarity matrix for the given user indices."""
    simResult = zeros(((users[-1] + 1), (users[-1] + 1)))
    for user1 in users:
        for user2 in users:
            if user1 != user2:
                simResult[user1][user2] = simMeas(dataMat[user1, :], dataMat[user2, :])
    return simResult
#!/usr/bin/python3
# Threaded socket server that receives job-statistics CSV blobs from clients
# and feeds them to the Hadoop job-info processor.

# import socket programming library
import socket
import json
import time
import csv
import hadoop
import pandas
from io import StringIO

# import thread module
from _thread import *
import threading

# Serialises stdout writes from the per-connection threads.
print_lock = threading.Lock()


def processJobInfo(stats):
    """Parse the received CSV lines into a DataFrame and print each row."""
    statsstr = '\n'.join(stats)
    print(statsstr)
    jobsInfo = pandas.read_csv(StringIO(statsstr), sep=',')
    for index, row in jobsInfo.iterrows():
        print(row)
        #hadoop.getJobInfo(row['JobId'])


# thread fuction
def threaded(c, addr):
    """Handle one client connection: a tiny two-state protocol that waits for
    a 'lenght:<n>' header, acknowledges, then reads n bytes of stats data."""
    # lock acquired by client
    print_lock.acquire()
    print('Connected to :', addr[0], ':', addr[1])
    print_lock.release()
    stat_file = "/mnt/temp/state_" + addr[0] + "_" + str(addr[1]);  # NOTE(review): assigned but never used
    lenght = 0;
    # Protocol states.
    wait_for_stat_flag = 1
    wait_for_stat_data = 2
    status = wait_for_stat_flag;
    done = False
    while not done:
        if status == wait_for_stat_flag:
            print("Wait for Start Flag")
            # data received from client
            data = c.recv(1024).decode('ascii')
            if data.startswith('lenght:'):
                lenght = int(data.split(":")[1])
                c.send(("ready\n").encode('ascii'));
                status = wait_for_stat_data
            else: #data.startswith('done'):
                print_lock.acquire()
                print('Connection Closed by Client')
                print_lock.release()
                done = True
                c.close()
        elif status == wait_for_stat_data:
            # data received from client
            print("Wait for Data" + str(lenght))
            data = c.recv(lenght);
            # NOTE(review): second recv overwrites the first read — confirm
            # this double read is intentional and not a lost-data bug.
            data = c.recv(lenght);
            stats = data.decode('ascii');
            c.send(str("done\n").encode('ascii'))
            # The first two lines are skipped before parsing.
            processJobInfo(stats.split('\n')[2:]);
            # send back reversed string to client
            status = wait_for_stat_flag;


def Main():
    """Bind, listen, and spawn one thread per accepted connection."""
    host = ""

    # reverse a port on your computer
    # in our case it is 12345 but it
    # can be anything
    port = 4964
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((host, port))
    print("socket binded to post", port)

    # put the socket into listening mode
    s.listen(5)
    print("socket is listening")

    # a forever loop until client wants to exit
    while True:
        # establish connection with client
        c, addr = s.accept()

        # Start a new thread and return its identifier
        start_new_thread(threaded, (c, addr, ))
    s.close()  # NOTE(review): unreachable — the accept loop never exits


if __name__ == '__main__':
    Main()
import cv2
import cvzone
from cvzone.SelfiSegmentationModule import SelfiSegmentation
import os

# Virtual-background demo: segment the person out of each video frame and
# composite them over a background image selectable with the a/d keys.
cap = cv2.VideoCapture('1.avi')
cap.set(3, 640)
cap.set(4, 480)
cap.set(cv2.CAP_PROP_FPS, 60)

segmentor = SelfiSegmentation()
fpsReader = cvzone.FPS()

imgBg = cv2.imread('Images/1.jpg')
imgBg = cv2.resize(imgBg, (640, 480))

# Preload every background image, resized to the frame size.
imgList = []
for imgPath in os.listdir('Images'):
    background = cv2.imread(f'Images/{imgPath}')
    imgList.append(cv2.resize(background, (640, 480)))

imgIndex = 0
while True:
    success, img = cap.read()
    # imgOut = segmentor.removeBG(img, (0, 255, 0), threshold=0.8)
    imgOut = segmentor.removeBG(img, imgList[imgIndex], threshold=0.8)

    # Show original and composited frame side by side with an FPS overlay.
    imgStacked = cvzone.stackImages([img, imgOut], 2, 0.8)
    _, imgStacked = fpsReader.update(imgStacked, color=(0, 0, 255))
    cv2.imshow('image out', imgStacked)

    key = cv2.waitKey(1)
    if key == ord('a') and imgIndex > 0:
        imgIndex -= 1
    elif key == ord('d') and imgIndex < len(imgList) - 1:
        imgIndex += 1
    elif key == ord('q'):
        break
import tensorflow as tf

import models.bert_util.bert_utils
from explain.explain_model import CrossEntropyModeling, CorrelationModeling
from explain.pairing.match_predictor import build_model
from tf_util.tf_logging import tf_logging
from trainer.multi_gpu_support import get_multiple_models, get_avg_loss, get_avg_tensors_from_models, \
    get_batch2feed_dict_for_multi_gpu, get_concat_tensors_from_models, get_concat_tensors_list_from_models, get_train_op
from trainer.tf_train_module import get_train_op2


class LMSModel:
    # Layer-wise match-score (LMS) model: wraps a task model, an explanation
    # model and a per-layer match predictor, optionally replicated over GPUs.
    def __init__(self, modeling_option, bert_hp, lms_config, num_gpu):
        """Build the graph.

        :param modeling_option: 'ce' (cross entropy) or 'co' (correlation),
            selecting the explanation modeling class.
        :param bert_hp: BERT hyperparameters passed to build_model.
        :param lms_config: LMS configuration; target_idx selects which
            explanation score to expose.
        :param num_gpu: 1 for a single-tower graph, otherwise one tower per GPU.
        """
        ex_modeling_class = {
            'ce': CrossEntropyModeling,
            'co': CorrelationModeling
        }[modeling_option]

        def build_model_fn():
            # One (task model, explanation model, match predictor) tower.
            return build_model(ex_modeling_class, bert_hp, lms_config)

        self.num_gpu = num_gpu
        self.match_predictor = None
        self.match_predictor_list = None
        self.bert_hp = bert_hp
        if num_gpu == 1:
            tf_logging.info("Using single GPU")
            task_model_, ex_model_, match_predictor = build_model_fn()
            loss_tensor = match_predictor.loss
            per_layer_loss = match_predictor.all_losses
            batch2feed_dict = models.bert_util.bert_utils.batch2feed_dict_4_or_5_inputs
            logits = task_model_.logits
            ex_score_tensor = ex_model_.get_ex_scores(lms_config.target_idx)
            per_layer_logit_tensor = match_predictor.per_layer_logits
            self.match_predictor = match_predictor
        else:
            # Multi-GPU: build one tower per GPU and aggregate their tensors.
            main_models, ex_models, match_predictor_list = zip(*get_multiple_models(build_model_fn, num_gpu))
            loss_tensor = get_avg_loss(match_predictor_list)
            per_layer_loss = get_avg_tensors_from_models(match_predictor_list,
                                                         lambda match_predictor: match_predictor.all_losses)
            batch2feed_dict = get_batch2feed_dict_for_multi_gpu(main_models)
            logits = get_concat_tensors_from_models(main_models, lambda model: model.logits)

            def get_loss_tensor(model):
                # NOTE(review): defined but unused in this view.
                t = tf.expand_dims(tf.stack(model.get_losses()), 0)
                return t

            ex_score_tensor = get_concat_tensors_from_models(
                ex_models, lambda model: model.get_ex_scores(lms_config.target_idx))
            per_layer_logit_tensor = \
                get_concat_tensors_list_from_models(match_predictor_list, lambda model: model.per_layer_logits)
            self.match_predictor_list = match_predictor_list

        self.logits = logits
        self.batch2feed_dict = batch2feed_dict
        self.ex_score_tensor = ex_score_tensor
        self.loss_tensor = loss_tensor
        self.per_layer_logit_tensor = per_layer_logit_tensor
        self.per_layer_loss = per_layer_loss

    # logits for nli classification
    def get_logits(self):
        return self.logits

    # logits for match score of each layers
    def get_lms(self):
        return self.per_layer_logit_tensor

    def get_train_op(self, lr, max_steps):
        # Optimizer over the match-predictor loss(es), scoped so its
        # variables are isolated under "match_optimizer".
        if self.num_gpu == 1:
            with tf.variable_scope("match_optimizer"):
                train_op = get_train_op2(self.match_predictor.loss, lr, "adam", max_steps)
        else:
            with tf.variable_scope("match_optimizer"):
                train_op = get_train_op([m.loss for m in self.match_predictor_list], lr, max_steps)
        return train_op
def try5():
    # Demo: build a C enum and print its typedef / pure / mixed definitions
    # through BraceDefMaker. Relies on Definer, CFunc, CPtr, CStruct, CUnion,
    # CEnum, CTypeRef, PrimTypes and BraceDefMaker defined elsewhere in this
    # project; the struct/union built below is constructed but unused here.
    d = Definer()
    m = d.matcher
    # int (*)(void **) style function type (currently unused, see the
    # commented-out MY_PROC rule below).
    f1 = CFunc(PrimTypes.INT)
    f1.add(CPtr(CPtr(PrimTypes.VOID)))
    m.add_rule(CPtr(PrimTypes.VOID), 'PVOID')
    m.add_rule(PrimTypes.INT, 'INT')
    m.tree_names_list.append('MYSTRUC')
    # m.add_rule(CPtr(f1), 'MY_PROC')
    # Struct with an anonymous union member referencing MYSTRUC.
    s1 = CStruct()
    s1.add(PrimTypes.UINT, 'a')
    u1 = CUnion()
    u1.add(PrimTypes.CHAR, 'b1')
    u1.add(CPtr(CTypeRef('MYSTRUC')), 'b2')
    s1.add(u1, '')
    s1.add(PrimTypes.INT, 'c')
    # mk = BraceDefMaker('TEST_16', s1, d)
    # Enum actually emitted by this demo.
    e1 = CEnum()
    e1.add('RED', 10)
    e1.add('GREEN', 20)
    e1.add('BLUE', 30)
    mk = BraceDefMaker('TEST16', e1, d)
    print(mk.typedef())
    print(mk.pure_def())
    print(mk.mixed_def())


# try5()
# Python 2 script: propagate measurement uncertainties for the baryon
# density using the `uncertainties` package, then compare the error on
# ln(Omega_b) against Error(Omega_b)/Omega_b.
from uncertainties import ufloat
from uncertainties.umath import *

print
print '*************Values *********************'
print
# Hubble constant H0 with its 1-sigma uncertainty.
H = ufloat(67.4000000, 1.40000000)  # H = 67.4+/-1.4
h = H/100.0  # reduced Hubble parameter
print 'h = ', h
# Omega_b * h^2 with its 1-sigma uncertainty.
obh2 = ufloat(0.0220700000, 0.000330000000)
print 'obh2 = ' , obh2
ob = obh2/h**2  # baryon density Omega_b (uncertainty propagated)
ErrorLnOb = 0.00124  # externally quoted error on ln(Omega_b)
LnOb = log(ob)
#*******************************
#print 'ErrorLnOb0 =' , ErrorLnOb
#print 'ob0 = ', ob
#print 'ErrorOb0 = ' , ob.s
#*********************************
print
print '**** check if ErrorLnOmega_b = ErrorOmega_b/ Omega_b******* '
print
ErrorOb = exp(ErrorLnOb)
print 'LnOb0 = ', LnOb
print 'ErrorLnOb = ', (ErrorLnOb/LnOb)
# .s is the propagated standard deviation, .n the nominal value.
print 'ErrorOb /Ob = ', ob.s / ob.n
print 'ErrorLnOb x Ob = ', LnOb.s * ob.n
# Read five integers, report their average and count, remove one
# user-chosen value, and report again.
# Fix: the average/count reporting was duplicated verbatim; it is now a
# helper, and the remove uses list.remove() instead of del+index.


def _report(nums):
    """Print the average and element count of *nums*.

    The count is labelled 'range' to keep the original output unchanged.
    """
    avg = 0
    for x in nums:
        avg += int(x)
    avg = avg / len(nums)
    print("The average is: " + str(avg))
    print("The range is: " + str(len(nums)))


numList = []
for x in range(5):
    numList.append(int(input("Number " + str(x+1) + ": ")))
print("You entered: " + str(numList))
_report(numList)

rem = int(input("Which item do you want to remove?: "))
numList.remove(rem)  # same effect as del numList[numList.index(rem)]
print("The new list has the following values: " + str(numList))
_report(numList)
# -*- mode: Python; -*-

'''Implementation of vrt-data.'''

import os
from subprocess import Popen, PIPE

from libvrt.args import BadData
from libvrt.args import transput_args

# TODO here AND rel-tools to RAISE not EXIT on failure

from libvrt.bins import SORT
from libvrt.dataline import valuegetter

# remnants of vrt-meta seem good here
from libvrt.metaname import nametype, isname
from libvrt.metamark import marktype

def parsearguments(argv, *, prog = None):
    # Build and parse the command line for the vrt-data transput tool.

    description = '''

    Present the annotated tokens in a VRT document as a relation in
    the form of a Tab-Separated Values (TSV) document, complete with
    a head and unique rows in the body. Initially identified
    positional names become field names, or default to v1, v2, up to
    the initially encountered number of fields, while the
    corresponding values become the content of records. This is not
    a VRT validator.

    '''

    parser = transput_args(description = description,
                           inplace = False)

    parser.add_argument('--quiet', '-q', action = 'store_true',
                        help = '''

                        do not warn when a token has a different
                        number of annotations than the first name
                        comment or the first token (default is to
                        warn four times)

                        ''')
    parser.add_argument('--mark', '-m', metavar = 'value',
                        type = marktype,
                        default = b'',
                        help = '''

                        a mark to use as the value when there are
                        too few fields (defaults to the empty string
                        - should the default be visible?)

                        ''')

    # --tag and --unique are mutually exclusive and one is required.
    group = parser.add_mutually_exclusive_group(required = True)
    group.add_argument('--tag', '-t', metavar = 'name',
                       type = nametype,
                       help = '''

                       name to use for a tag field to number the
                       records of the resulting relation

                       ''')
    group.add_argument('--unique', '-u', action = 'store_true',
                       help = '''

                       omit duplicate records (implies sorting)

                       ''')

    args = parser.parse_args()
    args.prog = prog or parser.prog

    return args

def main(args, ins, ous):
    '''Transput VRT (bytes) in ins to TSV (bytes) in ous.'''

    # Keep only token lines and the positional-attributes name comment.
    data = (
        line
        for line in ins
        if not line.isspace()
        if (
                not (line.startswith(b'<'))
                or line.startswith(b'<!-- #vrt positional attributes: ')
        )
    )

    line = next(data, None)

    if line is None:
        raise BadData('no data, no head')

    if line.startswith(b'<'):
        # Field names come from the name comment.
        left, rest = line.rstrip(b'->\r\n').split(b':')
        head = rest.split()
    else:
        # No name comment: default names v1, v2, ... for as many
        # fields as the first token line has.
        head = [
            'v{}'.format(k).encode('utf-8')
            for k, v in enumerate(line.rstrip(b'\r\n').split(b'\t'),
                                  start = 1)
        ]

    if (not all(isname(name) for name in head)
        or len(set(head)) < len(head)):
        raise BadData('bad names')

    # Extracts the per-token values, padding with args.mark when short.
    values = valuegetter(head,
                         missing = args.mark,
                         warn = not args.quiet,
                         many = 4,
                         prog = args.prog)

    if args.tag:
        if args.tag in head:
            raise BadData('tag name in head already')
        def ship(rec, tag):
            # One output record, with the running tag as the last field.
            ous.write(b'\t'.join((*rec, str(tag).encode('utf-8'))))
            ous.write(b'\n')
            return
        ship(head, args.tag.decode('utf-8'))
        ship(values(line), 1)
        for k, line in enumerate(data, start = 2):
            ship(values(line), k)
        else:
            return 0
    else:
        # encoded streams seem only available from Python 3.6 on,
        # which is too new, so working in UTF-8 again - maybe that is
        # nicer anyway
        def ship(out, rec):
            out.write(b'\t'.join(rec))
            out.write(b'\n')
            return
        ship(ous, head)
        ous.flush()
        # Deduplicate the body by piping it through the external sort(1).
        with Popen([ SORT, '--unique' ],
                   env = dict(os.environ, LC_ALL = 'C'),
                   stdin = PIPE,
                   stdout = ous,
                   stderr = None) as proc:
            ship(proc.stdin, values(line))
            for line in data:
                ship(proc.stdin, values(line))
# HackerRank regex warm-up exercises: each section defines a pattern and
# tests it against a line read from stdin.

# Regex
'''
You have a test string S. Your task is to match the string hackerrank. This is case sensitive.
'''
Regex_Pattern = r'hackerrank'  # Do not delete 'r'.

import re

Test_String = input()

match = re.findall(Regex_Pattern, Test_String)

print("Number of matches :", len(match))


# Matching Anything But a Newline
'''
You have a test string S. Your task is to write a regular expression that matches only and exactly strings of form: abc.def.ghi.jkx, where each variable a, b, c, d, e, f, g, h, i, j, k, x can be any single character except the newline.
'''
regex_pattern = r"...\....\....\....$"  # Do not delete 'r'.

import re
import sys

test_string = input()

match = re.match(regex_pattern, test_string) is not None

print(str(match).lower())


# Matching Digits & Non-Digit Characters
'''
You have a test string S. Your task is to match the pattern xxXxxXxxxx Here x denotes a digit character, and X denotes a non-digit character.
'''
Regex_Pattern = r"\d\d\D\d\d\D\d\d\d\d"  # Do not delete 'r'.

import re

print(str(bool(re.search(Regex_Pattern, input()))).lower())


# Matching Whitespace & Non-Whitespace Character
'''
You have a test string S. Your task is to match the pattern XXxXXxXX Here, x denotes whitespace characters, and X denotes non-white space characters.
'''
Regex_Pattern = r"\S\S\s\S\S\s\S\S"  # Do not delete 'r'.

import re

print(str(bool(re.search(Regex_Pattern, input()))).lower())


# Matching Word & Non-Word Character
'''
You have a test string S. Your task is to match the pattern xxxXxxxxxxxxxxXxxx Here, x denotes word character, and X denotes non-word character.
'''
Regex_Pattern = r"\w\w\w\W\w{10}\W\w\w\w"  # Do not delete 'r'.

import re

print(str(bool(re.search(Regex_Pattern, input()))).lower())


# Matching Start & End
'''
You have a test string S. Your task is to match the pattern Xxxxx. Here, x denotes a word character, and X denotes a digit. S must start with a digit X and end with . symbol. S should be 6 characters long only.
'''
Regex_Pattern = r"^\d\w{4}\.$"  # Do not delete 'r'.

import re

print(str(bool(re.search(Regex_Pattern, input()))).lower())
# -------------- # Importing header files import numpy as np # Path of the file has been stored in variable called 'path' data = np.genfromtxt(path,delimiter=",",skip_header=1) print ("\nData: \n\n",data) print ("\nTypeof data: \n\n",type(data)) #New record new_record=[[50, 9, 4, 1, 0, 0, 40, 0]] census = np.concatenate((data,new_record)) print (census) #Code starts here # -------------- #Code starts here age = census[:,0] print (age) max_age = np.max(age) print (max_age) min_age = np.min(age) print (min_age) age_mean = np.mean(age) print (age_mean) age_std = np.std(age) print (age_std) # -------------- #Code starts here race_0 = census[census[:,2] == 0] race_1 = census[census[:,2] == 1] race_2 = census[census[:,2] == 2] race_3 = census[census[:,2] == 3] race_4 = census[census[:,2] == 4] len_0 = len(race_0) print(len_0) len_1 = len(race_1) print (len_1) len_2 = len(race_2) print (len_2) len_3 = len(race_3) print (len_3) len_4 = len(race_4) print (len_4) minority_race = 3 print (minority_race) print (census[:,2].size) # -------------- #Code starts here senior_citizens = census[census[:,0] > 60] working_hours_sum = np.sum(senior_citizens[:,6]) senior_citizens_len = len(senior_citizens) avg_working_hours = working_hours_sum/senior_citizens_len print(avg_working_hours) # -------------- #Code starts here high = census[census[:,1] > 10] low = census[census[:,1] <= 10] avg_pay_high = np.mean(high[:,7]) print (avg_pay_high) avg_pay_low = np.mean(low[:,7]) print (avg_pay_low) if avg_pay_high > avg_pay_low: print ('True') else: print ('False')
import os #declarar variables Comensal,Azafata,precio1,precio2,precio3="","",0,0,0 #INPUT Comensal=os.sys.argv[1] Azafata=os.sys.argv[2] precio1=int(os.sys.argv[3]) precio2=int(os.sys.argv[4]) precio3=int(os.sys.argv[5]) #PROCESSING total =int(precio1 + precio2 + precio3) #OUTPUT print(" ############################################# ") print(" # CEVICHERIA - SEÑOR DELFIN ") print(" ############################################# ") print(" #Comensal: " , Comensal + " Azafata: " , Azafata) print(" #Arroz con mariscos: " , precio1) print(" #refresco de lima: " , precio2) print(" #ocopa: " , precio3) print("# Total : ", total) #condicional multiple #si el pedido es mayor a 100 soles mostrarle al comprador que ha ganado un plato de ocopa mas refresco #si el pedido es igual a 70 soles mostrarle al comprador que ha ganado un refresco #si el pedido es menor a 40 soles decirle que no ha ganado nada if(total>100): print("usted ha ganado un plato de ocopa mas refresco") if(total==70): print("usted a ganado un refresco") if(total<40): print("usted no ha ganadado nada") #fin_if
from typing import Dict, List, Optional

from pydantic import BaseModel


class RecognitionModel(BaseModel):
    """Response envelope for a recognition request."""

    error: bool = False   # True when the request failed
    error_msg: str = ""   # failure description; empty on success
    data: Dict = {}       # result payload; pydantic copies this default per instance
from django.db import models


# Create your models here.
class Employee(models.Model):
    """An employee record with job and payroll basics."""

    fullname = models.CharField(max_length=30)
    job = models.CharField(max_length=15)
    salary = models.IntegerField()
    email = models.EmailField(max_length=50)

    def __str__(self):
        # Rendered as "<fullname>,<job>", exactly as before.
        return ",".join((self.fullname, self.job))
from .distributions import GMMDiag, GMMFull, MoE
import tensorflow as tf
import tensorflow.compat.v1 as tf1
from ..utils.tf_utils import log_normalize


class GMMApprox(object):
    """Approximate an unnormalized target density with a Gaussian mixture by
    maximizing a per-component Monte-Carlo ELBO (variational inference with a
    mixture proposal)."""

    def __init__(self, log_unnormalized_prob, gmm=None, k=10, loc=0., std=1., ndim=None,
                 loc_tril=None, samples=20, temp=1., cov_type='diag'):
        """
        :param log_unnormalized_prob: Unnormalized log density to estimate
        :type log_unnormalized_prob: a tensorflow function that takes
            [batch_size, ndim] as input and returns [batch_size]
        :param gmm: existing mixture to reuse; if None a trainable one is created
        :param k: number of components for GMM approximation
        :param loc: for initialization, mean
        :param std: for initialization, standard deviation
        :param ndim: dimensionality of x (required when gmm is None)
        :param loc_tril: initial scale of the Cholesky factor ('full' only)
        :param samples: Monte-Carlo samples per component for the ELBO
        :param temp: temperature multiplying the target log-density
        :param cov_type: 'diag' or 'full'
        """
        self.log_prob = log_unnormalized_prob
        self.ndim = ndim
        self.temp = temp

        if gmm is None:
            assert ndim is not None, "If no gmm is defined, should give the shape of x"

            if cov_type == 'diag':
                log_priors = tf.Variable(10. * tf.ones(k))
                # Fixed: use the TF1 compat op, consistent with the 'full'
                # branch below (`tf.random_normal` no longer exists in TF2).
                locs = tf.Variable(tf1.random_normal((k, ndim), loc, std))
                log_std_diags = tf.Variable(tf1.log(std/k * tf.ones((k, ndim))))
                gmm = GMMDiag(log_priors=log_priors, locs=locs,
                              log_std_diags=log_std_diags)
            elif cov_type == 'full':
                log_priors = tf.Variable(10. * tf.ones(k))
                locs = tf.Variable(tf1.random_normal((k, ndim), loc, std))
                loc_tril = loc_tril if loc_tril is not None else std/k
                tril_cov = tf.Variable(loc_tril ** 2 * tf.eye(ndim, batch_shape=(k, )))
                gmm = GMMFull(log_priors=log_priors, locs=locs, tril_cov=tril_cov)
            else:
                raise ValueError("Unrecognized covariance type")

        self.num_samples = samples
        self.gmm = gmm

    @property
    def sample_shape(self):
        return (self.num_samples, )

    @property
    def opt_params(self):
        """
        Parameters to train
        :return:
        """
        return self.gmm.opt_params

    def mixture_lower_bound(self, k):
        """Monte-Carlo ELBO of component k: E_{q_k}[log p - log q]."""
        samples = self.gmm.component_sample(k, self.sample_shape)
        log_qs = self.gmm.log_prob(samples)
        log_ps = self.temp * self.log_prob(samples)
        return tf.reduce_mean(log_ps - log_qs)

    def mixture_elbo_fast(self, *args):
        """Weighted-sum ELBO over all components, evaluated in one batched
        pass instead of looping over components."""
        samples_conc = tf.reshape(
            tf.transpose(self.gmm.all_components_sample(self.sample_shape),
                         perm=(1, 0, 2)),
            (-1, self.ndim))  # [k * nsamples, ndim]

        log_qs = tf.reshape(self.gmm.log_prob(samples_conc),
                            (self.gmm.k, self.num_samples))
        log_ps = tf.reshape(self.temp * self.log_prob(samples_conc),
                            (self.gmm.k, self.num_samples))

        component_elbos = tf.reduce_mean(log_ps - log_qs, axis=1)
        return tf.reduce_sum(
            component_elbos * tf.exp(log_normalize(self.gmm.log_priors)))

    def mixture_elbo(self):
        """Reference (slow) ELBO: one sampling pass per component."""
        component_elbos = tf.stack(
            [self.mixture_lower_bound(k) for k in range(self.gmm.k)])
        return tf.reduce_sum(
            component_elbos * tf.exp(log_normalize(self.gmm.log_priors)))

    @property
    def cost(self):
        # Minimizing the cost maximizes the ELBO.
        return -self.mixture_elbo_fast()


class GMMApproxCond(GMMApprox):
    """Conditional variant: approximate log p(y | x) with a mixture of experts."""

    def __init__(self, log_unnormalized_prob, moe=None, ndim_in=None, ndim_out=None,
                 **kwargs):
        """
        :param log_unnormalized_prob: Unnormalized log density of the conditional
            model to estimate log p(y | x)
        :type log_unnormalized_prob: a tensorflow function that takes
            [batch_size, ndim_y], [batch_size, ndim_x] as input and
            returns [batch_size]
        :param moe: mixture of experts used as the proposal
        :type moe: MoE
        """
        self.ndim_in = ndim_in    # x
        self.ndim_out = ndim_out  # y
        GMMApprox.__init__(self, log_unnormalized_prob, gmm=moe, **kwargs)

    @property
    def moe(self):
        return self.gmm

    def mixture_elbow_fast(self, x):
        # importance sample mixture weights
        samples, weights = self.moe.sample_is(x)
        log_qs = self.moe.log_prob(samples, x)
        # Fixed: evaluate the target on the drawn samples — the original
        # referenced an undefined name `y` (NameError at runtime).
        log_ps = self.temp * self.log_prob(samples, x)
        elbos_is = tf.reduce_mean(log_ps - log_qs, axis=1)
        return tf.reduce_sum(elbos_is * tf1.exp(weights))

    # NOTE(review): this shadows the parent's `cost` *property* with a plain
    # method, so callers must write `obj.cost(x)` here; kept for compatibility.
    def cost(self, x):
        return -self.mixture_elbow_fast(x)
# -*- coding:utf-8 -*-
import sys
sys.path.append('..')
from Model.BookModel import BookModel
from Model.ISBNBookModel import IsbnBookModel
from View.BookView import BookView


class BookController(object):
    """Controller tying book-related requests to the models and the view.

    NOTE(review): `request` is used with `.get(key, default=...)`, so it is a
    werkzeug/Flask MultiDict-style object, not a plain dict — confirm against
    the routing layer.
    """

    def __init__(self):
        self.Model = BookModel()
        self.View = BookView()

    # Upload the ISBN-level (catalogue) information of a book.
    '''
    levelNum VARCHAR(10) NOT NULL,
    subTitle VARCHAR(100),
    author VARCHAR(100) NOT NULL,
    date VARCHAR(20) NOT NULL,
    imagesMedium VARCHAR(100) NOT NULL,
    imagesLarge VARCHAR(100) NOT NULL,
    publisher VARCHAR(50) NOT NULL,
    isbn VARCHAR(13) NOT NULL PRIMARY KEY,
    title VARCHAR(100) NOT NULL,
    summary VARCHAR(400) NOT NULL
    '''
    def BookUpLoadISBNcontroller(self,request):
        # A request carrying 'title' holds the full catalogue record;
        # otherwise only the bare ISBN is registered.
        if request.get('title',default=None):
            isbn = request['isbn']
            levelNum = request['levelNum']
            subTitle = request.get('subTitle',default=None)
            author = request['author']
            date = request['date']
            imagesMedium = request['imagesMedium']
            imagesLarge = request['imagesLarge']
            publisher = request['publisher']
            # NOTE(review): duplicate assignment — `isbn` was already read above.
            isbn = request['isbn']
            title = request['title']
            summary = request['summary']
            result = IsbnBookModel.BookUpLoadIsbn(isbn=isbn,levelNum=levelNum,subTitle=subTitle,author=author,date=date,imagesLarge=imagesLarge,imagesMedium=imagesMedium,publisher=publisher,title=title,summary=summary)
        else:
            isbn = request['isbn']
            result = IsbnBookModel.BookUpLoadIsbn(isbn=isbn)
        res = self.View.BookUpLoadISBNView(result)
        return res

    # Upload a per-user book; the parameter is a dict of UserBook attributes:
    '''
    bookId INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
    userId INTEGER NOT NULL,
    isbn VARCHAR(13) NOT NULL,
    tag1 VARCHAR(100) NOT NULL,
    tag2 VARCHAR(100) NOT NULL,
    place VARCHAR(100) NOT NULL,
    isGroupVisible INTEGER NOT NULL,
    lend INTEGER NOT NULL,
    '''
    def BookUpLoadController(self,request):
        # Unpack the per-user book attributes from the request.
        userId = request['userId']
        isbn = request['isbn']
        tag1 = request['tag1']
        tag2 = request['tag2']
        place = request['place']
        isGroupVisible = request['isGroupVisible']
        lend = request['lend']
        bookId = self.Model.BookUpLoadUser(userId=userId,isbn=isbn,tag1=tag1,tag2=tag2,place=place,isGroupVisible=isGroupVisible,lend=lend)
        res = self.View.BookUpLoadView(bookId)
        return res

    # Delete-book view.
    def BookDeleteController(self,request):
        # Extract the book id from the request.
        bookId = request['bookId']
        result = self.Model.BookDelete(bookId)
        res = self.View.BookDeleteView(result)
        return res

    # Manage-book view.
    '''
    bookId INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
    userId INTEGER NOT NULL,
    isbn VARCHAR(13) NOT NULL,
    tag1 VARCHAR(100) NOT NULL,
    tag2 VARCHAR(100) NOT NULL,
    place VARCHAR(100) NOT NULL,
    isGroupVisible INTEGER NOT NULL,
    lend INTEGER NOT NULL,
    '''
    def BookChangeBookInfoController(self,request):
        # Call a different model function for each parameter that is present.
        bookId = request.get('bookId',default=0)
        tag1 = request.get('tag1',default=None)
        tag2 = request.get('tag2',default=None)
        place = request.get('place',default=None)
        isGroupVisible = request.get('isGroupVisible',default=None)
        lend = request.get('lend',default=None)
        # Per-field result codes: 0 = updated, 1 = update failed,
        # 2 = no change requested.
        results = [2,2,2,2,2]
        if tag1:
            result = self.Model.EditTag1(bookId,tag1)
            if not result:
                results[0] = 1
            else:
                results[0] = 0
        if tag2:
            result = self.Model.EditTag2(bookId,tag2)
            if not result:
                results[1] = 1
            else:
                results[1] = 0
        if place:
            result = self.Model.EditPlace(bookId,place)
            if not result:
                results[2] = 1
            else:
                results[2] = 0
        if isGroupVisible:
            result = self.Model.ShareBook(bookId,isGroupVisible)
            if not result:
                results[3] = 1
            else:
                results[3] = 0
        if lend:
            result = self.Model.LendBook(bookId,lend)
            if not result:
                results[4] = 1
            else:
                results[4] = 0
        res = self.View.BookChangeBookInfoView(results)
        return res

    # Query-book view.
    """
    Args: 可能的查询参数
        bookId INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
        userId INTEGER NOT NULL,
        isbn VARCHAR(13) NOT NULL,
        tag1 VARCHAR(100) NOT NULL,
        tag2 VARCHAR(100) NOT NULL,
        place VARCHAR(100) NOT NULL,
        isGroupVisible INTEGER NOT NULL,
        lend INTEGER NOT NULL,
    Returns:
        一个字典(也可能是字典列表),用户信息,之后封装为JSON发出
    """
    def BookQueryController(self,request):
        #print(request['bookId'])
        # Extract every possible query parameter (all optional).
        print(request)
        bookId = request.get('bookId',default=0)
        userId = request.get('userId',default=0)
        isbn = request.get('isbn',default=None)
        tag1 = request.get('tag1',default=None)
        tag2 = request.get('tag2',default=None)
        place = request.get('place',default=None)
        isGroupVisible = request.get('isGroupVisible',default=2)
        lend = request.get('lend',default=0)
        # The model returns a list of dicts.
        result = self.Model.QueryBook(bookId=bookId,userId=userId,isbn=isbn,tag1=tag1,tag2=tag2,place=place,isGroupVisible=isGroupVisible,lend=lend)
        res = self.View.BookQueryView(result)
        return res
def prime_sieve(limit):
    """Return a boolean list `flags` of length `limit` where flags[i] is True
    iff i is prime (Sieve of Eratosthenes).

    Fixes over the original script: 0 and 1 are marked non-prime, marking
    starts at i*i (the smaller multiples were already crossed out), and the
    builtin `max` is no longer shadowed.
    """
    if limit < 3:
        return [False] * limit
    flags = [True] * limit
    flags[0] = flags[1] = False
    for i in range(2, int(limit ** 0.5) + 1):
        if flags[i]:
            for multiple in range(i * i, limit, i):
                flags[multiple] = False
    return flags


def nth_prime(n, limit=1000000):
    """Return the n-th prime (1-indexed: nth_prime(1) == 2).

    :param n: which prime to return (must be >= 1)
    :param limit: sieve size; raised ValueError if it holds fewer than n primes
    """
    flags = prime_sieve(limit)
    count = 0
    for x in range(2, limit):
        if flags[x]:
            count += 1
            if count == n:
                return x
    raise ValueError("sieve limit %d holds fewer than %d primes" % (limit, n))


if __name__ == '__main__':
    # Same outputs as the original script: a sieve prefix for inspection,
    # then the 10001st prime (Project Euler #7).
    print(prime_sieve(1000000)[:30])
    print(nth_prime(10001))
import re, time, json, threading, requests, traceback
from datetime import datetime
import paho.mqtt.client as mqtt
import DAN, SA

# NOTE(review): `threading` and `requests` are imported but unused in this
# chunk — confirm before removing.


def df_func_name(df_name):
    # Device-feature names use '-', Python identifiers use '_'.
    return re.sub(r'-', r'_', df_name)

# Pull the (optional) user configuration from the SA module; every setting
# falls back to a default when SA does not define it.
MQTT_broker = getattr(SA,'MQTT_broker', None)
MQTT_port = getattr(SA,'MQTT_port', 1883)
MQTT_User = getattr(SA,'MQTT_User', None)
MQTT_PW = getattr(SA,'MQTT_PW', None)
MQTT_encryption = getattr(SA,'MQTT_encryption', None)
device_model = getattr(SA,'device_model', None)
device_name = getattr(SA,'device_name', None)
ServerURL = getattr(SA,'ServerURL', None)
device_id = getattr(SA,'device_id', None)
if device_id==None:
    device_id = DAN.get_mac_addr()
IDF_list = getattr(SA,'IDF_list', [])
ODF_list = getattr(SA,'ODF_list', [])
exec_interval = getattr(SA,'exec_interval', 1)

# Map each input/output device feature to its handler in SA
# (feature 'foo-bar' -> SA.foo_bar); missing handlers stay None and are
# reported later.
IDF_funcs = {}
for idf in IDF_list:
    IDF_funcs[idf] = getattr(SA, df_func_name(idf), None)
ODF_funcs = {}
for odf in ODF_list:
    ODF_funcs[odf] = getattr(SA, df_func_name(odf), None)


def on_connect(client, userdata, flags, rc):
    # rc == 0 means success; subscribe to one topic per output device feature.
    if not rc:
        print('MQTT broker: {}'.format(MQTT_broker))
        if ODF_list == []:
            print('ODF_list is not exist.')
            return
        topic_list=[]
        for odf in ODF_list:
            topic = '{}//{}'.format(device_id, odf)
            topic_list.append((topic,0))
        if topic_list != []:
            r = client.subscribe(topic_list)
            if r[0]:
                print('Failed to subscribe topics. Error code:{}'.format(r))
    else:
        print('Connect to MQTT borker failed. Error code:{}'.format(rc))


def on_disconnect(client, userdata, rc):
    print('MQTT Disconnected. Re-connect...')
    client.reconnect()


def on_message(client, userdata, msg):
    # Topic layout is '<device_id>//<ODF name>'; the payload is JSON of the
    # form {'samples': [[timestamp, data]]}.
    samples = json.loads(msg.payload)
    ODF_name = msg.topic.split('//')[1]
    if ODF_funcs.get(ODF_name):
        ODF_data = samples['samples'][0][1]
        ODF_funcs[ODF_name](ODF_data)
    else:
        print('ODF function "{}" is not existed.'.format(ODF_name))


def mqtt_pub(client, deviceId, IDF, data):
    # Publish one timestamped sample for the given input device feature.
    topic = '{}//{}'.format(deviceId, IDF)
    sample = [str(datetime.today()), data]
    payload = json.dumps({'samples':[sample]})
    status = client.publish(topic, payload)
    if status[0]:
        print('topic:{}, status:{}'.format(topic, status))


def on_register(result):
    # Forward the registration result to the user hook, if SA defines one.
    func = getattr(SA, 'on_register', None)
    if func:
        func(result)


def MQTT_config(client):
    client.username_pw_set(MQTT_User, MQTT_PW)
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_disconnect = on_disconnect
    if MQTT_encryption:
        client.tls_set()
    client.connect(MQTT_broker, MQTT_port, keepalive=60)

# Register this device with the IoTtalk server, then start the MQTT network
# loop in a background thread when a broker is configured (otherwise HTTP
# push/pull is used in the main loop below).
DAN.profile['dm_name'] = device_model
DAN.profile['df_list'] = IDF_list + ODF_list
if device_name:
    DAN.profile['d_name']= device_name
if MQTT_broker:
    DAN.profile['mqtt_enable'] = True
result = DAN.device_registration_with_retry(ServerURL, device_id)
on_register(result)
if MQTT_broker:
    mqttc = mqtt.Client()
    MQTT_config(mqttc)
    mqttc.loop_start()

# Main polling loop: read every IDF and publish its data; without MQTT,
# also poll every ODF over HTTP.
while True:
    try:
        for idf in IDF_list:
            if not IDF_funcs.get(idf):
                print('IDF function "{}" is not existed.'.format(idf))
                continue
            IDF_data = IDF_funcs.get(idf)()
            # A falsy return value means "nothing to report" this cycle.
            if not IDF_data: continue
            if type(IDF_data) is not tuple: IDF_data=[IDF_data]
            if MQTT_broker:
                mqtt_pub(mqttc, device_id, idf, IDF_data)
            else:
                DAN.push(idf, IDF_data)
            time.sleep(0.001)
        if not MQTT_broker:
            for odf in ODF_list:
                if not ODF_funcs.get(odf):
                    print('ODF function "{}" is not existed.'.format(odf))
                    continue
                ODF_data = DAN.pull(odf)
                if not ODF_data: continue
                ODF_funcs.get(odf)(ODF_data)
                time.sleep(0.001)
    except Exception as e:
        # A lost registration is recovered by re-registering; anything else
        # is logged and, under MQTT, followed by a reconnect attempt.
        if str(e).find('mac_addr not found:') != -1:
            print('Reg_addr is not found. Try to re-register...')
            DAN.device_registration_with_retry(ServerURL, device_id)
        else:
            exception = traceback.format_exc()
            print(exception)
            if MQTT_broker:
                mqttc.reconnect()
        time.sleep(1)
    time.sleep(exec_interval)
import random


def sample_by_frequency(histogram):
    """Pick a word from `histogram` (a list of [word, count] pairs), weighted
    by count, via rejection sampling: draw an acceptance threshold once, then
    draw random candidates until one's count meets the threshold.

    Fixed: the threshold is drawn from 1..max_frequency as the comment in the
    original stated ("from one to max_frequency"); drawing from 0 made the
    very first candidate always acceptable, i.e. a near-uniform pick.
    """
    max_frequency = max(item[1] for item in histogram)
    rand_frequency = random.randint(1, max_frequency)
    while True:
        candidate = random.choice(histogram)
        if candidate[1] >= rand_frequency:
            return candidate[0]


def higher_markov_sampling(histogram):
    """Same frequency-weighted pick for a dict histogram {word: count}."""
    words = list(histogram)  # hoisted: the original rebuilt this every loop
    max_frequency = max(histogram.values())
    rand_frequency = random.randint(1, max_frequency)
    while True:
        selected_word = random.choice(words)
        if histogram[selected_word] >= rand_frequency:
            return selected_word


def higher_markov_check_frequency(histogram):
    """Empirically check the sampler on a nested histogram
    {word_pair: {word: count}}: returns observed counts of 10000 draws.
    """
    frequency_dict = {}
    for _ in range(10000):
        word_pair = random.choice(list(histogram))
        word = higher_markov_sampling(histogram[word_pair])
        following_words = frequency_dict.setdefault(word_pair, {})
        following_words[word] = following_words.get(word, 0) + 1
    return frequency_dict


def check_frequency(histogram):
    """Empirically check sample_by_frequency: counts of 10000 draws."""
    frequency_dict = {}
    for _ in range(10000):
        word = sample_by_frequency(histogram)
        frequency_dict[word] = frequency_dict.get(word, 0) + 1
    return frequency_dict


if __name__ == '__main__':
    # Imported lazily so the samplers above stay importable without the
    # corpus-reading helper modules being present.
    from histogram_functions import get_words
    from histogram_lists import count_words

    word_list = get_words('GoT_text.txt')
    counts = count_words(word_list)
    sample = sample_by_frequency(counts)
    frequency_check = check_frequency(counts)
    print(sample)
    print(frequency_check)
from hwt.hdlObjects.operator import Operator
from hwt.hdlObjects.operatorDefs import AllOps
from hwt.hdlObjects.types.defs import BOOL
from hwt.hdlObjects.value import Value


class EventCapableVal(Value):
    """Value mixin providing event / rising-edge / falling-edge operators.

    Each public operator has two modes: when `self` is a concrete Value it is
    evaluated directly (the `*__val` variants); otherwise an operator node is
    built for later elaboration.
    """

    def _hasEvent__val(self, now):
        # Simulation-time evaluation: an event happened iff this value was
        # written at the current simulator time `now`.
        boolCls = BOOL.getValueCls()
        return boolCls(self.updateTime == now, BOOL, self.vldMask, now)

    def _hasEvent(self, now):
        if not isinstance(self, Value):
            return Operator.withRes(AllOps.EVENT, [self], BOOL)
        return self._hasEvent__val(now)

    def _onFallingEdge__val(self, now):
        res = self._hasEvent__val(now)
        # Falling edge = an event occurred and the new value is falsy.
        res.val = res.val and not self.val
        return res

    def _onFallingEdge(self, now):
        if not isinstance(self, Value):
            return Operator.withRes(AllOps.FALLIGN_EDGE, [self], BOOL)
        return self._onFallingEdge__val(now)

    def _onRisingEdge__val(self, now):
        res = self._hasEvent__val(now)
        # Rising edge = an event occurred and the new value is truthy.
        res.val = res.val and self.val
        return res

    def _onRisingEdge(self, now):
        if not isinstance(self, Value):
            return Operator.withRes(AllOps.RISING_EDGE, [self], BOOL)
        return self._onRisingEdge__val(now)
import logging


class LastLogsHandler(logging.Handler):
    """Logging handler keeping the last `size` records in a ring buffer."""

    def __init__(self, size):
        logging.Handler.__init__(self)
        self.i = 0
        self.len = size
        # Ring buffer of the most recent LogRecords (None = empty slot).
        self.strA = [None] * size

    def emit(self, record):
        # Overwrite the oldest slot; `i` always points at the next write slot.
        self.strA[self.i] = record
        self.i += 1
        if self.i == self.len:
            self.i = 0

    def getStr(self):
        """Return the buffered records, newest first, one per line."""
        s = ""
        for n in range(self.len):
            k = self.i - n - 1
            if k < 0:
                k = k + self.len
            if self.strA[k] is not None:
                s += self.format(self.strA[k]) + "\r\n"
        return s


class Verify(object):
    """Static consistency checks on tabular config data.

    Every check takes a list of rows (row 0 is the header, which is skipped)
    and returns (check, errors_str) where check is 1 when a problem was found.
    """

    def __init__(self):
        pass

    @staticmethod
    def dataIntegrityServers(data):
        """Report duplicated TCP ports (column 3) among server rows.

        Fixed: the original sliced `data[1:2]` (a single row), so duplicate
        detection never ran; the loop clearly intends all data rows, as in
        dataIntegrityChannels.
        """
        sdata = data[1:]
        numrows = len(sdata)
        check = 0
        errors_str = ""
        for row in range(numrows):
            cvalue = sdata[row][3]
            for row1 in range(row + 1, numrows):
                if cvalue == sdata[row1][3]:
                    errors_str = errors_str + ("Duplicated TCP port: " + cvalue + "\nLine: " + str(row1 + 1) + "\n")
                    check = 1
        return check, errors_str

    @staticmethod
    def dataIntegrityUnits(data):
        """Report duplicated unit names (same name, column 2, within the same
        unit id, column 1).

        Fixed: same `data[1:2]` truncation as dataIntegrityServers.
        """
        sdata = data[1:]
        numrows = len(sdata)
        check = 0
        errors_str = ""
        for row in range(numrows):
            cvalue = sdata[row][2]
            cunit = sdata[row][1]
            for row1 in range(row + 1, numrows):
                if cvalue == sdata[row1][2] and cunit == sdata[row1][1]:
                    errors_str = errors_str + ("Duplicated unit name: " + cvalue + "\n" + "Line: " + str(row1 + 1) + "\n")
                    check = 1
        return check, errors_str

    @staticmethod
    def dataIntegrityChannels(data):
        """Validate channel rows: unique names per unit, known data/modbus
        types, register length consistent with the data type, and no
        overlapping modbus addresses within a unit.
        """
        # DO NOT MESS WITH ORDER
        datype_list = ['bool', 'unsigned_short', 'short', 'float', 'integer', 'unsigned_integer', 'double', 'string']
        modbustype_list = ['coils', 'discrete_inputs', 'holding_registers', 'analog_inputs']
        sdata = data[1:]
        numrows = len(sdata)
        units_id = []
        channels = []
        check = 0
        errors_str = ""
        for row in range(numrows):
            cvalue = sdata[row][2]
            cunit = sdata[row][1]
            for row1 in range(row + 1, numrows):
                if cvalue == sdata[row1][2] and cunit == sdata[row1][1]:
                    errors_str = errors_str + ("Duplicated channel name: " + cvalue + "\n" + "Line: " + str(row1 + 1) + "\n")
                    check = 1
            # check if data type is valid
            if sdata[row][5].lower() not in datype_list:
                errors_str = errors_str + ("Data type is not valid: " + sdata[row][5] + "\n" + "Line: " + str(row + 1) + "\n")
                check = 1
            if sdata[row][8].lower() not in modbustype_list:
                errors_str = errors_str + ("Modbus type is not valid: " + sdata[row][8] + "\n" + "Line: " + str(row + 1) + "\n")
                check = 1
            # check if datatype is consistent with modbus length
            # indices 0..2 (bool, unsigned_short, short) occupy 1 register.
            # Fixed: the original sliced [:2], silently skipping 'short'
            # although its comment said "from 0 to 2".
            if (sdata[row][5].lower() in datype_list[:3]) and sdata[row][7] != '1':
                errors_str = errors_str + ("Lenght of data not valid: " + sdata[row][7] + "\n" + "Line: " + str(row + 1) + "\n")
                check = 1
            # indices 3..4 (float, integer) occupy 2 registers.
            # NOTE(review): 'unsigned_integer' (index 5) gets no length check —
            # confirm whether it should also require length '2'.
            elif (sdata[row][5].lower() in datype_list[3:5]) and sdata[row][7] != '2':
                errors_str = errors_str + ("Lenght of data not valid: " + sdata[row][7] + "\n" + "Line: " + str(row + 1) + "\n")
                check = 1
            elif sdata[row][5].lower() == 'double' and sdata[row][7] != '4':  # double
                errors_str = errors_str + ("Lenght of data not valid: " + sdata[row][7] + "\n" + "Line: " + str(row + 1) + "\n")
                check = 1
            if sdata[row][1] != '':
                # unit_id, modbus address, modbus length
                channel = [int(sdata[row][1]), int(sdata[row][6]), int(sdata[row][7])]
                channels.append(channel)
            if sdata[row][1] != '' and int(sdata[row][1]) not in units_id:
                units_id.append(int(sdata[row][1]))
        # One address map per unit (`default=0` keeps channel-less input from
        # crashing max() on an empty sequence).
        addresses = [None] * max(units_id, default=0)
        for unit in units_id:
            # Size each unit's map to the furthest register any channel touches.
            adress_size = 0
            for channel in channels:
                if unit == channel[0]:
                    if channel[2] + channel[1] > adress_size:
                        adress_size = channel[2] + channel[1]
            addresses[unit - 1] = ([None] * adress_size)
        for address in channels:
            for i in range(address[1], address[1] + address[2]):
                unit = address[0]
                if addresses[unit - 1][i] != 1:
                    addresses[unit - 1][i] = 1
                else:
                    errors_str = errors_str + 'Modbus address busy at: unit' + str(unit) + ', address: ' + str(i) + '\n'
                    check = 1
        return check, errors_str
from numpy import *
import pylab
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plot

set_printoptions(precision = 3)

# Data: a 3-D multivariate-normal sample of 1000 points.
mean = [1,5,10]
# NOTE(review): this matrix is not symmetric (so not a valid covariance
# matrix); numpy will still draw from it, with a warning, but the sample is
# not a proper Gaussian. Confirm the intended covariance.
cov = [[-1,1,2], [-2,3,1],[4,0,3]]
d = random.multivariate_normal(mean,cov,1000)  # fixed typo: was `multivariada_normal`

# Scatter plot of the raw data.
fig1 = plot.figure()
sp = fig1.gca(projection = '3d')
sp.scatter(d[:,0],d[:,1],d[:,2])
plot.show()

# PCA ANALYSIS:
# Step 1: covariance matrix of the centred data (N x N).
d1 = d - d.mean(0)
matcov = dot(d1.transpose(), d1)

# Step 2: eigenvalues and eigenvectors (diagonalization) of the covariance.
valp1,vecp1 = linalg.eig(matcov)

# Step 3: rank the components by decreasing eigenvalue.
ind_creciente = argsort(valp1)    # increasing order
ind_decre = ind_creciente[::-1]   # fixed: source had a non-ASCII minus (syntax error)
val_decre = valp1[ind_decre]      # eigenvalues in decreasing order
vec_decre = vecp1[:,ind_decre]    # reorder the eigenvectors to match
pylab.plot(val_decre, 'o-')       # fixed: smart quotes / en-dash mojibake
pylab.show()

# Project onto the new basis defined by the eigenvectors.
d_PCA = zeros((d.shape[0],d.shape[1]))
for i in range(d.shape[0]):
    for j in range(d.shape[1]):
        d_PCA[i,j] = dot(d[i,:], vecp1[:,j])

# Recover the original data by inverting the projection (reconstruction).
d_recon = zeros((d.shape[0], d.shape[1]))
for i in range(d.shape[0]):
    for j in range(d.shape[1]):
        d_recon[i] += d_PCA[i, j]*vecp1[:,j]

# Check the original data is recovered.
allclose(d,d_recon)

# Project onto the 2-D PCA space spanned by the two leading eigenvectors.
d_PCA2 = zeros((d.shape[0],2))
for i in range(d.shape[0]):
    for j in range(2):  # fixed: the inner loop reused `i`, leaving `j` stale
        d_PCA2[i,j] = dot(d[i,:],vec_decre[:,j])

# Reconstruct from the 2-D projection.
d_recon2 = zeros((d.shape[0], d.shape[1]))
for i in range(d.shape[0]):
    for j in range(2):
        d_recon2[i] += d_PCA2[i, j]*vec_decre[:,j]

# Plot the 2-D reconstruction embedded in 3-D.
fig2 = plot.figure()
sp2 = fig2.gca(projection = '3d')
sp2.scatter(d_recon2[:,0], d_recon2[:,1], d_recon2[:,2],c='r',marker='x')
plot.show()
#1/2/3 import numpy as np import pandas as pd site=('https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/Students_Alcohol_Consumption/student-mat.csv') df = pd.read_csv(site) #4 df_slice = df.loc[:,'school':'guardian'] #5/6/8 str_func = lambda x: x.capitalize() df['Mjob'], df['Fjob'] = (df['Mjob'].apply(str_func), df['Fjob'].apply(str_func)) #7 print(df.tail(3)) #9 def majority(_df_): if _df_ >= 17: return True else: return False df['legal_drinker'] = df['age'].apply(majority) #10 def mult_10(_df_): if type(_df_) == int: return _df_*10 else: return _df_ df_10 = df.applymap(mult_10) print(df_10.head(10))
import os #imput densidad=int(os.sys.argv[1]) altura=int(os.sys.argv[2]) gravedad=float(os.sys.argv[3]) #processing presion=(densidad*altura*gravedad) #output if (presion>42): print("presion en estado critico para el producto") if(presion<42 and presion>30): print("tengan cuidado") if(presion<20): print("todo ok")
#!/usr/bin/python
"""
unpickle.py: A subset of unpickling code from pickle.py.

- We don't want to ship the pickler.
- We only need to handle the v2 protocol
- We only need to handle the parts of the protocol that we use.

For reference, the _RuntimeType hierarchy seems to require 15 unique
instructions + PROTO and STOP, which are trivial.
"""
# NOTE(review): this module is Python 2 source (string opcodes, py2 raise
# syntax, long/unicode/xrange/intern); it will not import under Python 3.

import marshal
import sys

# marshal.loads decodes the little-endian binary ints embedded after the
# BININT*/BINGET*/BINPUT*/BINSTRING opcodes (see the load_* methods below).
mloads = marshal.loads


# Copied from types.py
class _C:
    def _m(self): pass
ClassType = type(_C)


# NOTE: I think INST and OBJ aren't used?  We want NEWOBJ.
class _EmptyClass:
    pass

# Pickle opcodes.  See pickletools.py for extensive docs.  The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.

MARK = '('              # push special markobject on stack
STOP = '.'              # every pickle ends with STOP
POP = '0'               # discard topmost stack item
POP_MARK = '1'          # discard stack top through topmost markobject
DUP = '2'               # duplicate top stack item
FLOAT = 'F'             # push float object; decimal string argument
INT = 'I'               # push integer or bool; decimal string argument
BININT = 'J'            # push four-byte signed int
BININT1 = 'K'           # push 1-byte unsigned int
LONG = 'L'              # push long; decimal string argument
BININT2 = 'M'           # push 2-byte unsigned int
NONE = 'N'              # push None
PERSID = 'P'            # push persistent object; id is taken from string arg
BINPERSID = 'Q'         # "    "       "       ; "   "   "    "   " stack
REDUCE = 'R'            # apply callable to argtuple, both on stack
STRING = 'S'            # push string; NL-terminated string argument
BINSTRING = 'T'         # push string; counted binary string argument
SHORT_BINSTRING = 'U'   # "    "    ; "       "      "      " < 256 bytes
UNICODE = 'V'           # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X'        # "    "       "     ; counted UTF-8 string argument
APPEND = 'a'            # append stack top to list below it
BUILD = 'b'             # call __setstate__ or __dict__.update()
GLOBAL = 'c'            # push self.find_class(modname, name); 2 string args
DICT = 'd'              # build a dict from stack items
EMPTY_DICT = '}'        # push empty dict
APPENDS = 'e'           # extend list on stack by topmost stack slice
GET = 'g'               # push item from memo on stack; index is string arg
BINGET = 'h'            # "    "    "    "    "  "    ; "     "  1-byte arg
INST = 'i'              # build & push class instance
LONG_BINGET = 'j'       # push item from memo on stack; index is 4-byte arg
LIST = 'l'              # build list from topmost stack items
EMPTY_LIST = ']'        # push empty list
OBJ = 'o'               # build & push class instance
PUT = 'p'               # store stack top in memo; index is string arg
BINPUT = 'q'            # "     "    "   "   "   ; "     "  1-byte arg
LONG_BINPUT = 'r'       # "     "    "   "   "   ; "     "  4-byte arg
SETITEM = 's'           # add key+value pair to dict
TUPLE = 't'             # build tuple from topmost stack items
EMPTY_TUPLE = ')'       # push empty tuple
SETITEMS = 'u'          # modify dict by adding topmost key+value pairs
BINFLOAT = 'G'          # push float; arg is 8-byte float encoding

TRUE = 'I01\n'          # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n'         # not an opcode; see INT docs in pickletools.py

# Protocol 2

PROTO = '\x80'          # identify pickle protocol
NEWOBJ = '\x81'         # build object by applying cls.__new__ to argtuple
EXT1 = '\x82'           # push object from extension registry; 1-byte index
EXT2 = '\x83'           # ditto, but 2-byte index
EXT4 = '\x84'           # ditto, but 4-byte index
TUPLE1 = '\x85'         # build 1-tuple from stack top
TUPLE2 = '\x86'         # build 2-tuple from two topmost stack items
TUPLE3 = '\x87'         # build 3-tuple from three topmost stack items
NEWTRUE = '\x88'        # push True
NEWFALSE = '\x89'       # push False
LONG1 = '\x8a'          # push long from < 256 bytes
LONG4 = '\x8b'          # push really big long


# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
    def __init__(self, value):
        self.value = value


class Unpickler(object):

    def __init__(self, file):
        """This takes a file-like object for reading a pickle data stream.

        The protocol version of the pickle is detected automatically, so no
        proto argument is needed.

        The file-like object must have two methods, a read() method that
        takes an integer argument, and a readline() method that requires no
        arguments.  Both methods should return a string.  Thus file-like
        object can be a file object opened for reading, a StringIO object,
        or any other custom object that meets this interface.
        """
        self.readline = file.readline
        self.read = file.read
        self.memo = {}

    def load(self):
        """Read a pickled object representation from the open file.

        Return the reconstituted object hierarchy specified in the file.
        """
        self.mark = object()  # any new unique object
        self.stack = []
        self.append = self.stack.append
        read = self.read
        dispatch = self.dispatch
        try:
            while 1:
                key = read(1)
                dispatch[key](self)
        except _Stop as stopinst:
            return stopinst.value

    # Return largest index k such that self.stack[k] is self.mark.
    # If the stack doesn't contain a mark, eventually raises IndexError.
    # This could be sped by maintaining another stack, of indices at which
    # the mark appears.  For that matter, the latter stack would suffice,
    # and we wouldn't need to push mark objects on self.stack at all.
    # Doing so is probably a good thing, though, since if the pickle is
    # corrupt (or hostile) we may get a clue from finding self.mark embedded
    # in unpickled objects.
    def marker(self):
        stack = self.stack
        mark = self.mark
        k = len(stack)-1
        while stack[k] is not mark:
            k = k-1
        return k

    # Opcode byte -> bound handler; filled in as the methods are defined.
    dispatch = {}

    def load_eof(self):
        raise EOFError
    dispatch[''] = load_eof

    def load_proto(self):
        proto = ord(self.read(1))
        if not 0 <= proto <= 2:
            raise ValueError, "unsupported pickle protocol: %d" % proto
    dispatch[PROTO] = load_proto

    def load_persid(self):
        pid = self.readline()[:-1]
        self.append(self.persistent_load(pid))
    dispatch[PERSID] = load_persid

    def load_binpersid(self):
        pid = self.stack.pop()
        self.append(self.persistent_load(pid))
    dispatch[BINPERSID] = load_binpersid

    def load_none(self):
        self.append(None)
    dispatch[NONE] = load_none

    def load_false(self):
        self.append(False)
    dispatch[NEWFALSE] = load_false

    def load_true(self):
        self.append(True)
    dispatch[NEWTRUE] = load_true

    def load_int(self):
        data = self.readline()
        if data == FALSE[1:]:
            val = False
        elif data == TRUE[1:]:
            val = True
        else:
            try:
                val = int(data)
            except ValueError:
                val = long(data)
        self.append(val)
    dispatch[INT] = load_int

    def load_binint(self):
        self.append(mloads('i' + self.read(4)))
    dispatch[BININT] = load_binint

    def load_binint1(self):
        self.append(ord(self.read(1)))
    dispatch[BININT1] = load_binint1

    def load_binint2(self):
        self.append(mloads('i' + self.read(2) + '\000\000'))
    dispatch[BININT2] = load_binint2

    def load_long(self):
        self.append(long(self.readline()[:-1], 0))
    dispatch[LONG] = load_long

    # Commented out because decode_long() depends on _binascii.
    #def load_long1(self):
    #def load_long4(self):

    def load_float(self):
        self.append(float(self.readline()[:-1]))
    dispatch[FLOAT] = load_float

    # Commented out because of struct.unpack.
    #def load_binfloat(self):

    def load_string(self):
        rep = self.readline()[:-1]
        for q in "\"'":  # double or single quote
            if rep.startswith(q):
                if len(rep) < 2 or not rep.endswith(q):
                    raise ValueError, "insecure string pickle"
                rep = rep[len(q):-len(q)]
                break
        else:
            raise ValueError, "insecure string pickle"
        self.append(rep.decode("string-escape"))
    dispatch[STRING] = load_string

    def load_binstring(self):
        len = mloads('i' + self.read(4))
        self.append(self.read(len))
    dispatch[BINSTRING] = load_binstring

    def load_unicode(self):
        self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
    dispatch[UNICODE] = load_unicode

    def load_binunicode(self):
        len = mloads('i' + self.read(4))
        self.append(unicode(self.read(len),'utf-8'))
    dispatch[BINUNICODE] = load_binunicode

    def load_short_binstring(self):
        len = ord(self.read(1))
        self.append(self.read(len))
    dispatch[SHORT_BINSTRING] = load_short_binstring

    def load_tuple(self):
        k = self.marker()
        self.stack[k:] = [tuple(self.stack[k+1:])]
    dispatch[TUPLE] = load_tuple

    def load_empty_tuple(self):
        self.stack.append(())
    dispatch[EMPTY_TUPLE] = load_empty_tuple

    def load_tuple1(self):
        self.stack[-1] = (self.stack[-1],)
    dispatch[TUPLE1] = load_tuple1

    def load_tuple2(self):
        self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
    dispatch[TUPLE2] = load_tuple2

    def load_tuple3(self):
        self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
    dispatch[TUPLE3] = load_tuple3

    def load_empty_list(self):
        self.stack.append([])
    dispatch[EMPTY_LIST] = load_empty_list

    def load_empty_dictionary(self):
        self.stack.append({})
    dispatch[EMPTY_DICT] = load_empty_dictionary

    def load_list(self):
        k = self.marker()
        self.stack[k:] = [self.stack[k+1:]]
    dispatch[LIST] = load_list

    def load_dict(self):
        k = self.marker()
        d = {}
        items = self.stack[k+1:]
        for i in xrange(0, len(items), 2):
            key = items[i]
            value = items[i+1]
            d[key] = value
        self.stack[k:] = [d]
    dispatch[DICT] = load_dict

    # INST and OBJ differ only in how they get a class object.  It's not
    # only sensible to do the rest in a common routine, the two routines
    # previously diverged and grew different bugs.
    # klass is the class to instantiate, and k points to the topmost mark
    # object, following which are the arguments for klass.__init__.
    def _instantiate(self, klass, k):
        args = tuple(self.stack[k+1:])
        del self.stack[k:]
        instantiated = 0
        if (not args and
                type(klass) is ClassType and
                not hasattr(klass, "__getinitargs__")):
            try:
                value = _EmptyClass()
                value.__class__ = klass
                instantiated = 1
            except RuntimeError:
                # In restricted execution, assignment to inst.__class__ is
                # prohibited
                pass
        if not instantiated:
            try:
                value = klass(*args)
            except TypeError, err:
                raise TypeError, "in constructor for %s: %s" % (
                    klass.__name__, str(err)), sys.exc_info()[2]
        self.append(value)

    def load_inst(self):
        module = self.readline()[:-1]
        name = self.readline()[:-1]
        klass = self.find_class(module, name)
        self._instantiate(klass, self.marker())
    dispatch[INST] = load_inst

    def load_obj(self):
        # Stack is ... markobject classobject arg1 arg2 ...
        k = self.marker()
        klass = self.stack.pop(k+1)
        self._instantiate(klass, k)
    dispatch[OBJ] = load_obj

    def load_newobj(self):
        args = self.stack.pop()
        cls = self.stack[-1]
        obj = cls.__new__(cls, *args)
        self.stack[-1] = obj
    dispatch[NEWOBJ] = load_newobj

    def load_global(self):
        module = self.readline()[:-1]
        name = self.readline()[:-1]
        klass = self.find_class(module, name)
        self.append(klass)
    dispatch[GLOBAL] = load_global

    def find_class(self, module, name):
        # Subclasses may override this
        __import__(module)
        mod = sys.modules[module]
        klass = getattr(mod, name)
        return klass

    def load_reduce(self):
        stack = self.stack
        args = stack.pop()
        func = stack[-1]
        value = func(*args)
        stack[-1] = value
    dispatch[REDUCE] = load_reduce

    def load_pop(self):
        del self.stack[-1]
    dispatch[POP] = load_pop

    def load_pop_mark(self):
        k = self.marker()
        del self.stack[k:]
    dispatch[POP_MARK] = load_pop_mark

    def load_dup(self):
        self.append(self.stack[-1])
    dispatch[DUP] = load_dup

    def load_get(self):
        self.append(self.memo[self.readline()[:-1]])
    dispatch[GET] = load_get

    def load_binget(self):
        i = ord(self.read(1))
        self.append(self.memo[repr(i)])
    dispatch[BINGET] = load_binget

    def load_long_binget(self):
        i = mloads('i' + self.read(4))
        self.append(self.memo[repr(i)])
    dispatch[LONG_BINGET] = load_long_binget

    def load_put(self):
        self.memo[self.readline()[:-1]] = self.stack[-1]
    dispatch[PUT] = load_put

    def load_binput(self):
        i = ord(self.read(1))
        self.memo[repr(i)] = self.stack[-1]
    dispatch[BINPUT] = load_binput

    def load_long_binput(self):
        i = mloads('i' + self.read(4))
        self.memo[repr(i)] = self.stack[-1]
    dispatch[LONG_BINPUT] = load_long_binput

    def load_append(self):
        stack = self.stack
        value = stack.pop()
        list = stack[-1]
        list.append(value)
    dispatch[APPEND] = load_append

    def load_appends(self):
        stack = self.stack
        mark = self.marker()
        list = stack[mark - 1]
        list.extend(stack[mark + 1:])
        del stack[mark:]
    dispatch[APPENDS] = load_appends

    def load_setitem(self):
        stack = self.stack
        value = stack.pop()
        key = stack.pop()
        dict = stack[-1]
        dict[key] = value
    dispatch[SETITEM] = load_setitem

    def load_setitems(self):
        stack = self.stack
        mark = self.marker()
        dict = stack[mark - 1]
        for i in xrange(mark + 1, len(stack), 2):
            dict[stack[i]] = stack[i + 1]
        del stack[mark:]
    dispatch[SETITEMS] = load_setitems

    def load_build(self):
        stack = self.stack
        state = stack.pop()
        inst = stack[-1]
        setstate = getattr(inst, "__setstate__", None)
        if setstate:
            setstate(state)
            return
        slotstate = None
        if isinstance(state, tuple) and len(state) == 2:
            state, slotstate = state
        if state:
            try:
                d = inst.__dict__
                try:
                    for k, v in state.iteritems():
                        d[intern(k)] = v
                # keys in state don't have to be strings
                #   don't blow up, but don't go out of our way
                except TypeError:
                    d.update(state)
            except RuntimeError:
                # XXX In restricted execution, the instance's __dict__
                # is not accessible.  Use the old way of unpickling
                # the instance variables.  This is a semantic
                # difference when unpickling in restricted
                # vs. unrestricted modes.
                # Note, however, that cPickle has never tried to do the
                # .update() business, and always uses
                #   PyObject_SetItem(inst.__dict__, key, value) in a
                # loop over state.items().
                for k, v in state.items():
                    setattr(inst, k, v)
        if slotstate:
            for k, v in slotstate.items():
                setattr(inst, k, v)
    dispatch[BUILD] = load_build

    def load_mark(self):
        self.append(self.mark)
    dispatch[MARK] = load_mark

    def load_stop(self):
        value = self.stack.pop()
        raise _Stop(value)
    dispatch[STOP] = load_stop


def load_v2_subset(f):
    """Turn a pickle into an object.  Handles a subset of the v2 protocol."""
    return Unpickler(f).load()
from random import choice
from time import sleep

# Pool of participants for the hot-seat draw.
STUDENTS = [
    'Name 1',
    'Name 2',
    'Name 3',
]


def rien_ne_va_plus(hot_seats, safe_students):
    """Draw one student from the hot seats and move them to the safe list.

    When the hot-seat pool is empty, a fresh round starts with every student
    back in play and nobody safe. Returns the updated
    (hot_seats, safe_students) pair.
    """
    if not hot_seats:
        # New round: reset both lists from the master roster.
        hot_seats = STUDENTS.copy()
        safe_students = []

    print(f"Students on the hot seats for the next question:\n\t{', '.join(hot_seats)}\n")
    sleep(1)
    print(f"Safe students (for now):\n\t{'Nobody 😈' if safe_students == [] else ', '.join(safe_students)}\n")
    sleep(1)

    # Pick the victim and move them between the two lists in place.
    the_chosen_one = choice(hot_seats)
    hot_seats.remove(the_chosen_one)
    safe_students.append(the_chosen_one)

    # Dramatic pause: three dots, one second apart, before the reveal.
    print("The chosen one is", end=" ")
    for _ in range(3):
        print('.', end="")
        sleep(1)

    print(f" {choice(['🎉', '🥳', '🎊'])} {the_chosen_one.upper()} {choice(['🎉', '🥳', '🎊'])}")
    return hot_seats, safe_students
# Mapping of the unit digits (and 10) to Roman numerals.
rzymskie = {
    1: 'I', 2: 'II', 3: 'III', 4: "IV", 5: "V",
    6: "VI", 7: "VII", 8: "VIII", 9: 'IX', 10: "X"}


def na_rzymskie(cyfra):
    """Convert a decimal number string (1-99) to its Roman numeral.

    Fixes two crashes in the original script: one-digit inputs (there is no
    second-to-last character to index) and numbers whose units digit is 0
    (0 is not a key of ``rzymskie``).

    :param cyfra: decimal number as a string (or int), 1-99
    :return: Roman numeral string (tens written as a run of 'X')
    :raises ValueError: if the number is outside 1-99
    """
    liczba = int(cyfra)
    if not 1 <= liczba <= 99:
        raise ValueError("obsługiwany zakres to 1-99")
    dziesiatki, jednosci = divmod(liczba, 10)
    # Tens become a run of 'X'; a zero units digit contributes nothing.
    return 'X' * dziesiatki + (rzymskie[jednosci] if jednosci else '')


if __name__ == '__main__':
    cyfra = input("podaj liczbę: ")
    print(na_rzymskie(cyfra))
#!/usr/bin/python """ Script to make light budget calculations for permitted cable lengths for a given TAP or appropriate TAP for current link. """ from decimal import Decimal, getcontext def menu(): """ High level menu for available options. """ option = raw_input("""\nWhat would you like to do: 1 - Calculate the max allowed coupler loss for inserting a TAP into a link 2 - Calculate the max allowed cable length for a given TAP split ratio in a link 3 - Display ethernet fiber standards and max cabling distance 4 - Exit Enter your selection: """) if option not in ('1', '2', '3', '4'): print "That is not a valid input" menu() if option == '1': max_split() elif option == '2': max_cable() elif option == '3': ethernet_table() elif option == '4': print "Goodbye" exit() else: print 'Something went wrong!' exit() def max_split(): """ Determine the maximum split ratio that can be used on a given link. """ getcontext().prec = 5 sender = Decimal(raw_input("\nwhat is the sender transmit power (dB): ")) receiver = Decimal(raw_input("\nWhat is the receiver sensitivity (dB): ")) link_loss_budget = sender - receiver print "\nThe Power Link Loss Budget for this link is %sdB" % link_loss_budget mode = raw_input("""\nSingle Mode or Multi Mode fiber? 1 - Single Mode 2 - Multi Mode Enter the number of your selection: """) if mode not in ('1', '2'): print "\nThat is not a valid selection" menu() connectors = Decimal(raw_input("\nHow many connectors are in the path of the link: ")) if mode == '1': connector_loss = Decimal('0.2') * connectors mode_type = "Single Mode" wave = raw_input("""\nWhat is the wavelength being used? 1 - 1310nm 2 - 1550nm Enter the number of your selection: """) if wave not in ('1', '2'): print "\nThat is not a valid selection" menu() if wave == '1': wavelength = 1310 else: wavelength = 1550 else: connector_loss = Decimal('0.5') * connectors mode_type = "Multi Mode" wave = raw_input("""\nWhat is the wavelength being used? 
1 - 850nm 2 - 1300nm Enter the number of your selection: """) if wave not in ('1', '2'): print "\nThat is not a valid selection" menu() if wave == '1': wavelength = 850 else: wavelength = 1300 print ("\nThe total loss introduced for the %s link by connectors " "is %sdB\n" % (mode_type, connector_loss)) cable = int(raw_input("What is the cable length from the sender to the receiver in meters: ")) if mode_type == 'Single Mode' and wavelength == 1310: attenuation = Decimal('0.4') elif mode_type == 'Single Mode' and wavelength == 1500: attenuation = Decimal('0.3') elif mode_type == 'Multi Mode' and wavelength == 850: attenuation = Decimal('3.0') elif mode_type == 'Multi Mode' and wavelength == 1300: attenuation = Decimal('1.0') else: print "Something went wrong." cable_loss = Decimal(cable / 1000.000) * attenuation print """\nThe loss introduced by the length of cable for the %s %s link is %sdB based on %sdB/km fiber attenuation. \n""" % (mode_type, wavelength, cable_loss, attenuation) total_cable_loss = connector_loss + cable_loss print "The total connection loss is %sdB\n" % total_cable_loss allowable_loss = link_loss_budget - total_cable_loss print """The allowable coupler loss for a TAP is a %sdB maximum at the monitor port\n""" % allowable_loss choice = raw_input("""Reference which TAP insertion loss values? 
1 - Industry Standard 2 - Cubro Average Enter your selection: """) if choice not in ('1', '2'): print "\nThat is not a valid selection" menu() if choice == '1': match_industry(mode_type, allowable_loss) if choice == '2': match_cubro(mode_type, allowable_loss) def match_industry(mode, loss): """ Determine available TAP options using industry standard values.""" #Maximum recommended values taps_mm = {'50/50': {'Network': '4.5', 'Monitor': '4.5'}, '60/40': {'Network': '3.1', 'Monitor': '5.1'}, '70/30': {'Network': '2.4', 'Monitor': '6.3'}, '80/20': {'Network': '1.8', 'Monitor': '8.1'}, '90/10': {'Network': '1.3', 'Monitor': '11.5'}} taps_sm = {'50/50': {'Network': '3.7', 'Monitor': '3.7'}, '60/40': {'Network': '2.8', 'Monitor': '4.8'}, '70/30': {'Network': '2.0', 'Monitor': '6.1'}, '80/20': {'Network': '1.3', 'Monitor': '8.0'}, '90/10': {'Network': '0.8', 'Monitor': '12.0'}} usable = [] if mode == 'Single Mode': for split in taps_sm: if float(taps_sm[split]['Monitor']) < float(loss): usable.append(split) elif mode == 'Multi Mode': for split in taps_mm: if float(taps_mm[split]['Monitor']) < float(loss): usable.append(split) else: print "\nSomething went wrong" print """\nThe following split ratios are acceptable for this link %s""" % usable menu() def match_cubro(mode, loss): """ Determine available TAP options for a given link using Cubro values.""" #Adjusted average Cubro values taps_mm = {'50/50': {'Network': '4.5', 'Monitor': '4.5'}, '60/40': {'Network': '3.1', 'Monitor': '5.1'}, '70/30': {'Network': '2.4', 'Monitor': '6.3'}, '80/20': {'Network': '1.8', 'Monitor': '8.1'}, '90/10': {'Network': '1.3', 'Monitor': '11.5'}} taps_sm = {'50/50': {'Network': '3.6', 'Monitor': '3.5'}, '60/40': {'Network': '2.8', 'Monitor': '4.8'}, '70/30': {'Network': '2.0', 'Monitor': '6.1'}, '80/20': {'Network': '1.3', 'Monitor': '8.0'}, '90/10': {'Network': '0.8', 'Monitor': '12.0'}} usable = [] if mode == 'Single Mode': for split in taps_sm: if float(taps_sm[split]['Monitor']) < 
float(loss): usable.append(split) elif mode == 'Multi Mode': for split in taps_mm: if float(taps_mm[split]['Monitor']) < float(loss): usable.append(split) else: print "\nSomething went wrong" print """\nThe following split ratios are acceptable for this link %s""" % usable menu() def max_cable(): """ Function to determine max cable length for a given link + connectors.""" getcontext().prec = 5 sender = Decimal(raw_input("\nwhat is the sender transmit power (dB): ")) receiver = Decimal(raw_input("\nWhat is the receiver sensitivity (dB): ")) link_loss_budget = sender - receiver print "\nThe Power Link Loss Budget for this link is %sdB" % link_loss_budget mode = raw_input("""\nSingle Mode or Multi Mode fiber? 1 - Single Mode 2 - Multi Mode Enter the number of your selection: """) if mode not in ('1', '2'): print "\nThat is not a valid selection." menu() connectors = Decimal(raw_input("\nHow many connectors are in the path of the link: ")) if mode == '1': connector_loss = Decimal('0.2') * connectors mode_type = "Single Mode" print """\nWhat is the wavelength being used? 1 - 1310nm 2 - 1550nm""" wave = raw_input("\nEnter the number of your selection: ") if wave not in ('1', '2'): print "\nThat is not a valid selection." if wave == '1': wavelength = 1310 else: wavelength = 1550 else: connector_loss = Decimal('0.5') * connectors mode_type = "Multi Mode" wave = raw_input("""\nWhat is the wavelength being used? 1 - 850nm 2 - 1300nm Enter the number of your selection: """) if wave not in ('1', '2'): print "That is not a valid selection." menu() if wave == '1': wavelength = 850 else: wavelength = 1300 print ("\nThe total loss introduced for the %s link by " "connectors is %sdB\n" % (mode_type, connector_loss)) split = raw_input("""\nWhat is the split ratio of the TAP? 
1 - 50/50 2 - 60/40 3 - 70/30 4 - 80/20 5 - 90/10 Enter the number of your selection: """) if split not in ('1', '2', '3', '4', '5'): print "That is not a valid input for split ratio" menu() split_ratios = {'1': '50/50', '2': '60/40', '3': '70/30', '4': '80/20', '5': '90/10'} ratio = split_ratios[split] taps_mm = {'50/50': {'Network': '4.5', 'Monitor': '4.5'}, '60/40': {'Network': '3.1', 'Monitor': '5.1'}, '70/30': {'Network': '2.4', 'Monitor': '6.3'}, '80/20': {'Network': '1.8', 'Monitor': '8.1'}, '90/10': {'Network': '1.3', 'Monitor': '11.5'}} taps_sm = {'50/50': {'Network': '3.7', 'Monitor': '3.7'}, '60/40': {'Network': '2.8', 'Monitor': '4.8'}, '70/30': {'Network': '2.0', 'Monitor': '6.1'}, '80/20': {'Network': '1.3', 'Monitor': '8.0'}, '90/10': {'Network': '0.8', 'Monitor': '12.0'}} if mode_type == 'Single Mode': for value in taps_sm: if ratio == value: network = Decimal(taps_sm[value]['Network']) monitor = Decimal(taps_sm[value]['Monitor']) elif mode_type == 'Multi Mode': for value in taps_mm: if ratio == value: network = Decimal(taps_mm[value]['Network']) monitor = Decimal(taps_mm[value]['Monitor']) else: print 'Something went wrong' total_loss_net = link_loss_budget - connector_loss - network total_loss_mon = link_loss_budget - connector_loss - monitor if mode_type == 'Single Mode' and wavelength == 1310: attenuation = Decimal('0.4') elif mode_type == 'Single Mode' and wavelength == 1500: attenuation = Decimal('0.3') elif mode_type == 'Multi Mode' and wavelength == 850: attenuation = Decimal('3.0') elif mode_type == 'Multi Mode' and wavelength == 1300: attenuation = Decimal('1.0') else: print "Something went wrong." 
cable_net = 1 cable_loss_net = Decimal(cable_net * (attenuation / 1000)) while total_loss_net - cable_loss_net > 0: cable_net += 1 cable_loss_net = Decimal(cable_net * (attenuation / 1000)) cable_mon = 1 cable_loss_mon = Decimal(cable_mon * (attenuation / 1000)) while total_loss_mon - cable_loss_mon > 0: cable_mon += 1 cable_loss_mon = Decimal(cable_mon * (attenuation / 1000)) cable_by_eth_standard(mode_type, cable_net, cable_mon) def cable_by_eth_standard(mode_type, cable_net, cable_mon): """ Determines what the maximum cable length could be given a Ethernet fiber standard.""" if mode_type == 'Multi Mode': standard_type = raw_input("""\nWhat is the Ethernet Standard in use? 1 - OM1-SX 2 - OM1-LX 3 - OM2 4 - OM3 5 - OM4 Enter the number of your selection: """) if standard_type not in ('1', '2', '3', '4', '5'): print "That is not a valid selection." menu() standard_table = {'1': 'OM1-SX', '2': 'OM1-LX', '3': 'OM2', '4': 'OM3', '5': 'OM4'} standard = standard_table[standard_type] speed = raw_input("""\nWhat speed is the link? 1 - 100M 2 - 1G 3 - 10G 4 - 40G 5 - 100G Enter the number of your selection: """) if speed not in ('1', '2', '3', '4', '5'): print "That is not a valid selection." 
menu() speed_table = {'1': '100M', '2': '1G', '3': '10G', '4': '40G', '5': '100G'} link_speed = speed_table[speed] table = { 'Single Mode':{'100M': 2000, '1G': 5000, '10G': 10000, '40G': 'Unknown', '100G': 'Unknown'}, 'Multi Mode': {'OM1-SX': {'100M': 2000, '1G': 275, '10G': 33}, 'OM1-LX': {'100M': 2000, '1G': 550, '10G': 33}, 'OM2': {'100M': 2000, '1G': 550, '10G': 82}, 'OM3': {'100M': 2000, '1G': 550, '10G': 300, '40G': 100, '100G': 100}, 'OM4': {'100M': 2000, '1G': 550, '10G': 400, '40G': 150, '100G': 150} }} if mode_type == 'Single Mode': try: max_standard_length = table['Single Mode'][link_speed] if cable_net > max_standard_length: cable_net = max_standard_length if cable_mon > max_standard_length: cable_mon = max_standard_length except (KeyError, ValueError) as reason: print ("That standard does not support that speed.", reason) if mode_type == 'Multi Mode': try: max_standard_length = table['Multi Mode'][standard][link_speed] if cable_net > max_standard_length: cable_net = max_standard_length if cable_mon > max_standard_length: cable_mon = max_standard_length except KeyError as reason: print ("That standard does not support that speed.", reason) print ("\nThe maximum combined cable length from sender to TAP and from " "TAP to receiver is %s meters" % cable_net) print ("\nThe maximum combined cable length from sender to TAP and from " "TAP monitor to tool is %s meters" % cable_mon) menu() def ethernet_table(): """ Display table of fiber standards. 
""" print """Ethernet Fiber Standards and max cabling distance: ________________________________________________________________________________________________________________ | | Core/ | | FastEthernet | 1G Ethernet | 1G Ethernet | 10G | 40G | 100G | | Name | Cladding | Type | 100BaseFX | 1000Base-SX | 1000Base-LX | 10GBase | 40GBase | 100GBase | | ________|____________|______|______________|_______________|_______________|___________|___________|___________| | OM1 | 62.5/125 | MM | 2000M | 275M | 550M* | 33M | NA | NA | |_________|____________|______|______________|_______________|_______________|___________|___________|___________| | OM2 | 62.5/125 | MM | 2000M | 550M | 550M* | 82M | NA | NA | |_________|____________|______|______________|_______________|_______________|___________|___________|___________| | OM3 | 50/125 | MM | 2000M | 550M | 550M | 300M | 100M | 100M | |_________|____________|______|______________|_______________|_______________|___________|___________|___________| | OM4 | 50/125 | MM | 2000M | 550M | 550M | 400M | 150M | 150M | |_________|____________|______|______________|_______________|_______________|___________|___________|___________| | | | | | 5km @ | 5km @ | 10km @ | | | | SM | 9/125 | SM | 2000M | 1310nm | 1310nm | 1310nm | | | |_________|____________|______|______________|_______________|_______________|___________|___________|___________| *mode condition patch cable required """ menu() if __name__ == '__main__': menu()
class Node(object):
    """A singly linked list cell: a payload plus a reference to the next cell."""

    def __init__(self, data=None, next_node=None):
        self.data = data        # payload held by this node
        self.next = next_node   # following node, or None at the tail

    def __str__(self):
        return str(self.data)

    def get_data(self):
        """Return the node's payload."""
        return self.data


def print_list(node):
    """Print each payload from ``node`` to the end of the list, one per line."""
    current = node
    while current is not None:
        print(current.get_data())
        current = current.next
class Config(object):
    """Flask/SQLAlchemy-style application configuration."""

    DEBUG = True  # NOTE(review): must be False in production
    # HACK: dev credentials hard-coded in source — move to environment variables.
    SQLALCHEMY_DATABASE_URI = "mysql://dev:dev@localhost/ssi"
    SQLALCHEMY_ECHO = False  # do not log every emitted SQL statement
    SECRET_KEY = "secret"  # HACK: placeholder secret — never ship this value
# Python 2 test module (StringIO, old-style tornado.gen coroutines).
from StringIO import StringIO

import tornado.gen
import tornado.testing
import tornado.web
from tornado.httpclient import HTTPResponse, HTTPRequest

from mainhandler import MainHandler


class MainHandlerTest(tornado.testing.AsyncHTTPTestCase):
    "Test fixture for MainHandler class"

    def __init__(self, *args, **kwargs):
        "Initialise fixture"
        super(MainHandlerTest, self).__init__(*args, **kwargs)
        # The mock datastore is injected into the handler via get_app().
        self._datastore = MockDataStore()
        # NOTE(review): these two attributes appear unused — the tests set
        # the mock responses directly on self._datastore instead.
        self._mock_google_response = None
        self._mock_yahoo_response = None

    def get_app(self):
        "Return a Tornado application instance that will be used for the tests"
        servicesDict = {'datastore': self._datastore}
        return tornado.web.Application([
            (r"/", MainHandler, servicesDict)
            ])

    def testSimpleGet(self):
        "Test simple GET"
        # Canned upstream responses; Content-Length matches the byte counts
        # asserted in the rendered page below (24 and 20).
        self._datastore.mock_google_response = HTTPResponse(HTTPRequest('http://www.google.com'), code=200, headers={'Content-Length': 24}, buffer=StringIO('<html><body>I Am Google</body></html>'))
        self._datastore.mock_yahoo_response = HTTPResponse(HTTPRequest('http://search.yahoo.com'), code=200, headers={'Content-Length': 20}, buffer=StringIO('<html><body>I Am Groot</body></html>'))
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
        self.assertIn('<h1>This is the Home Page!</h1>', response.body)
        self.assertIn('<p>24 bytes retrieved from backend 1, 20 bytes from backend 2</p>', response.body)
        self.assertNotIn('Error', response.body)


class MockDataStore(object):
    "Mock datastore"

    def __init__(self):
        "Initialise mock store"
        self.mock_google_response = None
        self.mock_yahoo_response = None

    @tornado.gen.coroutine
    def get_google(self):
        "Mock get google content"
        raise tornado.gen.Return(self.mock_google_response)

    @tornado.gen.coroutine
    def get_yahoo(self):
        "Mock get yahoo content"
        raise tornado.gen.Return(self.mock_yahoo_response)
import hashlib
import imghdr

import sec
import db_op

__all__ = ['get_type_by_stream', 'save_file', 'get_hashcode',
           'get_default', 'get_image']

_SAVE_PATH = '/Yagra/upload/'          # directory uploaded images are written to
_DEFAULT_IMG = '/Yagra/web/static/rex.jpeg'  # fallback avatar


def get_type_by_stream(stream):
    """Check the data stream and return what type of image it is.

    NOTE(review): imghdr is deprecated since Python 3.11 and removed in
    3.13 — confirm the target interpreter before upgrading.
    """
    return imghdr.what('', stream)


def save_file(data, username):
    """Save the image data into file and record the file path.

    The file is stored under the username (overwriting any previous upload)
    and the username's hash code is registered through db_op.
    """
    filepath = _SAVE_PATH + username
    with open(filepath, 'wb') as f:
        f.write(data)
    db_op.add_image(get_hashcode(username), username)


def get_hashcode(username):
    """Return MD5 hex digest of the lower case input string.

    NOTE(review): on Python 3, hashlib.md5 requires bytes — username must be
    encoded first there; verify which interpreter this runs under.
    """
    return hashlib.md5(username.lower()).hexdigest()


def get_default():
    """Return the file path of the default image."""
    return _DEFAULT_IMG


def get_image(hashcode):
    """Get the image file path by hash code.

    Returns None when the hash code fails validation or does not resolve to
    exactly one stored image.
    """
    if sec.check_hashcode(hashcode):
        images = db_op.get_image(hashcode)
        if len(images) == 1:
            filename = images[0][1]
            return _SAVE_PATH + filename
    return None
# Linear Regression Machine Learning Program
#
# Sources used:
# - https://towardsdatascience.com/master-machine-learning-multiple-linear-regression-from-scratch-with-python-ac716a9b78a4
# Sean Taylor Thomas

import numpy as np


def import_data(filename):
    """ Take data from txt file (whitespace-delimited rows of strings)."""
    dataset = list()
    with open(filename) as f:
        for line in f.readlines():
            dataset.append(line.split())
    return dataset


def str_column_to_float(dataset, column):
    """ Convert string column to float (in place). """
    for row in dataset:
        row[column] = float(row[column].strip())


class LinearRegression:
    """ Implementation of linear regression using gradient descent"""

    def __init__(self, l_rate=0.7, iterations=1000):
        self.l_rate = l_rate          # gradient-descent step size
        self.iterations = iterations  # number of descent steps
        self.weights = None           # learned coefficients, set by fit()
        self.bias = None              # learned intercept, set by fit()
        self.loss = []                # MSE recorded at every iteration

    @staticmethod
    def _mean_squared_error(y, y_hat):
        """ Evaluating loss at each iteration
        y = array of known values
        y_hat = array of predicted values
        returns float representing error"""
        return float(np.mean((np.asarray(y) - np.asarray(y_hat)) ** 2))

    def fit(self, X, y):
        """Learn weights and bias by full-batch gradient descent on MSE."""
        self.weights = np.zeros(X.shape[1])
        self.bias = 0
        for _ in range(self.iterations):
            y_hat = np.dot(X, self.weights) + self.bias
            self.loss.append(self._mean_squared_error(y, y_hat))
            # Gradients of MSE w.r.t. weights and bias.
            deriv_w = (1 / X.shape[0]) * (2 * np.dot(X.T, (y_hat - y)))
            deriv_d = (1 / X.shape[0]) * (2 * np.sum(y_hat - y))
            self.weights -= self.l_rate * deriv_w
            self.bias -= self.l_rate * deriv_d

    def predict(self, X):
        """Return predictions for feature matrix (or single feature row) X."""
        return np.dot(X, self.weights) + self.bias


def main():
    """Train on housing.data, plot the loss curve, then offer an
    interactive prediction prompt."""
    # Imported lazily so the module can be imported without a display
    # backend or sklearn installed.
    import matplotlib.pyplot as plt
    from sklearn.model_selection import train_test_split

    # (The original also loaded sklearn's diabetes dataset here and then
    # immediately overwrote it — dead code, removed.)
    filename = 'housing.data'
    x = import_data(filename)
    for i in range(len(x[0])):
        str_column_to_float(x, i)

    # Last column is the target; strip it from the feature rows.
    y = list()
    for row in x:
        y.append(row[-1])
        row.remove(row[-1])

    # Put into numpy arrays and normalize the features.
    x = np.array(x)
    y = np.array(y)
    xnorm = np.linalg.norm(x)
    x = x / xnorm

    # BUG FIX: the original unpacked the test target into 't_test' (typo).
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=.2, random_state=42)

    model = LinearRegression()
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)
    print(x_test)
    print(predictions)

    # Plotting our loss over iterations
    xs = np.arange(len(model.loss))
    plt.plot(xs, model.loss, lw=3, c='#0033a3')
    plt.title('Loss per iteration(MSE)', size=20)
    plt.xlabel('Iteration', size=14)
    plt.ylabel('Loss', size=14)
    plt.show()

    # User predictions:
    num_cols = x.shape[1]
    user_input = input("Would you like to provide input for prediction? y/n")
    attrs = list()
    idx = 0
    while user_input == 'y' and idx < num_cols:
        user_x = input("Attribute %d : " % idx)
        # Skip blank entries (original only special-cased '', ' ' and '\n').
        if user_x.strip():
            attrs.append(float(user_x))
        idx += 1
    if user_input == 'y':
        # BUG FIX: the original divided a plain Python list by a float
        # (TypeError); convert to an array before normalizing.
        sample = np.array(attrs) / xnorm
        print(sample)
        print("Prediction : ", model.predict(sample))


if __name__ == '__main__':
    # BUG FIX: the original ran all of this at import time.
    main()
# Demonstrate collections.deque as a FIFO queue (Python 2 print statements).
from collections import deque

queue = deque(["Eric", "John", "Michael"])
queue.append("Ben")    # enqueue at the right end
queue.append("Helen")
print queue
print queue.popleft()  # dequeue from the left end in O(1) — prints "Eric"
print queue
import numpy as np

# Creating a 1-d array
li = [1, 2, 3]
arr = np.array(li)
print(arr)

# Creating a 2-d array
li = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
arr = np.array(li)
print(arr)

# Creating an array of a number sequence
arr = np.arange(0, 10)
print(arr)
arr = np.arange(0, 10, step=2)
print(arr)

# Creating a 1-d array of zeros
arr = np.zeros(3)
print(arr)

# Creating a 2-d array of zeros (shape is passed as a tuple)
my_tuple = (5, 5)
arr = np.zeros(my_tuple)
print(arr)

# Creating a 1-d array of ones
arr = np.ones(8)
print(arr)

# Creating a 2-d array of ones
arr = np.ones((3, 5))
print(arr)

# Creating an identity matrix
arr = np.eye(5)
print(arr)

# Creating an evenly spaced array between an initial and a final number,
# specifying how many elements lie between them (inclusive)
initial = 0
final = 1
n_numbers = 3
arr = np.linspace(initial, final, n_numbers)
print(arr)

# Creating random numbers between 0 and 1, each equally probable,
# extracted from a uniform distribution
# 1-dimensional
n_numbers = 5
arr = np.random.rand(n_numbers)
print(arr)
# n-dimensional
arr = np.random.rand(10, 5)
print(arr)

# Creating random numbers extracted from a (standard) normal distribution
# BUG FIX: the original used np.random.rand (uniform) here, contradicting
# its own comment; np.random.randn samples from a normal distribution.
# 1-dimensional
arr = np.random.randn(5)
print(arr)
# n-dimensional
arr = np.random.randn(10, 5)
print(arr)

# Creating random integers: 10 random integers between 0 and 99
arr = np.random.randint(0, 100, 10)
print(arr)  # BUG FIX: the original never displayed this array

# Round the numbers of the array
arr = np.random.rand(5) * 100
arr_2 = np.round(arr, decimals=0)
print(arr_2)

# Reshape
arr = np.random.rand(25)
print(arr)
tpl: tuple = (5, 5)
arr_2 = arr.reshape(tpl)
print(arr_2)
print(arr_2.shape)  # shape attribute

# Find the max/min value of an array
arr = np.random.rand(2)
max_value = arr.max()
min_value = arr.min()
print(arr)
print(max_value)
print(arr, min_value)

# Find the index of the max/min value of an array
arr = np.random.rand(2)
max_value = arr.argmax()
min_value = arr.argmin()
print(arr)
print(max_value)
print(min_value)

"""
Indexing and Slicing arrays
"""
# Select an element by index
arr = np.arange(0, 30, 3)
number = arr[4]
print(number)

# Slicing: select multiple elements, like a list
arr = np.arange(0, 30, 3)
numbers = arr[0:4]  # elements with index in [0, 4)
print(numbers)
numbers = arr[:4]   # first 4 elements
print(numbers)
numbers = arr[4:]   # elements from index 4 onwards
print(numbers)

# Assigning a value to a slice of an array
# (the original repeated this block twice verbatim — deduplicated)
arr_2 = arr.copy()
arr_2[4:] = 100
print(arr_2)

# Slicing n-dim arrays
arr = np.arange(50).reshape(5, 10)
print(arr)
print(arr.shape)


def compare_arrays(arr, arr_2, arr_3):
    """Compare two slices element-wise, then mutate arr_2 to show whether it
    shares memory with the original array ``arr``."""
    print(arr_2, id(arr_2))
    print(arr_3, id(arr_3))
    arr_boolean = arr_2 == arr_3  # element-wise comparison
    boolean: bool = arr_boolean.all()  # True only if every element matches
    print(arr_boolean)
    print(boolean)
    arr_2[:] = 100  # set every element of the slice to 100
    print(arr_2)
    print(arr)


# way 01: arr[lines][columns]
arr_2 = arr.copy()[:3][:]  # copy() so mutating arr_2/arr_3 leaves arr intact
arr_3 = arr.copy()[:3]
compare_arrays(arr, arr_2, arr_3)

# way 02: comma notation
arr_2 = arr.copy()[1:4, ]
arr_3 = arr.copy()[1:4, :]
compare_arrays(arr, arr_2, arr_3)

# Select items by logic operations on arrays
arr = np.arange(100).reshape(10, 10)
bol = arr > 50
arr_2 = arr.copy()[bol]
print(arr)
print(bol)
print(arr_2)

"""
Numpy Array Operations
"""
arr = np.arange(0, 16)

# sum
arr_2 = arr + arr
print(arr_2)

# sub
arr_2 = arr - arr
print(arr_2)

# multiplication
arr_2 = arr * arr
print(arr_2)

# division
arr_2 = arr / arr
"""
Unlike scalar 0/0 (ZeroDivisionError), array division emits a
RuntimeWarning ("invalid value encountered in true_divide") and yields nan:
[nan  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.]
"""

# Index by index division
arr_3 = 1 / arr
"""
1/0 likewise does not raise for arrays; numpy warns and yields inf instead.
"""
print(arr_2)

# Exponentiation: index by index
# (behaves the same way as the +, -, * and / operations)
arr_2 = arr ** 2
print(arr_2)

# square root
arr_2 = np.sqrt(arr)

# exponential of the whole array
arr_2 = np.exp(arr)

# mean
mean: np.float64 = np.mean(arr)

# standard deviation
std: np.float64 = np.std(arr)

# sin — element-wise, so the result is an array
# (BUG FIX: the original annotated this as np.float64)
arr_2 = np.sin(arr)

# the maximum value of the array
max_value: np.int64 = np.max(arr)
max_value: np.int64 = arr.max()

# the minimum value of the array
min_value: np.int64 = np.min(arr)
min_value: np.int64 = arr.min()
def is_palindrome_permutation(strng):
    """Return True if some permutation of ``strng`` is a palindrome.

    Case-insensitive: a palindrome permutation exists iff at most one
    character occurs an odd number of times.
    """
    odd_chars = set()
    for ch in strng.lower():
        # Toggle membership: ch stays in the set only while its count is odd.
        if ch in odd_chars:
            odd_chars.remove(ch)
        else:
            odd_chars.add(ch)
    return len(odd_chars) <= 1


is_palindrome_permutation('RACECAR')
import cv2


def bgr8_to_jpeg(value):
    """Encode a BGR8 image array as JPEG bytes.

    Returns empty bytes when given None instead of raising.
    """
    if value is None:
        return bytes()
    # imencode returns (retval, buffer); only the encoded buffer is needed.
    _, encoded = cv2.imencode('.jpg', value)
    return bytes(encoded)
from soda.execution.table import Table


class Column:
    """A single column of a Table participating in a data source scan."""

    def __init__(self, table: "Table", column_name: str):
        # Imported locally to avoid a circular import at module load time.
        from soda.sodacl.column_configurations_cfg import ColumnConfigurationsCfg

        self.data_source_scan = table.data_source_scan
        self.table = table
        self.column_name = str(column_name)
        # Populated later via set_column_configuration_cfg().
        self.column_configurations_cfg: ColumnConfigurationsCfg = None

    def set_column_configuration_cfg(self, column_configurations_cfg: "ColumnConfigurationsCfg"):
        """Attach the SodaCL configuration parsed for this column."""
        self.column_configurations_cfg = column_configurations_cfg

    @classmethod
    def get_partition_name(cls, column):
        """Return the column's name, or None when ``column`` is not a Column."""
        return column.column_name if isinstance(column, Column) else None
import numpy as np
import sys

from ..mike_model.tariff import Tariff


class Customer:
    """Can be resident, strata body, or ENO representing aggregation of residents.

    Holds the per-timestep energy flows (imports, exports, local consumption)
    and the money that follows from them (import charges, bills, NPV).
    """

    def __init__(self, name, study, timeseries):
        self.name = name
        self.study = study
        self.ts = timeseries
        self.tariff_data = self.study.tariff_data
        # Capex repayments / opex in $ per period (energy amounts below are in c)
        self.en_capex_repayment = 0
        self.en_opex = 0
        self.bat_capex_repayment = 0
        # Per-timestep energy arrays, one slot per timeseries step
        self.exports = np.zeros(self.ts.get_num_steps())
        self.imports = np.zeros(self.ts.get_num_steps())
        # self.local_exports = np.zeros(ts.get_num_steps())  # not used, available for local trading
        self.solar_allocation = np.zeros(self.ts.get_num_steps())  # used for allocation of local generation
        self.local_consumption = np.zeros(self.ts.get_num_steps())
        self.flows = np.zeros(self.ts.get_num_steps())  # net generation - load each step
        self.cash_flows = np.zeros(self.ts.get_num_steps())
        self.import_charge = np.zeros(self.ts.get_num_steps())
        self.local_solar_bill = 0
        self.total_payment = 0
        # TODO My linter wants everything instantiated within the init
        self.load = None
        self.coincidence = None
        self.tariff_id = None
        self.scenario = None
        self.tariff = None
        self.generation = None
        self.demand_charge = None
        self.npv = None
        self.energy_bill = None  # as 1-d np.array
        # NOTE(review): self.has_battery, self.battery and self.pv_capex_repayment
        # are read by methods below but never initialised here -- presumably set
        # during scenario setup elsewhere; confirm.

    def initialise_customer_load(self, customer_load):
        """Set customer load, energy flows and cashflows to zero."""
        self.load = customer_load
        # used for calculating self-consumption and self sufficiency
        self.coincidence = np.zeros(self.ts.get_num_steps())

    def initialise_customer_tariff(self, customer_tariff_id, scenario):
        """Attach the tariff identified by customer_tariff_id for this scenario."""
        self.tariff_id = customer_tariff_id
        self.scenario = scenario
        self.tariff = Tariff(tariff_id=self.tariff_id, scenario=scenario)

    def initialise_customer_pv(self, pv_generation):  # 1-D array
        """Attach this customer's PV generation profile (1-D array, kWh/step)."""
        self.generation = pv_generation

    def calc_static_energy(self):
        """Calculate Customer imports and exports for whole time period."""
        self.flows = self.generation - self.load
        self.exports = self.flows.clip(0)            # positive net flow -> export
        self.imports = (-1 * self.flows).clip(0)     # negative net flow -> import
        # # Calculate local quota here??
        # self.solar_allocation = np.minimum(self.imports, self.local_quota)  # for use of local generation
        # for btm_p and btm_s arrangements:
        self.local_consumption = np.minimum(self.generation, self.load)

    def calc_dynamic_energy(self, step):
        """Calculate Customer imports and exports for single timestep.

        Used for scenarios with batteries.
        """
        # -------------------------------------------------------------------------------
        # Calculate energy flow without battery, then modify by calling battery.dispatch:
        # -------------------------------------------------------------------------------
        self.flows[step] = self.generation[step] - self.load[step]
        if self.has_battery:
            self.flows[step] = self.battery.dispatch(generation=self.generation[step],
                                                     load=self.load[step],
                                                     step=step)
        else:
            self.flows[step] = self.generation[step] - self.load[step]
        self.exports[step] = self.flows[step].clip(0)
        self.imports[step] = (-1 * self.flows[step]).clip(0)
        # Calculate local quota here??
        # # Solar allocation is for solar_instantaneous tariff
        # self.solar_allocation[step] = np.minimum(self.imports[step], self.local_quota[step])
        # Local Consumption is PV self-consumed by customer (which is charged for in btm_p arrangement)
        self.local_consumption[step] = np.minimum(self.generation[step], self.load[step])

    def calc_demand_charge(self):
        """Compute the demand charge from the peak import within demand periods."""
        if self.tariff.is_demand:
            # .max() * 2 converts a half-hourly kWh reading to kW demand
            max_demand = np.multiply(self.imports, self.tariff.demand_period_array).max() * 2  # convert kWh to kW
            self.demand_charge = max_demand * self.tariff.demand_tariff * self.ts.get_num_days()
            # Use nominal pf to convert to kVA?
            if self.tariff.demand_type == 'kVA':
                self.demand_charge = self.demand_charge / self.tariff.assumed_pf
        else:
            self.demand_charge = 0

    def calc_cash_flow(self):
        """Calculate receipts and payments for customer.

        self.cash_flows is net volumetric import & export charge,
        self.energy_bill is total elec bill, inc fixed charges,
        self.total_payment includes opex & capex repayments.
        """
        if any(s in self.tariff.solar_rate_name for s in ['self_con', 'Self_Con', 'sc', 'SC']):
            # IFF solar tariff paid to secondary solar retailer for self-consumed generation
            # and export FiT paid for exported generation
            # NB cost of exported self generation is received from retailer and passed to PV seller,
            # so zero net effect. Energy flows treated as if PV is owned by customer.
            self.local_solar_bill = (np.multiply(self.local_consumption, self.tariff.solar_import_tariff) +
                                     np.multiply(self.exports, self.tariff.export_tariff)).sum()
        else:
            self.local_solar_bill = 0.0

        if self.tariff.is_dynamic:
            # ------------------------------------
            # calculate tariffs and costs stepwise
            # ------------------------------------
            for step in np.arange(0, self.ts.get_num_steps()):
                # print(step)
                # --------------------------------------------------------------
                # Solar Block Daily Tariff : Calculate energy used at solar rate
                # --------------------------------------------------------------
                # Fixed daily allocation (set as % of annual generation) charged at solar rate,
                # residual is at underlying, e.g. TOU
                if 'Solar_Block_Daily' in self.tariff.tariff_type:
                    print('Solar_Block_Daily NOT SUPPORTED')
                    sys.exit('Solar_Block_Daily NOT SUPPORTED')
                    # SOLAR BLOCK DAILY REMOVED
                    # steps_today = ts.steps_today(step)
                    # # Cumulative Energy for this day:
                    # cumulative_energy = self.imports[steps_today].sum()
                    # if len(steps_today) <= 1:
                    #     previous_energy = 0
                    # else:
                    #     previous_energy = self.imports[steps_today[:-1]].sum()
                    # # Allocate local solar allocation depending on cumulative energy relative to quota:
                    # if cumulative_energy <= self.daily_local_quota:
                    #     self.solar_allocation[step] = self.imports[step]
                    # elif previous_energy < self.daily_local_quota \
                    #         and cumulative_energy > self.daily_local_quota:
                    #     self.solar_allocation[step] = self.daily_local_quota - previous_energy
                    # else:
                    #     self.solar_allocation[step] = 0
                else:
                    # ---------------------------------------------------------
                    # For Block Tariffs, calc volumetric charges for each block
                    # ---------------------------------------------------------
                    # NOTE(review): cumulative_energy / previous_energy are only set
                    # for Block_Quarterly and Block_Daily; any other dynamic tariff
                    # type would hit a NameError below -- confirm those are the only
                    # dynamic tariff types.
                    # Block Quarterly Tariff
                    # ----------------------
                    if self.tariff.tariff_type == 'Block_Quarterly':
                        steps_since_reset = np.mod((step - self.tariff.block_billing_start),
                                                   self.tariff.steps_in_block)
                        # slice end step + 1 to include the current step
                        cumulative_energy = self.imports[
                            step - steps_since_reset:step + 1].sum()  # NB only adds to step
                        if steps_since_reset == 0:
                            previous_energy = 0
                        else:
                            previous_energy = self.imports[step - steps_since_reset:step].sum()  # NB adds to step-1
                    # Block Daily Tariff
                    # -------------------
                    elif self.tariff.tariff_type == 'Block_Daily':
                        steps_today = self.ts.steps_today(step)
                        cumulative_energy = self.imports[steps_today].sum()
                        if len(steps_today) <= 1:
                            previous_energy = 0
                        else:
                            previous_energy = self.imports[steps_today[:-1]].sum()
                    # Sanity check: cumulative total should equal previous + this step
                    if cumulative_energy - previous_energy - self.imports[step] > 0.01:
                        print('accumulation error')
                    # All Block Tariffs:
                    # -----------------
                    # Split this step's import across up to three price blocks depending
                    # on where the cumulative total sits relative to the block thresholds.
                    if cumulative_energy <= self.tariff.high_1:
                        self.import_charge[step] = self.imports[step] * self.tariff.block_rate_1
                    elif previous_energy < self.tariff.high_1 and cumulative_energy <= self.tariff.high_2:
                        # step straddles the high_1 threshold
                        self.import_charge[step] = (self.tariff.high_1 - previous_energy) * self.tariff.block_rate_1 + \
                                                   (cumulative_energy - self.tariff.high_1) * self.tariff.block_rate_2
                    elif previous_energy > self.tariff.high_1 and cumulative_energy <= self.tariff.high_2:
                        self.import_charge[step] = self.imports[step] * self.tariff.block_rate_2
                    elif previous_energy < self.tariff.high_2 and cumulative_energy > self.tariff.high_2:
                        # step straddles the high_2 threshold
                        self.import_charge[step] = (self.tariff.high_2 - previous_energy) * self.tariff.block_rate_2 + \
                                                   (cumulative_energy - self.tariff.high_2) * self.tariff.block_rate_3
                    elif previous_energy >= self.tariff.high_2:
                        self.import_charge[step] = self.imports[step] * self.tariff.block_rate_3
                    elif previous_energy < self.tariff.high_1 and cumulative_energy > self.tariff.high_2:
                        # step spans all three blocks
                        self.import_charge[step] = (self.tariff.high_1 - previous_energy) * self.tariff.block_rate_1 + \
                                                   (self.tariff.high_2 - self.tariff.high_1) * self.tariff.block_rate_2 + \
                                                   (cumulative_energy - self.tariff.high_2) * self.tariff.block_rate_3
        # -------------------------------------------------------------
        # calculate costs using array for static and underlying tariffs
        # -------------------------------------------------------------
        if self.tariff.tariff_type == 'Solar_Block_Daily' or not self.tariff.is_dynamic:
            self.import_charge = np.multiply((self.imports - self.solar_allocation), self.tariff.import_tariff)
        # For all dynamic and static tariffs:
        # -----------------------------------
        self.cash_flows = self.import_charge \
                          + np.multiply(self.solar_allocation, self.tariff.solar_import_tariff) \
                          - np.multiply(self.exports, self.tariff.export_tariff)
        # - np.multiply(self.local_exports, self.tariff.local_export_tariff) could be added for LET / P2P
        # (These are all 1x17520 Arrays.)
        self.energy_bill = self.cash_flows.sum() + \
                           self.tariff.fixed_charge * self.ts.get_num_days() + \
                           self.demand_charge
        if self.name == 'retailer':
            self.total_payment = self.energy_bill
        else:
            # capex, opex in $, energy in c (because tariffs in c/kWh)
            self.total_payment = self.energy_bill + \
                                 self.local_solar_bill + \
                                 (self.pv_capex_repayment +
                                  self.en_capex_repayment +
                                  self.en_opex +
                                  self.bat_capex_repayment) * 100
        # --------
        # Calc NPV: discount the total payment monthly over the analysis term
        # --------
        self.npv = -sum(self.total_payment / (1 + self.scenario.a_rate / 12) ** t
                        for t in np.arange(1, 12 * self.scenario.a_term))
from django.db.models.signals import class_prepared


def patch_user(sender, *args, **kwargs):
    """class_prepared signal handler: relax django.contrib.auth.User.username.

    Widens max_length to 80, updates the help text to match, and drops the
    unique constraint (uniqueness is presumably enforced per-organization
    elsewhere -- confirm).
    """
    authmodels = 'django.contrib.auth.models'
    # Only touch the real auth User model, not models that merely share the name
    if sender.__name__ == 'User' and sender.__module__ == authmodels:
        # patch the length
        sender._meta.get_field('username').max_length = 80
        # patch the help text
        help_text = "Required. 80 characters or fewer."
        sender._meta.get_field('username').help_text = help_text
        # remove the unique constraint
        sender._meta.get_field('username').unique = False

# Must be connected before auth.models is imported so the patch runs while the
# User class is being prepared.
class_prepared.connect(patch_user)


# Monkey patch the default admin login form with our custom form
def patch_admin_login():
    """Add an 'organization' field to the admin login form and authenticate
    against (organization, username, password) instead of the default pair."""
    from django import forms
    from django.contrib import admin
    from django.contrib import auth

    # Deliberately vague message: avoid revealing which of the three was wrong
    ERROR_MESSAGE = "Please enter a correct organization, username, and password."

    def patched_clean(self):
        """Replacement clean() for AdminAuthenticationForm."""
        organization = self.cleaned_data.get('organization')
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        message = ERROR_MESSAGE
        if all([organization, username, password]):
            self.user_cache = auth.authenticate(organization=organization,
                                                username=username,
                                                password=password)
            if not self.user_cache:
                raise forms.ValidationError(message)
            if not self.user_cache.is_active or not self.user_cache.is_staff:
                raise forms.ValidationError(message)
        self.check_for_test_cookie()
        return self.cleaned_data

    org_field = forms.CharField(max_length=80)
    admin.forms.AdminAuthenticationForm.base_fields['organization'] = org_field
    admin.forms.AdminAuthenticationForm.clean = patched_clean

patch_admin_login()
# Created by MechAviv # Quest ID :: 61145 # Mysterious Merchant Matilda sm.setSpeakerID(9201451) sm.removeEscapeButton() sm.flipDialogue() sm.sendNext("Hi! My name is #bMatilda#k.\r\nI sell lots of handy stuff. And not like those OTHER people that say that.\r\n You have the look of someone about to do something stupid and dangerous. #bI can help!#k") sm.setSpeakerID(9201451) sm.removeEscapeButton() sm.flipDialogue() sm.sendSay("How about letting me sell you some stuff that might keep you less dead?\r\n#i4143000# #i4140001# #i4142000# #i2501000# #i2500000# #i2320000#\r\nAll you need are #e#bmesos#n#k to get my nifty items.") sm.setSpeakerID(9201451) sm.removeEscapeButton() sm.flipDialogue() sm.sendSay("#bCome visit me in town. Any town!#k\r\nI like to stay mobile.") sm.startQuest(61145) sm.completeQuest(61145)
from django.db import models
from django.contrib.auth.models import User


class Ban(models.Model):
    """A ban record for a user/IP with a validity window.

    `permaban` marks indefinite bans; otherwise the ban runs from
    `start_dtm` to `end_dtm`.
    """
    id = models.AutoField(primary_key=True)
    user = models.OneToOneField(User)
    ip_address = models.IPAddressField()
    start_dtm = models.DateTimeField()
    end_dtm = models.DateTimeField()
    permaban = models.BooleanField()
    reason = models.TextField()

    def __unicode__(self):
        # BUG FIX: django.contrib.auth.models.User has no `name` attribute;
        # the field is `username` (self.user.name raised AttributeError).
        return self.user.username

    class Meta:
        db_table = 'rpf_ban'
        app_label = 'rpf'
# Problem: for each command [i, j, k], take the 1-based slice array[i..j],
# sort it, and collect its k-th (1-based) element.
# Example: array = [1, 5, 2, 6, 3, 7, 4], i=2, j=5, k=3
#   slice  -> [5, 2, 6, 3]
#   sorted -> [2, 3, 5, 6]
#   3rd    -> 5
def solution(array, commands):
    """Return [k-th smallest of array[i..j] (1-based) for each (i, j, k) in commands]."""
    answer = []
    for i, j, k in commands:
        answer.append(sorted(array[i - 1:j])[k - 1])
    return answer


print(solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]]))
import gym
import gym.spaces as spaces
import Game


class CustomEnv(gym.Env):
    """Gym environment wrapping the Pygame2D game from the local Game module."""

    def __init__(self):
        self.pygame = Game.Pygame2D()
        # 180 discrete actions -- mapping to game moves is defined in Game.Pygame2D.action
        self.action_space = spaces.Discrete(180)
        # Observation layout for one player; each list entry is the number of
        # values for that MultiDiscrete slot.
        rows_player_obs = [5] * 15          # NOTE(review): 15 row slots, 5 values each -- confirm in Game
        penality_player_obs = [1] * 7       # 7 penalty slots
        board_player_obs = [1] * 25         # 5x5 board
        normal_pit_obs = [4] * 5 * 5        # [4] repeated 25 times (5x5 pit)
        # At most 3*5 tiles can be in the discard pit for a single tile kind
        # (translated from the original Italian comment)
        discard_pit_obs = [3*5] * 5
        one_player_obs = rows_player_obs + penality_player_obs + board_player_obs + \
                         normal_pit_obs + discard_pit_obs
        self.observation_space = spaces.MultiDiscrete(one_player_obs)

    def reset(self):
        """Discard the current game, start a new one, return its first observation."""
        del self.pygame
        self.pygame = Game.Pygame2D()
        obs = self.pygame.observe()
        return obs

    def step(self, action):
        """Apply `action` and return (observation, reward, done, info)."""
        self.pygame.action(action)
        obs = self.pygame.observe()
        reward = self.pygame.evaluete()  # sic: method name as defined in Game
        done = self.pygame.is_done()
        return obs, reward, done, {}

    def render(self, mode='human'):
        """Print the game's textual view (mode is accepted for API compatibility)."""
        print(self.pygame.view())
from details.models import Person
from django.forms import Textarea, CheckboxSelectMultiple
from django.forms.models import ModelMultipleChoiceField
from django.utils.translation import ugettext as _
from django.contrib import admin
from django.conf import settings
from django.db import models

from common.admintools import export_xlsx, printable_html


class PersonAdminAbstract(admin.ModelAdmin):
    """Base admin for Person models: demographics tab, group-scoped queryset,
    group-gated export actions, and a rich change-message with old -> new values."""

    change_form_template = 'admin/my_change_form.html'
    list_display = ('person_id', 'demographics_gender', 'demographics_age',
                    'person_creationDate', 'person_updateDate', 'person_user',)
    list_filter = ('demographics_gender', 'demographics_age',)
    search_fields = ['person_id', ]
    readonly_fields = ('person_id', 'person_creationDate', 'person_updateDate', 'person_user',)
    fieldsets = [
        ('Demographics', {
            'classes': ('suit-tab suit-tab-2demographics',),
            # BUG FIX: 'demographics_weight' was listed twice (Django admin checks
            # reject duplicate fields in fieldsets).
            # NOTE(review): the duplicate may have been a typo for another field
            # such as a height field -- confirm against the Person model.
            'fields': ['demographics_gender', 'demographics_age', 'demographics_weight']
        }),
    ]
    suit_form_tabs = [
        (u'2demographics', u'2. Demographics')
    ]
    radio_fields = {'demographics_gender': admin.VERTICAL}
    actions = [export_xlsx, ]
    formfield_overrides = dict((
        (models.TextField,
         dict((('widget', Textarea(attrs=dict(rows=5, cols=120, style='width: 600px;'))),))),
        (models.ManyToManyField,
         dict((('widget', CheckboxSelectMultiple),)))
    ),)

    class Media:
        css = dict(all=['generic.css', 'fixadmin.css'])
        js = ('generic.js', 'models/person.js')

    def save_model(self, request, obj, form, change):
        """Stamp the creating user on first save."""
        if obj.pk == None:
            obj.person_user = request.user
        super(PersonAdminAbstract, self).save_model(request, obj, form, change)

    def queryset(self, request):
        """Restrict rows to people created by users sharing a group with the requester."""
        qs = super(PersonAdminAbstract, self).queryset(request)
        groups = request.user.groups.all()
        qs = qs.filter(person_user__groups=groups).distinct()
        return qs

    def get_actions(self, request):
        """Expose the export actions only to users in the configured profile groups."""
        actions = super(PersonAdminAbstract, self).get_actions(request)
        user = request.user
        #if not user.groups.filter(name=settings.HTML_EXPORTER_PROFILE_GROUP).exists(): del actions['printable_html']
        if not user.groups.filter(name=settings.EXCEL_EXPORTER_PROFILE_GROUP).exists():
            del actions['export_xlsx']
        return actions

    def construct_change_message(self, request, form, formsets):
        """Append an HTML list of changed fields (old -> new) to the default message."""
        message = super(PersonAdminAbstract, self).construct_change_message(request, form, formsets)
        change_message = []
        if form.changed_data:
            values = []
            for x in form.changed_data:
                field = form.fields[x]
                initial = form.initial[x]
                value = form.cleaned_data[x]
                # M2M values arrive as model instances; normalise both sides to pk lists
                if isinstance(field, ModelMultipleChoiceField):
                    value = [int(y.pk) for y in value]
                    initial = [int(y) for y in initial]
                values.append(
                    _("<b>%s</b>: <span style='color:#4682B4' >%s</span> -> <span style='color:#00A600' >%s</span>"
                      % (x, str(initial), str(value))))
            change_message.append('<ul><li>%s</li></ul>' % '</li><li>'.join(values))
        message += ' '.join(change_message)
        return message
""" Model for the adventure. """ # pylint: disable=too-few-public-methods from django.db import models from .mixins import TimestampMixin, DescriptionNotesMixin class Adventure(models.Model, TimestampMixin, DescriptionNotesMixin): """ Model for the adventure. """ name = models.CharField(max_length=128) setting = models.ForeignKey('Setting', models.PROTECT, related_name='adventures') edition = models.ForeignKey('Edition', models.PROTECT, related_name='adventures') author = models.ForeignKey('Author', models.PROTECT, related_name='adventures') publisher = models.ForeignKey('Publisher', models.PROTECT, related_name='adventures') date = models.DateField() characters = models.ManyToManyField('Character', related_name='adventures') monsters = models.ManyToManyField('Monster', related_name='adventures') items = models.ManyToManyField('Item', related_name='adventures') format = models.CharField(max_length=128) min_level = models.IntegerField() max_level = models.IntegerField() min_characters = models.IntegerField() max_characters = models.IntegerField()
import json
import pandas as pd
import numpy as np
from tqdm import tqdm


def get_spdat_feat_types(df):
    '''
    Get lists of features containing the single-valued categorical and noncategorical feature names in the SPDAT data
    :param df: Pandas DataFrame containing client SPDAT question info
    :return: List of single-valued categorical features, list of noncategorical features
    '''
    sv_cat_feats = []
    noncat_feats = []
    for column in df.columns:
        # Object-dtype columns hold string answers -> treat as categorical
        if df[column].dtype == 'object':
            sv_cat_feats.append(column)
        else:
            noncat_feats.append(column)
    return sv_cat_feats, noncat_feats


def get_spdat_data(spdat_path, gt_end_date):
    '''
    Read SPDAT data from raw SPDAT file and output a dataframe containing clients' answers to the questions.
    :param spdat_path: The file path of the raw SPDAT data (UTF-16 JSON)
    :param gt_end_date: the date used for ground truth calculation
    :return: A DataFrame in which each row is a client's answers to SPDAT questions,
             plus the categorical / noncategorical feature name lists
    '''

    def single_client_record(client_df):
        '''
        Helper function for SPDAT data preprocessing. Processes records for a single client.
        :param client_df: Raw SPDAT data for 1 client
        :return: DataFrame with 1 row detailing client's answers to SPDAT questions
        '''
        # BUG FIX: dict.fromkeys(questions, [np.nan]) made every key share ONE
        # list object; build a fresh [nan] per question instead.
        client_answers = {question: [np.nan] for question in questions}
        for row in client_df.itertuples():
            question = getattr(row, 'QuestionE')
            if question in questions:
                answer = str(getattr(row, 'ScoreValue'))
                # NOTE(review): str.isnumeric() is False for decimals/negatives,
                # so e.g. "3.5" stays a string -- confirm ScoreValue is integral.
                answer = float(answer) if answer.isnumeric() else answer
                client_answers[question] = [answer]  # Set values to client's answers
        return pd.DataFrame.from_dict(client_answers)

    tqdm.pandas()

    # Read JSON file containing SPDAT information. Remove line breaks.
    with open(spdat_path, 'rb') as f:
        json_str = f.read().decode('utf-16').replace('\r', '').replace('\n', '')
    spdats = json.loads(json_str)['VISPDATS']   # Convert to object and get the list of SPDATs
    df = pd.DataFrame(spdats)                   # Convert JSON object to pandas DataFrame
    df.fillna(0, inplace=True)

    # Remove records that were created after the ground truth end date
    df['SPDAT_Date'] = pd.to_datetime(df['SPDAT_Date'], errors='coerce')
    df = df[df['SPDAT_Date'] <= gt_end_date]

    # Replace questions with ellipses with their corresponding descriptions.
    # BUG FIX: regex=False -- with the default regex=True the pattern '...'
    # matches ANY three characters, rewriting every question.
    df.loc[df['QuestionE'].str.contains('...', regex=False), 'QuestionE'] = df['DescriptionE']

    # For questions that have part (a), (b), (c), etc., append their question roots.
    question_roots = []
    last_question_root = ''
    last_component_numeric = False
    for row in df.itertuples():
        component = str(getattr(row, 'Component'))
        if component.isnumeric():
            # Numeric component marks a root question; remember it for the sub-parts
            last_question_root = str(getattr(row, 'QuestionE'))
            last_component_numeric = True
        else:
            if last_component_numeric:
                if last_question_root not in question_roots:
                    question_roots.append(last_question_root)
                last_component_numeric = False
            # BUG FIX: DataFrame.set_value was removed in pandas 1.0;
            # .at is the supported scalar setter.
            df.at[row.Index, 'QuestionE'] = last_question_root + getattr(row, 'QuestionE')
    questions = df['QuestionE'].unique()  # Get list of unique questions across all SPDAT versions
    questions = [q for q in questions if q not in question_roots]

    # Build a DataFrame in which each row is a client's answer to SPDAT questions
    df_clients = df.groupby('ClientID').progress_apply(single_client_record)
    df_clients.columns = df_clients.columns.str.replace('%', '')
    df_clients.columns = df_clients.columns.str.replace('\r', '')
    df_clients.columns = df_clients.columns.str.replace('\n', '')
    df_clients = df_clients.droplevel(level=1, axis='index')  # Ensure index is ClientID
    print("# of clients with SPDAT = " + str(df_clients.shape[0]))

    sv_cat_feats, noncat_feats = get_spdat_feat_types(df_clients)  # Classify SPDAT questions as features
    # Replace "0.0" with "Unknown" for categorical features
    df_clients[sv_cat_feats] = df_clients[sv_cat_feats].replace(to_replace=0, value='Unknown')
    return df_clients, sv_cat_feats, noncat_feats
## Ch09 P9.6 from car import Car myCar = Car(50) myCar.addGas(20) myCar.drive(100) print(myCar.getGasLevel())
def spiralNumbers(m):
    """Return an m x m matrix filled with 1..m*m in a clockwise inward spiral.

    Starts at the top-left corner moving right; turns clockwise whenever the
    next cell would leave the grid or is already filled.
    """
    grid = [[0] * m for _ in range(m)]
    row, col = 0, 0
    d_row, d_col = 0, 1  # initial direction: rightwards along the top edge
    for value in range(1, m * m + 1):
        grid[row][col] = value
        nxt_row, nxt_col = row + d_row, col + d_col
        if not (0 <= nxt_row < m and 0 <= nxt_col < m and grid[nxt_row][nxt_col] == 0):
            # Rotate the direction vector 90 degrees clockwise and step again.
            d_row, d_col = d_col, -d_row
            nxt_row, nxt_col = row + d_row, col + d_col
        row, col = nxt_row, nxt_col
    return grid


print(spiralNumbers(5))
#!/usr/bin/env python
# coding: utf-8

# Deduplicate images: keep an image only if its MSE against every image kept
# so far exceeds a threshold, then plot the kept set in a grid.

# In[17]:
import cv2
import numpy as np
import glob
import matplotlib.pyplot as plt
import PIL
import time
import os

# In[49]:
img_dir = os.path.join(r"Images", "*g")   # matches .jpg/.png/... endings
img_dir = glob.glob(img_dir)
image_l = []  # kept (deduplicated) 224x224 grayscale images

# In[50]:
def MSE(image1_gray_resized_np, image2_gray_resized_np):
    """Mean squared error between two equal-sized grayscale images.

    BUG FIX: cast to float before subtracting. The inputs are uint8 arrays and
    uint8 subtraction wraps around (e.g. 0 - 255 == 1), silently corrupting
    the error value.
    """
    diff = image1_gray_resized_np.astype(np.float64) - image2_gray_resized_np.astype(np.float64)
    return np.square(diff).mean()

# In[51]:
def load_img_and_convert(path):
    """Load `path` as a 224x224 grayscale image; keep it only if it differs
    from every kept image (MSE > 102 against each).

    Note: parameter renamed from `str`, which shadowed the builtin.
    """
    img = cv2.imread(path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = np.asarray(img)
    img = cv2.resize(img, (224, 224))
    if len(image_l) == 0:
        image_l.append(img)
        print("1")
    else:
        errors = [MSE(kept, img) for kept in image_l]
        # 102 is the empirical "different enough" threshold
        if all(e > 102 for e in errors):
            image_l.append(img)

# In[52]:
def plot_image():
    """Show all kept images in a 3-column grid (called after image_l becomes an ndarray)."""
    _, axs = plt.subplots(image_l.shape[0] // 3 + 1, 3, figsize=(12, 12))
    axs = axs.flatten()
    for img, ax in zip(image_l, axs):
        ax.imshow(cv2.cvtColor(img, cv2.COLOR_GRAY2BGR))
    plt.axis("off")
    plt.show()

# In[53]:
for i in img_dir:
    try:
        load_img_and_convert(i)
    except Exception as e:
        # Best effort: a single unreadable file should not abort the whole run
        print(e)

# In[54]:
print(len(image_l), len(img_dir))
image_l = np.asarray(image_l)
print(image_l.shape)

# In[55]:
plot_image()
# Handle all the exceptions!

# Setup
actor = {"name": "John Cleese", "rank": "awesome"}


def get_last_name():
    """Return the actor's last name.

    Prefers an explicit "last_name" entry; otherwise derives it as the last
    word of the full "name" value.
    """
    try:
        return actor["last_name"]
    except KeyError:  # was a bare `except:` -- catch only the missing-key case
        # [-1] instead of [1]: works for names with more than two words too
        return actor["name"].split()[-1]


# Test code
get_last_name()
print("All exceptions caught! Good job!")
print("The actor's last name is %s" % get_last_name())
# Request/environment metadata keys captured for logging and diagnostics.
HTTP_HEADER_LIST = [
    "REMOTE_ADDR",
    "REMOTE_HOST",
    "X_FORWARDED_FOR",
    "TZ",
    "QUERY_STRING",
    "CONTENT_LENGTH",
    "CONTENT_TYPE",
    "LC_CTYPE",
    "SERVER_PROTOCOL",
    "SERVER_SOFTWARE",
]

# Placeholder substituted for sensitive values when masking.
MASKED_DATA = "XXXXXXXXX"

CONTENT_TYPE_JSON = "application/json"
# Maps a request content type to the name of the parser method to call.
CONTENT_TYPE_METHOD_MAP = {CONTENT_TYPE_JSON: "_get_json_data"}

# Exception class names treated as client-side (request) errors.
CLIENT_ERROR_SET = {
    "AttributeError",
    "IntegrityError",
    "KeyError",
    "ValidationError",
}

# Canonical user-facing messages for built-in framework exceptions.
BUILTIN_ERROR_MESSAGE = {
    "Http404": "Not found",
    "PermissionDenied": "Permission denied.",
}

# Viewset action method names (matches DRF ModelViewSet actions, minus
# partial_update -- presumably intentional; confirm).
MODEL_VIEWSET_METHODNAMES = ["create", "retrieve", "list", "update", "destroy"]

# Keys of the standard response envelope.
RESPONSE_KEY_DATA = "data"
RESPONSE_KEY_ERROR = "error"
RESPONSE_KEY_IS_SUCCESS = "is_success"
# Copyright (c) 2010 Jeremy Thurgood <firxen+boto@gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # NOTE: These tests only cover the very simple cases I needed to test # for the InstanceGroup fix. 
import xml.sax from boto import handler from boto.emr import emrobject from boto.resultset import ResultSet from tests.compat import unittest JOB_FLOW_EXAMPLE = b""" <DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-01-15"> <DescribeJobFlowsResult> <JobFlows> <member> <ExecutionStatusDetail> <CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime> <StartDateTime>2009-01-28T21:49:16Z</StartDateTime> <State>STARTING</State> </ExecutionStatusDetail> <BootstrapActions> <member> <BootstrapActionConfig> <ScriptBootstrapAction> <Args/> <Path>s3://elasticmapreduce/libs/hue/install-hue</Path> </ScriptBootstrapAction> <Name>Install Hue</Name> </BootstrapActionConfig> </member> </BootstrapActions> <VisibleToAllUsers>true</VisibleToAllUsers> <SupportedProducts> <member>Hue</member> </SupportedProducts> <Name>MyJobFlowName</Name> <LogUri>mybucket/subdir/</LogUri> <Steps> <member> <ExecutionStatusDetail> <CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime> <State>PENDING</State> </ExecutionStatusDetail> <StepConfig> <HadoopJarStep> <Jar>MyJarFile</Jar> <MainClass>MyMailClass</MainClass> <Args> <member>arg1</member> <member>arg2</member> </Args> <Properties/> </HadoopJarStep> <Name>MyStepName</Name> <ActionOnFailure>CONTINUE</ActionOnFailure> </StepConfig> </member> </Steps> <JobFlowId>j-3UN6WX5RRO2AG</JobFlowId> <Instances> <Placement> <AvailabilityZone>us-east-1a</AvailabilityZone> </Placement> <SlaveInstanceType>m1.small</SlaveInstanceType> <MasterInstanceType>m1.small</MasterInstanceType> <Ec2KeyName>myec2keyname</Ec2KeyName> <InstanceCount>4</InstanceCount> <KeepJobFlowAliveWhenNoSteps>true</KeepJobFlowAliveWhenNoSteps> </Instances> </member> </JobFlows> </DescribeJobFlowsResult> <ResponseMetadata> <RequestId>9cea3229-ed85-11dd-9877-6fad448a8419</RequestId> </ResponseMetadata> </DescribeJobFlowsResponse> """ JOB_FLOW_COMPLETED = b""" <DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31"> 
<DescribeJobFlowsResult> <JobFlows> <member> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <LastStateChangeReason>Steps completed</LastStateChangeReason> <StartDateTime>2010-10-21T01:03:59Z</StartDateTime> <ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:44:18Z</EndDateTime> </ExecutionStatusDetail> <BootstrapActions/> <Name>RealJobFlowName</Name> <LogUri>s3n://example.emrtest.scripts/jobflow_logs/</LogUri> <Steps> <member> <StepConfig> <HadoopJarStep> <Jar>s3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar</Jar> <Args> <member>s3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch</member> </Args> <Properties/> </HadoopJarStep> <Name>Setup Hadoop Debugging</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:03:59Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:04:22Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialMapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialReducer.py</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/20/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/19/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/18/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/17/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/16/*</member> <member>-input</member> <member>s3://example.emrtest.data/raw/2010/10/15/*</member> <member>-input</member> 
<member>s3://example.emrtest.data/raw/2010/10/14/*</member> <member>-output</member> <member>s3://example.emrtest.crunched/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_Initial</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:04:22Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:36:18Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Mapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Reducer.py</member> <member>-input</member> <member>s3://example.emrtest.crunched/*</member> <member>-output</member> <member>s3://example.emrtest.step1/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_step1</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:36:18Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:37:51Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Mapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Reducer.py</member> <member>-input</member> <member>s3://example.emrtest.crunched/*</member> <member>-output</member> <member>s3://example.emrtest.step2/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_step2</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> 
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:37:51Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:39:32Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Mapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Reducer.py</member> <member>-input</member> <member>s3://example.emrtest.step1/*</member> <member>-output</member> <member>s3://example.emrtest.step3/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_step3</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:39:32Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:41:22Z</EndDateTime> </ExecutionStatusDetail> </member> <member> <StepConfig> <HadoopJarStep> <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar> <Args> <member>-mapper</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Mapper.py</member> <member>-reducer</member> <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Reducer.py</member> <member>-input</member> <member>s3://example.emrtest.step1/*</member> <member>-output</member> <member>s3://example.emrtest.step4/</member> </Args> <Properties/> </HadoopJarStep> <Name>testjob_step4</Name> <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure> </StepConfig> <ExecutionStatusDetail> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <StartDateTime>2010-10-21T01:41:22Z</StartDateTime> <State>COMPLETED</State> <EndDateTime>2010-10-21T01:43:03Z</EndDateTime> </ExecutionStatusDetail> </member> </Steps> <JobFlowId>j-3H3Q13JPFLU22</JobFlowId> <Instances> 
<SlaveInstanceType>m1.large</SlaveInstanceType> <MasterInstanceId>i-64c21609</MasterInstanceId> <Placement> <AvailabilityZone>us-east-1b</AvailabilityZone> </Placement> <InstanceGroups> <member> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <InstanceRunningCount>0</InstanceRunningCount> <StartDateTime>2010-10-21T01:02:09Z</StartDateTime> <ReadyDateTime>2010-10-21T01:03:03Z</ReadyDateTime> <State>ENDED</State> <EndDateTime>2010-10-21T01:44:18Z</EndDateTime> <InstanceRequestCount>1</InstanceRequestCount> <InstanceType>m1.large</InstanceType> <Market>ON_DEMAND</Market> <LastStateChangeReason>Job flow terminated</LastStateChangeReason> <InstanceRole>MASTER</InstanceRole> <InstanceGroupId>ig-EVMHOZJ2SCO8</InstanceGroupId> <Name>master</Name> </member> <member> <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime> <InstanceRunningCount>0</InstanceRunningCount> <StartDateTime>2010-10-21T01:03:59Z</StartDateTime> <ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime> <State>ENDED</State> <EndDateTime>2010-10-21T01:44:18Z</EndDateTime> <InstanceRequestCount>9</InstanceRequestCount> <InstanceType>m1.large</InstanceType> <Market>ON_DEMAND</Market> <LastStateChangeReason>Job flow terminated</LastStateChangeReason> <InstanceRole>CORE</InstanceRole> <InstanceGroupId>ig-YZHDYVITVHKB</InstanceGroupId> <Name>slave</Name> </member> </InstanceGroups> <NormalizedInstanceHours>40</NormalizedInstanceHours> <HadoopVersion>0.20</HadoopVersion> <MasterInstanceType>m1.large</MasterInstanceType> <MasterPublicDnsName>ec2-184-72-153-139.compute-1.amazonaws.com</MasterPublicDnsName> <Ec2KeyName>myubersecurekey</Ec2KeyName> <InstanceCount>10</InstanceCount> <KeepJobFlowAliveWhenNoSteps>false</KeepJobFlowAliveWhenNoSteps> </Instances> </member> </JobFlows> </DescribeJobFlowsResult> <ResponseMetadata> <RequestId>c31e701d-dcb4-11df-b5d9-337fc7fe4773</RequestId> </ResponseMetadata> </DescribeJobFlowsResponse> """ class TestEMRResponses(unittest.TestCase): def _parse_xml(self, body, 
markers): rs = ResultSet(markers) h = handler.XmlHandler(rs, None) xml.sax.parseString(body, h) return rs def _assert_fields(self, response, **fields): for field, expected in fields.items(): actual = getattr(response, field) self.assertEquals(expected, actual, "Field %s: %r != %r" % (field, expected, actual)) def test_JobFlows_example(self): [jobflow] = self._parse_xml(JOB_FLOW_EXAMPLE, [('member', emrobject.JobFlow)]) self._assert_fields(jobflow, creationdatetime='2009-01-28T21:49:16Z', startdatetime='2009-01-28T21:49:16Z', state='STARTING', instancecount='4', jobflowid='j-3UN6WX5RRO2AG', loguri='mybucket/subdir/', name='MyJobFlowName', availabilityzone='us-east-1a', slaveinstancetype='m1.small', masterinstancetype='m1.small', ec2keyname='myec2keyname', keepjobflowalivewhennosteps='true') def test_JobFlows_completed(self): [jobflow] = self._parse_xml(JOB_FLOW_COMPLETED, [('member', emrobject.JobFlow)]) self._assert_fields(jobflow, creationdatetime='2010-10-21T01:00:25Z', startdatetime='2010-10-21T01:03:59Z', enddatetime='2010-10-21T01:44:18Z', state='COMPLETED', instancecount='10', jobflowid='j-3H3Q13JPFLU22', loguri='s3n://example.emrtest.scripts/jobflow_logs/', name='RealJobFlowName', availabilityzone='us-east-1b', slaveinstancetype='m1.large', masterinstancetype='m1.large', ec2keyname='myubersecurekey', keepjobflowalivewhennosteps='false') self.assertEquals(6, len(jobflow.steps)) self.assertEquals(2, len(jobflow.instancegroups))
'''
Project Euler 30.

Surprisingly there are only three numbers that can be written as the sum
of fourth powers of their digits:

    1634 = 1^4 + 6^4 + 3^4 + 4^4
    8208 = 8^4 + 2^4 + 0^4 + 8^4
    9474 = 9^4 + 4^4 + 7^4 + 4^4

As 1 = 1^4 is not a sum it is not included.  The sum of these numbers is
1634 + 8208 + 9474 = 19316.

Find the sum of all the numbers that can be written as the sum of fifth
powers of their digits.
'''


def sum_of_digits(n, p):
    """Return the sum of each decimal digit of ``n`` raised to power ``p``."""
    total = 0
    while n > 0:
        total += (n % 10) ** p
        # Floor division: the original Python 2 ``/=`` would produce a
        # float (and an infinite loop) on Python 3.
        n //= 10
    return total


if __name__ == "__main__":
    # Search bound kept from the original (the theoretical cap is
    # 6 * 9**5 = 354294, but all solutions fall below 200000).
    print(sum(n for n in range(2, 200000) if sum_of_digits(n, 5) == n))
# Prim algorithm
# Input file: one row of the adjacency matrix per line, comma-separated
# edge weights, 0 meaning "no edge", e.g.:
# 0,2,1,0,0
# 2,0,1,2,3
# 1,1,0,0,4
# 0,2,0,0,2
# 0,3,4,2,0
import random as rand


def printGraph(g):
    """Print the adjacency matrix one row per line, then a separator."""
    for row in g:
        print(row)
    print("---------------------------\n")


def fillGraph(inputfile):
    """Read a comma-separated adjacency matrix; return (matrix, #vertices)."""
    graph = []
    # ``with`` guarantees the handle is closed; read-only mode "r" is
    # enough (the original opened "r+" but never wrote).
    with open(inputfile, "r") as f:
        for row in f:
            graph.append([int(x) for x in row.split(',')])
    return graph, len(graph)


def findEdge(g, n, T, N):
    """Return (x, y): the endpoints of the cheapest edge from the visited
    set ``T`` (y) to the unvisited set ``N`` (x).

    Raises ValueError when no edge connects T and N (disconnected graph)
    instead of the original's UnboundLocalError.
    """
    menor = float('inf')
    x = y = None
    for row in T:
        for column in N:
            element = g[row][column]
            # 0 encodes "no edge" in the input format, so skip it.
            if element != 0 and element < menor:
                menor = element
                y = row
                x = column
    if x is None:
        raise ValueError("graph is disconnected: no edge between T and N")
    return x, y


def prim(g, n):
    """Grow a minimum spanning tree from a random start vertex, printing
    each chosen edge and finally the total cost."""
    Tmin = set()  # vertices added to the tree (kept from the original)
    T = set()     # visited vertices
    N = set()     # not-yet-visited vertices
    for idx in range(n):
        N.add(idx)
    i = rand.randrange(0, n)
    cost = 0
    T.add(i)
    N.remove(i)
    while len(T) != n:
        ex, ey = findEdge(g, n, T, N)
        T.add(ex)
        N.remove(ex)
        Tmin.add(ex)
        Tmin.add(ey)
        print(ey, '->', ex, ':', g[ex][ey])
        cost = cost + g[ex][ey]
    print("Custo da árvore geradora mínima:", cost)


def main():
    inputfile = "graphs/g3.txt"     # Open file
    g, n = fillGraph(inputfile)     # Fill graph
    print("Grafo Original: ")
    printGraph(g)                   # Print graph
    prim(g, n)


# Guarded so importing this module (e.g. for testing) does not run the
# whole program.
if __name__ == "__main__":
    main()
from setuptools import setup, find_packages

setup(
    name='pyIID',
    version='',
    packages=find_packages(exclude=['doc', 'benchmarks', 'extra',
                                    'scripts', 'examples']),
    url='',
    license='',
    author='christopher',
    author_email='',
    description='',
    # ``install_requires``, not ``requires``: the latter is legacy
    # distutils metadata that pip ignores, so scipy was never actually
    # installed as a dependency.
    install_requires=['scipy'],
)
import torch

# Two random feature maps with identical batch and spatial dimensions:
# (batch=16, channels=1024, height=14, width=24).
a = torch.rand((16, 1024, 14, 24))
b = torch.rand((16, 1024, 14, 24))
# Concatenate along dim=1 (the channel axis); every other dimension must
# match, so the result doubles only the channel count.
c = torch.cat([a, b], dim=1)
print(c.shape)  # torch.Size([16, 2048, 14, 24])
import torch
import torch.nn as nn
from torch.autograd import Variable

"""
Generator network
"""
class _netG(nn.Module):
    # Conditional DCGAN-style generator: concatenates an internally drawn
    # noise vector to the conditioning input and upsamples from 1x1 to a
    # 3-channel image via five transposed convolutions
    # (1 -> 2 -> 4 -> 8 -> 16 -> 32 spatially), ending in tanh.
    def __init__(self, opt, nclasses):
        super(_netG, self).__init__()
        self.ndim = 2*opt.ndf       # conditioning feature size (matches _netF's output)
        self.ngf = opt.ngf          # generator base channel width
        self.nz = opt.nz            # noise vector length
        self.gpu = opt.gpu          # gpu id; < 0 means CPU
        self.nclasses = nclasses
        # Input channels: noise + features + one-hot label (+1 extra slot;
        # presumably a "fake"/unknown class — TODO confirm with caller).
        self.main = nn.Sequential(
            nn.ConvTranspose2d(self.nz+self.ndim+nclasses+1, self.ngf*8, 2, 1, 0, bias=False),
            nn.BatchNorm2d(self.ngf*8),
            nn.ReLU(True),

            nn.ConvTranspose2d(self.ngf*8, self.ngf*4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf*4),
            nn.ReLU(True),

            nn.ConvTranspose2d(self.ngf*4, self.ngf*2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf*2),
            nn.ReLU(True),

            nn.ConvTranspose2d(self.ngf*2, self.ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf),
            nn.ReLU(True),

            nn.ConvTranspose2d(self.ngf, 3, 4, 2, 1, bias=False),
            nn.Tanh()
        )

    def forward(self, input):
        """input: conditioning vector of size ndim+nclasses+1 per sample;
        fresh Gaussian noise is sampled here, so outputs are stochastic."""
        batchSize = input.size()[0]
        input = input.view(-1, self.ndim+self.nclasses+1, 1, 1)
        noise = torch.FloatTensor(batchSize, self.nz, 1, 1).normal_(0, 1)
        if self.gpu>=0:
            noise = noise.cuda()
        # NOTE(review): Variable is a no-op wrapper since PyTorch 0.4,
        # kept for compatibility with the rest of this codebase.
        noisev = Variable(noise)
        output = self.main(torch.cat((input, noisev),1))
        return output

"""
Discriminator network
"""
class _netD(nn.Module):
    # Real/fake discriminator with an optional auxiliary classifier head
    # (AC-GAN style) enabled by opt.auxLoss.
    def __init__(self, opt, nclasses):
        super(_netD, self).__init__()
        self.opt = opt
        self.ndf = opt.ndf
        # Assumes 32x32 input: three MaxPool(2) halvings (32->16->8->4)
        # then MaxPool(4) collapses to 1x1 — TODO confirm input size.
        self.feature = nn.Sequential(
            nn.Conv2d(3, self.ndf, 3, 1, 1),
            nn.BatchNorm2d(self.ndf),
            nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(2,2),

            nn.Conv2d(self.ndf, self.ndf*2, 3, 1, 1),
            nn.BatchNorm2d(self.ndf*2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(2,2),

            nn.Conv2d(self.ndf*2, self.ndf*4, 3, 1, 1),
            nn.BatchNorm2d(self.ndf*4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(2,2),

            nn.Conv2d(self.ndf*4, self.ndf*2, 3, 1, 1),
            nn.BatchNorm2d(self.ndf*2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(4,4)
        )
        # Real/fake probability head.
        self.classifier_s = nn.Sequential(
            nn.Linear(self.ndf*2, 1),
            nn.Sigmoid())
        if opt.auxLoss:
            # Auxiliary class-logits head (only built when requested).
            self.classifier_c = nn.Sequential(nn.Linear(self.ndf*2, nclasses))

    def forward(self, input):
        """Returns (real/fake score per sample, class logits or None)."""
        output = self.feature(input)
        output_s = self.classifier_s(output.view(-1, self.ndf*2))
        output_s = output_s.view(-1)
        if self.opt.auxLoss:
            output_c = self.classifier_c(output.view(-1, self.ndf*2))
            return output_s, output_c
        else:
            return output_s, None

"""
Feature extraction network
"""
class _netF(nn.Module):
    # Encoder producing a 2*ndf feature vector; optionally a VAE-style
    # reparameterized sample when opt.vae is set.
    def __init__(self, opt):
        super(_netF, self).__init__()
        self.opt = opt
        self.ndf = opt.ndf
        # Unpadded 5x5 convs + pooling; with a 32x32 input this ends at
        # 1x1 spatial size (32->28->14->10->5->1) — TODO confirm.
        self.feature = nn.Sequential(
            nn.Conv2d(3, self.ndf, 5, 1, 0),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),

            nn.Conv2d(self.ndf, self.ndf, 5, 1, 0),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),

            nn.Conv2d(self.ndf, self.ndf*2, 5, 1,0),
            nn.ReLU(inplace=True)
        )
        if self.opt.vae:
            # Heads predicting the posterior mean and log-variance.
            self.mu = nn.Linear(self.ndf*2, self.ndf*2)
            self.var = nn.Linear(self.ndf*2, self.ndf*2)

    def forward(self, input):
        """Returns (features, mu, logvar); mu/logvar are None unless VAE
        mode is on, in which case features are a reparameterized sample."""
        output = self.feature(input)
        output = output.view(-1, 2*self.ndf)
        if self.opt.vae:
            mu = self.mu(output)
            var = self.var(output)
            # Reparameterization trick: sample = mu + eps * exp(logvar/2).
            std = torch.exp(0.5*var)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu), mu, var
        return output, None, None

"""
Classifier network
"""
class _netC(nn.Module):
    # Small MLP mapping a 2*ndf feature vector to class logits.
    def __init__(self, opt, nclasses):
        super(_netC, self).__init__()
        self.ndf = opt.ndf
        self.main = nn.Sequential(
            nn.Linear(2*self.ndf, 2*self.ndf),
            nn.ReLU(inplace=True),
            nn.Linear(2*self.ndf, nclasses),
        )

    def forward(self, input):
        """Return raw (unnormalized) class logits."""
        output = self.main(input)
        return output
# -*- coding=utf-8 -*-
#---------------------------------------
# Program:  Douban photo-album crawler
# Version:  0.2
# Author:   Will
# Date:     2014-07-17
# Language: Python 2.7
# Purpose:  download every photo in an album
# Changes:  prefer the large image; the user only enters the album id and
#           all result pages are computed automatically
#---------------------------------------
import urllib
import re
import datetime
import time
import urllib2
import random
import cookielib


def getPicHtml(html):
    """Find every photo-page link in an album page and download its images."""
    reg = r'href="(http://www.douban.com/photos/photo.+\/)"'
    imgre = re.compile(reg)
    imglist = re.findall(imgre, html)
    for imgurl in imglist:
        print "Now downloadPage is %r" % imgurl
        newHtml = getHtml(imgurl)
        getImg(newHtml)


def getHtml(url):
    """Fetch ``url`` with a randomly chosen User-Agent; return the body."""
    user_agents = [
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
        'Opera/9.25 (Windows NT 5.1; U; en)',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
        'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
        "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
        "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 ",
    ]
    agent = random.choice(user_agents)
    req_header = {'User-Agent':agent}
    # NOTE(review): the cookie-enabled opener is built but never installed
    # or used — urlopen below ignores the cookie jar.
    mycookie = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
    openner = urllib2.build_opener(mycookie)
    request = urllib2.Request(url, None, req_header)
    response = urllib2.urlopen(request)
    html = response.read()
    return html


def getImg(html):
    """Save every photo on a photo page, preferring the "large" variant."""
    reg = r'src="(.+photo\/photo\/public.+\.jpg)"'
    imgre = re.compile(reg)
    imglist = re.findall(imgre, html)
    for imgurl in imglist:
        # If the page links to a "large" version, rewrite the URL to it.
        reg_large = r'<a href="http://www.douban.com.+large'
        largere = re.compile(reg_large)
        largelist = re.findall(largere, html)
        if len(largelist) > 0:
            strinfo = re.compile('photo/photo')
            imgurl = strinfo.sub('photo/large', imgurl)
        print "picture url is %s" % imgurl
        # Timestamp + microseconds as a collision-free file name.
        x = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) + str(datetime.datetime.now().microsecond)
        local = 'E://myimage//'
        urllib.urlretrieve(imgurl, local + '%s.jpg' % x)
        # Random pause to avoid being blocked with HTTP 403.
        time.sleep(random.randint(0, 5))


def getIndex(html):
    """Return the album's total photo count (digits of the "(N)" counter)."""
    reg_index = r'<span class="count">\((.+)\)</span>'
    indexre = re.compile(reg_index)
    indexlist = re.findall(indexre, html)
    indexnum = indexlist[0]
    num = filter(str.isdigit, indexnum)
    return num


albumId = raw_input('please enter the albumId: ')
albumUrl = "http://www.douban.com/photos/album/" + albumId
print "Now we start at %r" % albumUrl
html = getHtml(albumUrl)
getPicHtml(html);
# One page holds at most 18 pictures, so page offsets step by 18.
total = int(getIndex(html))
index = total / 18
for x in range(1, index + 1):
    index = str(x * 18)
    albumUrl = "http://www.douban.com/photos/album/" + albumId + "/?start=" + index
    print "Now we goon to the next page"
    print "Now we come to %r" % albumUrl
    html = getHtml(albumUrl)
    getPicHtml(html);
print "%d pictures done,enjoy yourslef." % total
import sys


def knight_moves(col, row):
    """Count the legal knight moves from 0-indexed (col, row) on an 8x8 board.

    Replaces the original's eight copy-pasted range checks (one per
    offset) with a single table of the eight knight offsets.
    """
    offsets = ((2, 1), (2, -1), (-2, 1), (-2, -1),
               (1, 2), (1, -2), (-1, 2), (-1, -2))
    return sum(1 for dc, dr in offsets
               if 0 <= col + dc <= 7 and 0 <= row + dr <= 7)


def main():
    """Read a count then that many squares ("a1".."h8"); print each answer."""
    n = int(sys.stdin.readline())
    for _ in range(n):
        square = sys.stdin.readline().strip()
        col = ord(square[0]) - ord('a')  # file letter -> 0..7
        row = int(square[1]) - 1         # rank digit  -> 0..7
        print(knight_moves(col, row))


if __name__ == "__main__":
    main()
from rest_framework import mixins
from rest_framework import viewsets


# Ready-made GenericViewSet subclasses exposing only specific actions.
# Each class mixes in exactly the DRF model mixins its name lists, so
# routers generate routes only for those actions.

class CreateListRetrieveViewset(
    mixins.ListModelMixin,
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet,
):
    """Viewset allowing only create, list and retrieve."""
    pass


class CreateViewset(
    mixins.CreateModelMixin,
    viewsets.GenericViewSet,
):
    """Viewset allowing only create."""
    pass


class ListRetrieveViewset(
    mixins.ListModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet,
):
    """Read-only viewset: list and retrieve."""
    pass


class RetrieveViewset(
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet,
):
    """Viewset allowing only retrieve."""
    pass


class ListViewset(
    mixins.ListModelMixin,
    viewsets.GenericViewSet,
):
    """Viewset allowing only list."""
    pass


class UpdateViewset(
    mixins.UpdateModelMixin,
    viewsets.GenericViewSet,
):
    """Viewset allowing only update."""
    pass


class CreateListRetrieveUpdateViewset(
    mixins.ListModelMixin,
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.UpdateModelMixin,
    viewsets.GenericViewSet,
):
    """Viewset allowing create, list, retrieve and update (no destroy)."""
    pass
#!/usr/bin/env python
"""
Project Euler 42.

The n-th term of the sequence of triangle numbers is given by
t_n = n*(n+1)/2; so the first ten triangle numbers are:

    1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...

By converting each letter in a word to a number corresponding to its
alphabetical position and adding these values we form a word value.  For
example, the word value for SKY is 19 + 11 + 25 = 55 = t_10.  If the word
value is a triangle number then we shall call the word a triangle word.

Using euler42.txt, a 16K text file containing nearly two-thousand common
English words, how many are triangle words?
"""


def wordsum(word):
    """Word value: sum of alphabetical positions (A=1 ... Z=26).

    Assumes ``word`` consists of uppercase A-Z, as in the puzzle input.
    """
    return sum(ord(c) - ord('A') + 1 for c in word)


def trinum(n):
    """Return the n-th triangle number, n*(n+1)//2, in pure integers.

    The even factor is halved first; ``//`` replaces the Python 2
    integer ``/`` of the original so the result stays exact on Python 3.
    """
    if n & 1:
        return ((n // 2) + 1) * n
    else:
        return (n // 2) * (n + 1)


if __name__ == "__main__":
    # A set gives O(1) membership tests; 100 terms (t_99 = 4950) easily
    # covers any realistic word value (~26 * word length).
    trinums = {trinum(n) for n in range(100)}
    with open("../data/euler42.txt") as f:
        wordsums = [wordsum(w.strip('"')) for w in f.read().split(",")]
    print(sum(w in trinums for w in wordsums), "words are triangle words.")
# -*- coding: utf-8 -*-
import numpy as np

"""Functions used to compute the loss."""


def compute_loss_mse(y, tx, w):
    """MSE: mean of the squared residuals e = y - tx @ w.

    (The original docstring said "MAE" — the labels of the two functions
    were swapped; the formulas themselves are unchanged.)
    NOTE(review): this MSE has no 0.5 factor while the MAE below does —
    confirm which convention the rest of the project expects.
    """
    e = y - tx.dot(w)
    # ||e||_2 squared equals sum(e**2).
    return (np.linalg.norm(e) ** 2) / len(y)


def compute_loss_mae(y, tx, w):
    """MAE: half the mean of the absolute residuals e = y - tx @ w."""
    e = y - tx.dot(w)
    # ||e||_1 is sum(|e|).
    mae = 0.5 * (np.linalg.norm(e, 1)) / len(y)
    return mae
# Per-environment base URLs for the WooCommerce REST API (v3), keyed by
# deployment stage.  "dev" and "prod" are intentionally left blank here;
# presumably filled in per deployment — TODO confirm.
API_HOSTS = {
    "test": "http://192.168.1.100:11002/wp-json/wc/v3/",
    "dev": "",
    "prod": ""
}

# Database connection settings keyed by environment (currently empty).
DB_HOST = {
}
import sys

# ``requests`` is the only third-party dependency; fail fast with a clear
# message (instead of a raw ImportError traceback) when it is missing.
try:
    import requests
except ImportError:
    sys.exit("requests was not properly installed. Try again. Are you sure you are in venv?")


def get_fantasy_points(player, pos):
    """Return the PPR fantasy points of ``player`` if it plays ``pos``.

    ``player`` is one of the dicts from the API's JSON list; for players
    at any other position the function implicitly returns None (which is
    filtered out below).
    """
    if player.get("position") == pos:
        return player.get("fantasy_points").get("ppr")


# Position, season and week used to build the API endpoint.
pos = "WR"
year = "2019"
week = 1
res = requests.get('https://www.fantasyfootballdatapros.com/api/players/{0}/{1}'.format(year, week))

# Only proceed on an HTTP 2xx response.
if res.ok:
    print("Season {0}, week{1} VOR for {2}s".format(year, week, pos))
    print('-' * 40)
    data = res.json()
    # Points for every player at ``pos``; other positions contribute None.
    wr_fantasy_points = [get_fantasy_points(player, pos) for player in data]
    wr_fantasy_points = list(filter(lambda x: x is not None, wr_fantasy_points))
    # Replacement value = average points at the position; a player's VOR
    # (value over replacement) is their points above that average.
    mean = lambda x: sum(x) / len(x)
    replacement_value = mean(wr_fantasy_points)
    for player in data:
        if player.get("position") == pos:
            vor = player.get("fantasy_points").get("ppr") - replacement_value
            print(
                player.get("player_name"),
                "had a VOR of",
                vor
            )
#!/usr/bin/python # The MIT License (MIT) # # Copyright (c) 2017 Massimiliano Patacchiola # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# ATTENTION: to work it requires to launch the iCub world:
# yarpserver
# ./iCub_SIM
# ./iKinGazeCtrl --from configSim.ini
# yarpdev --device opencv_grabber
# yarp connect /grabber /icubSim/texture/screen
#
# For the cartesian controller of the left arm
# ./simCartesianControl
# ./iKinCartesianSolver --context simCartesianControl --part left_arm

# PocketSphinx valid Commands are:
# The prefix [iCub] or [hey] is optional
# learn <object name>
# this is a <object name>
# forget <object name>
# what is this
# find the <object name>
# stop detection
# look at me

from speech_recognition import SpeechRecognizer
from icub import iCub
import cv2
import random
import time
import os
import sys


def initialise():
    """Build and return (speech recognizer, iCub controller).

    Also loads the Acapela text-to-speech credentials from a local CSV and
    echoes them for debugging.
    """
    # Initialise the speech recognition engine and the iCub controller
    my_speech = SpeechRecognizer(
        hmm_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/model/en-us/en-us",
        language_model_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/model/en-us/en-us.lm.bin",
        dictionary_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/data/icub.dic",
        grammar_path="/home/massimiliano/pyERA/examples/ex_icub_trust_cognitive_architecture/sphinx/data/icub.gram",
        rule_name='icub.basicCmd',
        fsg_name="icub")
    # iCub initialization (simulator namespace)
    my_icub = iCub(icub_root='/icubSim')
    # Load acapela configuration from file
    my_icub.set_acapela_credential("./acapela_config.csv")
    account_login, application_login, application_password, service_url = my_icub.get_acapela_credential()
    print("[ACAPELA]Acapela configuration parameters:")
    print("Account Login: " + str(account_login))
    print("Application Login: " + str(application_login))
    print("Account Password: " + str(application_password))
    print("Service URL: " + str(service_url))
    print("")
    # Return the objects
    return my_speech, my_icub


def speech_to_action(speech_string):
    """ Take the sentence from the speech recognition and plan an action

    <action> = (learn new object | watch | inspect | find | search | look | what | start | stop);
    <target> = (ball | cup | book | dog | chair | table | at me | is this | movement detection);
    @param speech_string: recognised sentence (lowercase command, see header)
    @return: (spoken response, next FSM state for main())
    """
    # Each branch pattern-matches a command substring; the object name is
    # always taken as the LAST word of the sentence (rsplit below).
    if speech_string.find('learn') > -1 or speech_string.find('this is a') > -1:
        response_list = ['I like to learn! This is a ', 'Ok, this is a ', 'I learned a new object, ', '']
        object_name = speech_string.rsplit(None, 1)[-1]
        response_string = response_list[random.randint(0, len(response_list)-1)] + object_name
        state = 'learn'
    elif speech_string.find('what is this') > -1:
        response_string = ""
        state = 'what'
    elif speech_string.find('find the') > -1 or speech_string.find('search the') > -1:
        object_name = speech_string.rsplit(None, 1)[-1]
        object_path = "./objects/" + str(object_name) + ".png"
        # Tracking needs a previously learned template image on disk.
        if not os.path.isfile(object_path):
            print("[SPEECH-TO-ACTION][WARNING] " + "this file does not exist: " + str(object_path) + "\n")
            response_string = "Sorry I do not know this object!"
            state = 'key'
        else:
            response_list = ["Ok, now I'm looking for a ", 'Ok I will track the ', 'Ready to track the ']
            response_string = response_list[random.randint(0, len(response_list)-1)] + object_name
            state = 'movedetect on'
    elif speech_string.find('stop detection') > -1:
        response_list = ["Ok, no more movements", 'Ok I will stop it', "I'm gonna stop it!"]
        response_string = response_list[random.randint(0, len(response_list)-1)]
        state = 'movedetect off'
    elif speech_string.find('look at me') > -1:
        response_list = ["Ok!", 'Sure!']
        response_string = response_list[random.randint(0, len(response_list)-1)]
        state = 'look'
    else:
        # Unrecognised sentence: apologise and go back to the idle loop.
        response_list = ["Sorry I did not understand.", 'Sorry, can you repeat?', 'Repeat again please.']
        response_string = response_list[random.randint(0,len(response_list)-1)]
        state = 'key'
    return response_string, state


def main():
    """Finite-state-machine main loop.

    States: show -> key -> (record -> understand -> <command state>) -> key,
    until 'close'.  'show' refreshes the camera view, 'key' polls the
    keyboard (q=quit, r=record a voice command).
    """
    inputfile = ''
    outputfile = ''
    informant_name = ''
    if len(sys.argv) == 1 or len(sys.argv) > 4:
        print("python familiarization.py <inputfile> <outputfilename> <informant_name>")
    elif len(sys.argv) == 4:
        inputfile = sys.argv[1]
        outputfile = sys.argv[2]
        informant_name = sys.argv[3]
        print("Input file: " + str(inputfile))
        print("Output file: " + str(outputfile))
        print("Informant Name: " + str(informant_name))

    STATE = 'show'
    speech_string = ""
    fovea_offset = 40  # side of the fovea square
    my_speech, my_icub = initialise()
    is_connected = my_icub.check_connection()
    if is_connected:
        print("[STATE Init] intenet connection present.")
    else:
        print("[STATE Init][ERROR] internet connection not present!!!")
    my_icub.say_something(text="I'm ready!")
    cv2.namedWindow('main')
    while True:
        if STATE == 'record':
            # Record 3 s of audio, convert it to raw and run PocketSphinx.
            # NOTE(review): harddev='3,0' hard-codes the ALSA device —
            # confirm it matches the machine's microphone.
            #image = my_icub.return_left_camera_image(mode='BGR')
            my_speech.record_audio("/tmp/audio.wav", seconds=3, extension='wav', harddev='3,0')
            raw_file_path = my_speech.convert_to_raw(file_name="/tmp/audio.wav", file_name_raw="/tmp/audio.raw", extension='wav')
            speech_string = my_speech.return_text_from_audio("/tmp/audio.raw")
            print("[STATE " + str(STATE) + "] " + "Speech recognised: " + speech_string)
            STATE = 'understand'

        elif STATE == 'understand':
            # Map the recognised sentence to a response and the next state.
            response_string, local_state = speech_to_action(speech_string)
            print("[STATE " + str(STATE) + "] " + "Speech recognised: " + speech_string)
            print("[STATE " + str(STATE) + "] " + "Next state: " + local_state)
            my_icub.say_something(text=response_string)
            STATE = local_state

        elif STATE == 'show':
            # Refresh the camera window with the fovea square overlaid.
            left_image = my_icub.return_left_camera_image(mode='BGR')
            img_cx = int(left_image.shape[1] / 2)
            img_cy = int(left_image.shape[0] / 2)
            cv2.rectangle(left_image,
                          (img_cx-fovea_offset, img_cy-fovea_offset),
                          (img_cx+fovea_offset, img_cy+fovea_offset),
                          (0, 255, 0), 1)
            cv2.imshow('main', left_image)
            STATE = 'key'

        elif STATE == 'movedetect on':
            # Start template-based tracking; restart cleanly if already on.
            object_name = response_string.rsplit(None, 1)[-1]
            print("[STATE " + str(STATE) + "] " + "start tracking of: " + str(object_name) + "\n")
            object_path = "./objects/" + str(object_name) + ".png"
            if my_icub.is_movement_detection():
                my_icub.stop_movement_detection()
                time.sleep(0.5)
                my_icub.start_movement_detection(template_path=object_path, delay=1.0)
            else:
                my_icub.start_movement_detection(template_path=object_path, delay=1.0)
            STATE = 'key'

        elif STATE == 'movedetect off':
            print("[STATE " + str(STATE) + "] " + "stop movement tracking" + "\n")
            my_icub.stop_movement_detection()
            time.sleep(0.5)
            my_icub.reset_head_pose()
            STATE = 'key'

        elif STATE == 'look':
            print("[STATE " + str(STATE) + "] " + "gaze reset" + "\n")
            my_icub.reset_head_pose()
            STATE = 'key'

        elif STATE == 'learn':
            # Crop the fovea region, learn its histogram and save it as
            # the template for later tracking/recall.
            object_name = response_string.rsplit(None, 1)[-1]
            print("[STATE " + str(STATE) + "] " + "Learning new object: " + object_name + "\n")
            left_image = my_icub.return_left_camera_image(mode='BGR')
            #left_image = image
            img_cx = int(left_image.shape[1] / 2)
            img_cy = int(left_image.shape[0] / 2)
            left_image = left_image[img_cy-fovea_offset:img_cy+fovea_offset, img_cx-fovea_offset:img_cx+fovea_offset]
            my_icub.learn_object_from_histogram(left_image, object_name)
            print("[STATE " + str(STATE) + "] " + "Writing new template in ./objects/" + object_name + ".png" + "\n")
            cv2.imwrite('./objects/' + str(object_name) + '.png', left_image)
            STATE = 'key'

        elif STATE == 'what':
            # Recall the object whose stored histogram best matches a
            # central crop.  NOTE(review): this uses a 50x50 crop instead
            # of the 80x80 fovea used in 'learn' — confirm intentional.
            print("[STATE " + str(STATE) + "] " + "Recalling object from memory..." + "\n")
            left_image = my_icub.return_left_camera_image(mode='BGR')
            #left_image = image
            img_cx = int(left_image.shape[1] / 2)
            img_cy = int(left_image.shape[0] / 2)
            left_image = left_image[img_cy-25:img_cy+25, img_cx-25:img_cx+25]
            object_name = my_icub.recall_object_from_histogram(left_image)
            if object_name is None:
                my_icub.say_something("My memory is empty. Teach me something!")
            else:
                print("[STATE " + str(STATE) + "] " + "Name returned: " + str(object_name) + "\n")
                response_list = ["Let me see. I think this is a ", "Let me think. It's a ", "Just a second. It may be a ", "It should be a "]
                response_string = response_list[random.randint(0, len(response_list) - 1)]
                my_icub.say_something(response_string + str(object_name))
            STATE = 'key'

        elif STATE == 'key':
            # Keyboard polling; any unhandled key (or no key) goes back to
            # refreshing the camera view.
            key_pressed = cv2.waitKey(10)  # delay in millisecond
            if key_pressed==113:  # q=QUIT
                print("[STATE " + str(STATE) + "] " + "Button (q)uit pressed..." + "\n")
                STATE = "close"
            elif key_pressed==110:  # n=
                print("[STATE " + str(STATE) + "] " + "Button (n) pressed..." + "\n")
            elif key_pressed==102:  # f=
                print("[STATE " + str(STATE) + "] " + "Button (f) pressed..." + "\n")
            elif key_pressed == 114:  # r=RECORD
                print("[STATE " + str(STATE) + "] " + "Button (r)ecord pressed..." + "\n")
                STATE = "record"
            else:
                STATE = 'show'

        elif STATE == 'close':
            # Say goodbye, stop tracking, release the robot and the GUI.
            my_icub.say_something(text="See you soon, bye bye!")
            my_icub.stop_movement_detection()
            my_icub.close()
            cv2.destroyAllWindows()
            break


if __name__ == "__main__":
    main()
from django.shortcuts import render, redirect
from django.http import Http404
from django.contrib import messages
from django.core.mail import send_mail
from Modelos.models import (
    empresas,
    activi_comerciales,
    productos,
    servicios,
    usuarios,
)
from Global.usuario import Usuario
from Cliente.carrito import Carrito


def _redirect_por_rol(request, destinos):
    """Return a role-based redirect for the logged-in user, or None.

    `destinos` maps rol_id -> URL name. Any problem reading the session
    (anonymous user, malformed data) yields None, mirroring the repeated
    bare try/except-pass blocks this helper replaces.
    """
    try:
        rol = request.session["usuario"]["rol_id"]
    except Exception:
        return None
    destino = destinos.get(rol)
    return redirect(destino) if destino else None


# View for the error page shown when a URL does not exist.
def error_404_view(request, exception):
    return render(request, "error/404.html", {"error": "La página no existe"})


def error_view(request):
    return render(request, "error/404.html", {"error": "La página no existe"})


# Home page: business users (rol 2) and administrators (rol 3) are sent to
# their own panels; everyone else sees the category index.
def vwInicio(request):
    user_session = Usuario(request)  # kept for its session side effects
    respuesta = _redirect_por_rol(request, {2: "controlNegocio", 3: "admin-web"})
    if respuesta:
        return respuesta
    list_categorias = activi_comerciales.objects.filter(visible=True).order_by("nombre")
    return render(request, "index.html", {"categorias": list_categorias})


# Login page: already-authenticated users are routed to their home views.
def vwTplLogin(request):
    user_session = Usuario(request)
    respuesta = _redirect_por_rol(
        request, {1: "index", 2: "controlNegocio", 3: "admin-web"}
    )
    if respuesta:
        return respuesta
    return render(request, "autenticacion/login.html")


# Authenticates a client, storing the user (and cart) in the session.
def vwLogin(request):
    user_session = Usuario(request)
    respuesta = _redirect_por_rol(
        request, {1: "index", 2: "controlNegocio", 3: "admin-web"}
    )
    if respuesta:
        return respuesta
    try:
        usuario = usuarios.objects.get(correo=request.POST["txtUsuario"])
        # Reject disabled accounts before checking credentials.
        if not usuario.estado:
            messages.error(request, "La cuenta que estás intentando ingresar no es valida")
            return redirect("index")
        if usuario.credenciales == request.POST["txtCredenciales"]:
            # Store the user and a fresh cart in the session.
            user_session.add(usuario)
            cart_session = Carrito(request)
            return redirect("index")
        else:
            messages.error(request, "La contraseña es incorrecta " + usuario.nom_usuario)
            return redirect("login")
    except Exception:
        # usuarios.DoesNotExist, missing POST keys, etc.
        messages.error(request, "La cuenta que estás ingresando no se encuentra registrada")
        return redirect("login")


# Logs out: removes the session user and, for clients, the session cart.
def vwLogout(request):
    try:
        if not request.session["usuario"]:
            messages.info(request, "Debes iniciar sessión")
            return redirect("login")
    except Exception:
        pass
    user_session = Usuario(request)
    # NOTE(review): if "usuario" is absent from the session this lookup
    # raises KeyError (the original behaved the same way) -- consider
    # redirecting to login instead.
    if request.session["usuario"]["rol_id"] == 1:
        cart_session = Carrito(request)
        # Drop the cart values before clearing the user.
        cart_session.clear()
    user_session.clear()
    return redirect("index")


# Client registration entry point: forwards to the account-request flow.
def vwTplRegistrar(request):
    user_session = Usuario(request)
    respuesta = _redirect_por_rol(
        request, {1: "index", 2: "controlNegocio", 3: "admin-web"}
    )
    if respuesta:
        return respuesta
    return redirect("solicitar-cuenta")


# Lists the enabled businesses for a given commercial-activity id.
def vwTplListaNegocios(request, negocio_id):
    user_session = Usuario(request)
    respuesta = _redirect_por_rol(request, {2: "controlNegocio", 3: "admin-web"})
    if respuesta:
        return respuesta
    list_negocios = empresas.objects.filter(activi_comercial_id=negocio_id, estado="Habilitada")
    return render(request, "empresa/tplListaNegocios.html", {"empresas": list_negocios})


# Shows the detail page of one enabled business.
def vwTplInfoNegocio(request, empresa_id):
    user_session = Usuario(request)
    respuesta = _redirect_por_rol(request, {2: "controlNegocio"})
    if respuesta:
        return respuesta
    # NOTE(review): .get() raises DoesNotExist for unknown/disabled ids;
    # kept as-is to preserve behaviour (get_object_or_404 would 404 instead).
    list_negocios = empresas.objects.get(pk=empresa_id, estado="Habilitada")
    return render(request, "empresa/tplInfoNegocio.html", {"empresa": list_negocios})


# Shows the visible, non-deleted products and services of one enabled business.
def vwTplPrSrNegocio(request, empresa_id):
    user_session = Usuario(request)
    respuesta = _redirect_por_rol(request, {2: "controlNegocio", 3: "admin-web"})
    if respuesta:
        return respuesta
    em_fotos_servicio = servicios.objects.filter(empresa_id=empresa_id, visible=True, eliminado=False)
    em_fotos_producto = productos.objects.filter(empresa_id=empresa_id, visible=True, eliminado=False)
    empresa = empresas.objects.get(pk=empresa_id, estado="Habilitada")
    return render(
        request,
        "empresa/tplPrSrNegocio.html",
        {"empresa": empresa,
         "em_fotos_servicio": em_fotos_servicio,
         "em_fotos_producto": em_fotos_producto},
    )
from pathlib import Path from django.urls import path from . import views from .models import Record activity_short = Path(__file__).parts[-2] app_name = activity_short urlpatterns = [ path('', views.FilterRecord.as_view(), name='index'), path('record/', views.GetRecord.as_view(), name='grecord'), path('frecords/', views.FilterRecord.as_view(), name='frecords'), path('frecords/thanks', views.FilterRecord.as_view(thanks= True), name='thanks'), path('export/', views.Export.as_view(), name='export'), ]
""" delete.py """ import requests from .exceptions import AgaveFilesError from ..utils import handle_bad_response_status_code def files_delete(tenant_url, access_token, file_path): """ Remove a file or direcotry from a remote system """ # Set request url. endpoint = "{0}/{1}/{2}".format(tenant_url, "files/v2/media/system", file_path) # Make request. try: headers = {"Authorization": "Bearer {0}".format(access_token)} params = {"pretty": "true"} resp = requests.delete(endpoint, headers=headers, params=params) except Exception as err: raise AgaveFilesError(err) # Handle bad status code. handle_bad_response_status_code(resp)
def numPaths(y, x): if y==0 and x==0: return 1 if y<0 or x<0: return 0 right = numPaths(y, x-1) down = numPaths(y-1, x) return right+down def test1(): y = 2 x = 3 res = numPaths(y, x) print("res: ", res) test1()
"""Sentiment classification on IMDB subwords8k using frozen GloVe embeddings."""
from tqdm import tqdm
import numpy as np
import os
os.environ['HDF5_DISABLE_VERSION_CHECK'] = '2'
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt  # fix: `plt` was used below but never imported

ds, info = tfds.load('imdb_reviews/subwords8k', with_info=True, as_supervised=True)
train_examples, test_examples = ds['train'], ds['test']
encoder = info.features['text'].encoder

BUFFER_SIZE = 10000  # fix: was the typo `UFFER_SIZE` and never actually used
BATCH_SIZE = 128

train_dataset = (train_examples
                 .shuffle(BUFFER_SIZE)
                 .padded_batch(BATCH_SIZE, padded_shapes=([None], [])))

test_dataset = (test_examples
                .padded_batch(BATCH_SIZE, padded_shapes=([None], [])))

GLOVE_DIR = 'glove6b/'
GLOVE_EMBEDDING_DIM = 100

# Average of all GloVe vectors, used as the embedding for unknown subwords.
unk_word_embedding = np.zeros(GLOVE_EMBEDDING_DIM)
embeddings_index = {}
n_vectors = 0
with open(GLOVE_DIR + f'glove.6B.{GLOVE_EMBEDDING_DIM}d.txt', encoding='utf-8',
          errors='ignore') as glove_file:
    for line in tqdm(glove_file):
        word, *word_embedding = line.split()
        word_embedding = np.array(word_embedding, dtype='float32')
        embeddings_index[word] = word_embedding
        unk_word_embedding += word_embedding
        n_vectors += 1
# fix: the original divided by the last enumerate index `i` (count - 1),
# an off-by-one in the average.
unk_word_embedding = unk_word_embedding / n_vectors

embedding_matrix = np.zeros((encoder.vocab_size, GLOVE_EMBEDDING_DIM))
# NOTE(review): tfds subword ids start at 1 (0 is padding); enumerating
# `_subwords` from 0 shifts every row by one -- confirm the intended mapping.
for i, word in enumerate(encoder._subwords):
    embedding_matrix[i] = embeddings_index.get(word.rstrip('_'), unk_word_embedding)

model = tf.keras.Sequential([
    tf.keras.layers.Embedding(encoder.vocab_size, GLOVE_EMBEDDING_DIM),
    # fix: return_sequences=True is required so the second (stacked)
    # recurrent layer receives a 3-D sequence instead of a 2-D vector.
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1)
])

# Install the pre-trained GloVe weights and freeze them.
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False

model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer="adam",
              metrics=['accuracy'])
model.summary()

fit_data = model.fit(train_dataset, epochs=5,
                     validation_data=test_dataset,
                     validation_steps=30)

# fix: `test_data` was never defined; evaluate explicitly on the test set.
test_loss, test_acc = model.evaluate(test_dataset)
print('Test accuracy: {:.4f}'.format(test_acc))

# Plot training & validation accuracy values
plt.plot(fit_data.history['accuracy'])
plt.plot(fit_data.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Plot training & validation loss values
plt.plot(fit_data.history['loss'])
plt.plot(fit_data.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
import random a = [4,5,2] #값에 access a[0] a[1] a[2] # 리스트에 랜덤한 value 5 insert append ''' Data Structure 자료구조 지만 많이 사용되기 때문에 basic part에서 설명함. 선형구조 -initialiaztion -init a = [] a = [1, 2, 3, 4] -Data add append 함수를 사용 for i in range(1,101): a.append(i) -check length len(a) -delete del a[2] -insert a.insert(index, number) -slicing a[:3] a[:-1] Q. 리스트 중 최대값과 최소값을 구하가 #리스트 내포 a = [k * k for k in range(10)] #튜플 a = (1,2,3) 한번 값을 할당 하면 바꿀수 없다. list 와 비슷하지만 메모리 측면에서 더 효율적으로 사용할 수도 있다. '''
#!/usr/bin/env python3
"""
Takes input a relatedness file, a fam file, and a list of individuals and
extracts the sub-matrix from the relatedness file for the given individuals
Jean-Tristan Brandenburg
"""
import sys
import pandas as pd
import numpy as np
import argparse

EOL = chr(10)


def errorMessage10(phe):
    """Report invalid (non-numeric, non-NA) data in a phenotype column."""
    print("""
A problem has been detected in file <%s> column <%s>.

There is some invalid data. I regret I can't tell you which row.


Please check -- the data should be numeric only.


If there is missing data, please use   NA



""" % (sys.argv[1], phe))


def parseArguments():
    """Parse the command line (fixed: help texts were copy-pasted)."""
    parser = argparse.ArgumentParser(description='extract a phenotype-filtered sub-matrix of a relatedness matrix')
    parser.add_argument('--rel', type=str, required=True, help="relatedness matrix file (gemma output, tab-separated, id header line)")
    parser.add_argument('--phenofile', type=str, required=True, help="fam-style phenotype file used to compute the relatedness matrix")
    parser.add_argument('--covfile', type=str, required=True, help="covariate file (FID IID + covariates, header line)")
    parser.add_argument('--pospheno', type=int, required=True, help="0-based phenotype column position (after FID IID)")
    parser.add_argument('--relout', type=str, required=True, help="output file for the filtered relatedness sub-matrix")
    parser.add_argument('--phenofileout', type=str, required=True, help="output file for the filtered phenotypes")
    parser.add_argument('--covfileout', type=str, required=True, help="output file for the filtered covariates")
    args = parser.parse_args()
    return args


args = parseArguments()
pospheno = args.pospheno

# Read the phenotype file: build the output header and keep only individuals
# with a usable phenotype (neither -9 nor NA).
readpheno = open(args.phenofile)
NewHeader = "FID IID\t" + readpheno.readline().split()[pospheno + 1]
listeFIDKeep = []
DicPheno = {}
for Lines in readpheno:
    SplL = Lines.split()
    if SplL[1 + pospheno] != '-9' and SplL[1 + pospheno].upper() != "NA":
        listeFIDKeep.append(SplL[0] + " " + SplL[1])
        DicPheno[SplL[0] + " " + SplL[1]] = SplL[0] + " " + SplL[1] + "\t" + SplL[1 + pospheno]

# Header line of the relatedness matrix: one "FID IID" per column.
readmat = open(args.rel)
linemat = readmat.readline()
# BUG FIX: the original did not strip the trailing newline, so the last
# header id never matched listeFIDKeep and that individual was silently lost.
listeFID = linemat.rstrip('\n').split('\t')
print(listeFID[1:5])
readmat.close()

# Column positions to keep (0 = the leading id column), in matrix order.
ListePosKept = [0]
CmtFID = 0
FinalIdList = []
for FID in listeFID:
    if FID in listeFIDKeep:
        ListePosKept.append(CmtFID)
        FinalIdList.append(FID)
    CmtFID += 1

# Second pass: emit only the kept rows and, within them, the kept columns.
readmat = open(args.rel)
writemat = open(args.relout, 'w')
CmtL = 0
print('begin : open and write matrix pheno in file ' + args.relout)
for Line in readmat:
    Line = Line.replace('\n', '')
    if CmtL in ListePosKept:
        Chaine = []
        SplLine = Line.split('\t')
        for Pos in ListePosKept:
            Chaine.append(SplLine[Pos])
        writemat.write("\t".join(Chaine) + "\n")
    CmtL += 1
readmat.close()
writemat.close()
print('end : open and write matrix pheno in file ' + args.relout)

# Write the filtered phenotype file, one kept individual per line,
# in the same order as the matrix.
print('begin : write pheno in file ' + args.phenofileout)
WritePheno = open(args.phenofileout, 'w')
WritePheno.write(NewHeader + '\n')
for FID in FinalIdList:
    WritePheno.write(DicPheno[FID] + '\n')
WritePheno.close()
print('end : write pheno in file ' + args.phenofileout)

# Covariates: keep the header, then re-emit rows for the kept individuals.
readcov = open(args.covfile)
NewHeader = readcov.readline().replace('\n', '')
DicCov = {}
print('begin : read cov from file ' + args.covfile)
for Lines in readcov:
    SplL = Lines.split()
    DicCov[SplL[0] + " " + SplL[1]] = Lines.replace('\n', '')
readcov.close()
print('end : read cov from file ' + args.covfile)

print('begin : write cov ' + args.covfileout)
writecov = open(args.covfileout, 'w')
writecov.write(NewHeader + '\n')
for FID in FinalIdList:
    writecov.write(DicCov[FID] + '\n')
writecov.close()
print('end : write cov')
# -*- coding: utf-8 -*- import h5py import numpy as np from sklearn.utils import shuffle from keras.models import * from keras.layers import * import pandas as pd from keras.preprocessing.image import * # bottleneck产生测试特征 np.random.seed(2017) X_train = [] X_test = [] y_pred = [] for filename in ["gap_InceptionV3.h5", "gap_Xception.h5", "gap_InceptionResNetV2.h5"]: with h5py.File(filename, 'r') as h: X_train.append(np.array(h['train'])) X_test.append(np.array(h['test'])) y_train = np.array(h['label']) X_train = np.concatenate(X_train, axis=1) X_test = np.concatenate(X_test, axis=1) # 获取保存的模型 model = load_model('model_concat_cvd.h5') y_pred = model.predict(X_test, verbose=1) y_pred = y_pred.clip(min=0.005, max=0.995) # 产生submission df = pd.read_csv("sample_submission.csv") gen = ImageDataGenerator() test_generator = gen.flow_from_directory("/home/autel/Dataset/Cat_vs_Dog/test2", (224, 224), shuffle=False, batch_size=16, class_mode=None) for i, fname in enumerate(test_generator.filenames): index = int(fname[fname.rfind('/')+1:fname.rfind('.')]) df.set_value(index-1, 'label', y_pred[i]) df.to_csv('model_concat_pred.csv', index=None) df.head(10)
list=["a","e","i","o","u","A","E","I","O","U"] a=str(input("enter the value:")) if (a in list): print ("vowel") else: print ("consonant")
import pygame class Settings(): '''Класс для хранения всех настроек классификатора рукописных чисел''' def __init__(self): '''Инициализирует настройки классификатора''' #Параметры экрана self.screen_width = 415 self.screen_height = 250 self.bg_color = (100, 150, 255) #Параметры доски для рисования self.blackboard_width = 250 self.blackboard_height = 250 self.blackboard_color = (0, 0, 0) #Параметры мелка(карандаша) self.crayon_color = (255, 255, 255) self.crayon_width = 4 #Параметры надписей(цвет, шрифт) self.title_color = (30, 30, 30) self.title_font = pygame.font.SysFont(None, 25) #Параметры цифр(цвет, шрифт) self.number_color = (30, 30, 30) self.number_font = pygame.font.SysFont(None, 35)
""" TIME LIMIT PER TEST: 3 seconds MEMORY LIMIT PER TEST: 256 megabytes INPUT: standard input OUTPUT: standard output "Contestant who earns a score equal to or greater than the k-th place finisher's score will advance to the next round, as long as the contestant earns a positive score..." — an excerpt from contest rules. A total of n participants took part in the contest (n ≥ k), and you already know their scores. Calculate how many participants will advance to the next round. INPUT The first line of the input contains two integers n and k (1 ≤ k ≤ n ≤ 50) separated by a single space. The second line contains n space-separated integers a1, a2, ..., an (0 ≤ ai ≤ 100), where ai is the score earned by the participant who got the i-th place. The given sequence is non-increasing (that is, for all i from 1 to n - 1 the following condition is fulfilled: ai ≥ ai + 1). OUTPUT Output the number of participants who advance to the next round. """ n, k = [int(a) for a in input().split()] scores = [int(a) for a in input().split()] accum = 0 for score in scores[:n]: if score >= scores[k-1] and score > 0: accum += 1 print(accum)
import os def clear(): return os.system('cls') should_continue = True def cipherUnrestricted(message, direction, caesarNumber): cipheredMessage = "" if direction == 'e': for letter in message: if letter.isalpha(): cipheredMessage += chr((ord(letter) - 97 + caesarNumber) % 26 + 97) else: characterNumber = ord(letter) - 48 if characterNumber < 0 or characterNumber > 9: cipheredMessage += letter else: cipheredMessage += chr((characterNumber + caesarNumber) % 10 + 48) return cipheredMessage elif direction == 'd': for letter in message: if letter.isalpha(): cipheredMessage += chr((ord(letter) - 97 - caesarNumber) % 26 + 97) else: characterNumber = ord(letter) - 48 if characterNumber < 0 or characterNumber > 9: cipheredMessage += letter else: cipheredMessage += chr((characterNumber - caesarNumber) % 10 + 48) return cipheredMessage while should_continue: direction = input("Type 'e' for encryption or 'd' for decryption: ") if direction != 'e' and direction != 'd': print("Error: Invalid direction") continue clear() mode = "Encryption mode" if direction == 'e' else "Decryption mode" print(mode) message = input("Enter the message: ") caesarNumber = int(input("Enter the number of shifts: ")) print(cipherUnrestricted(message, direction, caesarNumber)) should_continue = input("Continue? (y/n): ") == 'y'
from random import * from metodusok import * from targy import * targyak = Targy("teritve", "troli") print(targyak) also = bekerszam("Alsóhatár?: ",1,7) felso = bekerszam("Felsőhatár?: ",5,10) darab = bekerszam("Darab?: ",1,5) szamok = [] for i in range(darab): #veletlenszam = randint(also,felso) szamok.append(veletlenszam(also,felso)) kiir(szamok)
aa8=int(input()) b6=[int(x) for x in input().split()] zzz=0 for x in range(aa8): for y in range(x): if b6[y]<b6[x]: yyy+=b6[j] print(zzz)
# Created by Brian Mascitello to calculate a specific polynomial's root. def mathproblem(x0): """ function mathproblem(x0) finds a root of the nonlinear function specified by f and fprime. y = 2 ** x - 3 ** (x / 2) - 1; yprime = 0.693147 * (2 ** x) - (1.098612 * 3 ** (x / 2)) / 2; Result x is the root. """ epsilon = 2.2204*10**-16 """ governs precision of convergence where 2.2204*10**-16 = machine epsilion in python """ x = x0 xprevious = 0 k = 0 while abs(float(2 ** x - 3 ** (x / 2) - 1)) > epsilon*abs(float(2 ** x0 - 3 ** (x0 / 2) - 1)) and k < 20: k = k+1 xprevious = x x = float(x) - (float(2 ** x - 3 ** (x / 2) - 1)/float(0.693147 * (2 ** x) - (1.098612 * 3 ** (x / 2)) / 2)) change = abs(float(x - xprevious)) residual = 2 ** x - 3 ** (x / 2) - 1 print("Iteration: %d, Root: %f, Change: %f, Residual: %f" % (k,x,change,residual)) print("Root at",x,"\n") return float(x) print("y = 2 ** x - 3 ** (x / 2) - 1") print("yprime = 0.693147 * (2 ** x) - (1.098612 * 3 ** (x / 2)) / 2") print("Machine epsilon set as: 2.2204*10^-16") x0 = float(input("Please enter your guess of the root: ")) mathproblem(x0)
from flask import Flask import bot_nmap as botmap import json app = Flask(__name__) @app.route('/nmap/<ip>',methods=['GET']) def nmap(ip): return json.dumps(botmap.getPorts(ip)) app.run(host='0.0.0.0',use_reloader=False)
import imp, os.path, sys, time from watchdog.observers import Observer from watchdog.events import PatternMatchingEventHandler import mockingbeat class MyHandler(PatternMatchingEventHandler): patterns = [] @staticmethod def dispatch(event): if event.src_path == ifn: load() ifn = ofn = None def load(): stdout = sys.stdout with file(ofn, 'w') as sys.stdout: try: reload(mockingbeat) mod = imp.load_source(ifn.rsplit('/', 1)[-1].split('.', 1)[0], ifn, file(ifn, 'r')) except: import traceback traceback.print_exc() sys.stdout = stdout def main(_ifn, _ofn): global ifn, ofn ifn = os.path.abspath(_ifn) ofn = _ofn MyHandler.patterns.append(ifn) load() observer = Observer() observer.schedule(MyHandler, os.path.dirname(ifn), recursive=True) observer.start() try: while True: time.sleep(0.1) except KeyboardInterrupt: observer.stop() observer.join() if __name__=='__main__': main(*sys.argv[1:])
# Необходимо набрать из каждый пары ровно одно число так, чтобы сумма всех выбранных числе не делилась на 31 # и при этом была максимально возможной. f = open("27v01_B.txt", 'r') f_len = int(f.readline()) a = [] for i in range(f_len): j, k = map(int, f.readline().split()) a.append([max(j, k), min(j, k), abs(j-k)]) f.close() print(a) f_sum = sum([a[i][0] for i in range(len(a))]) print(f_sum) if f_sum % 31 == 0: i_min = 0 for i in range(len(a)): if a[i][2] == max(a[i][2], a[i_min][2]): i_min = i for i in range(len(a)): if a[i][2] == min(a[i][2], a[i_min][2]): # bool and bool in a [max, min, difference] print(a[i]) if a[i][2] % 31 != 0: i_min = i f_sum = f_sum + a[i_min][1] - a[i_min][0] print(i_min, a[i_min]) print(f_sum)
from .views import AccessViewSet def register(router): router.register(r'access', AccessViewSet, base_name='access')
import math
import numpy as np, cv2

# Output window / warp target size.
width = 640
height = 480

# Calibration quad: starts as the centred half-size rectangle of the window;
# the user drags its corners with the mouse to match the projection surface.
referencePoints = np.float32(
    [[width/4,height/4],
     [3*width/4,height/4],
     [3*width/4,3*height/4],
     [width/4,3*height/4]])
currentPoint = -1       # index of the corner being dragged; -1 = none
calibrating = True      # draw the corner handles while True
fullScreen = False

# NOTE(review): names[0] is the *string* '0'; cv2.VideoCapture('0') tries to
# open a file literally named "0", not camera 0 -- probably meant integer 0.
names = ['0', 'A risada mais engraçada Pânico na TV.avi', 'Sabe de nada inocente[1].avi'];
window_titles = ['first', 'second', 'third']

inputimage1 = cv2.imread("pp.jpg")

# One capture per source.
cap = [cv2.VideoCapture(i) for i in names]

frames = [None] * len(names);
gray = [None] * len(names);
ret = [None] * len(names);

rows1, cols1 = inputimage1.shape[:2]
# Source quads for the perspective warps: the full still image and the full
# 640x480 video frame respectively.
pts1 = np.float32([[0,0],[cols1,0],[cols1,rows1],[0,rows1]])
pts2 = np.float32([[0,0],[639,0],[639,479],[0,479]])

# Composition buffer drawn into every frame.
image = np.zeros((height, width, 3), np.uint8)


def pointColor(n):
    """BGR colour for calibration corner n (red/yellow/cyan, green default)."""
    if n == 0:
        return (0,0,255)
    elif n == 1:
        return (0,255,255)
    elif n == 2:
        return (255,255,0)
    else:
        return (0,255,0)


def mouse(event, x, y, flags, param):
    """Mouse callback: grab a corner within 4 px on press, drag it, release."""
    global currentPoint
    if event == cv2.EVENT_LBUTTONDOWN:
        cp = 0
        for point in referencePoints:
            dist = math.sqrt((x-point[0])*(x-point[0])+(y-point[1])*(y-point[1]))
            if dist < 4:
                currentPoint = cp
                break
            else:
                cp = cp + 1
    if event == cv2.EVENT_LBUTTONUP:
        currentPoint = -1
    # While a corner is held, it follows the cursor.
    if currentPoint != -1:
        referencePoints[currentPoint] = [x,y]


cv2.namedWindow("test", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("test", mouse)

# Main loop: warp the video into the calibration quad; keys: c = toggle
# calibration handles, f = toggle fullscreen, q = quit.
while True:
    image[:] = (0,0,0)
    if calibrating:
        color = 0
        for point in referencePoints:
            cv2.circle(image, (int(point[0]), int(point[1])),5,pointColor(color), -1)
            color = color + 1
    # NOTE(review): `cap` is a *list* of VideoCapture objects, so cap.read()
    # raises AttributeError at runtime -- presumably cap[0].read() (or a loop
    # over all captures) was intended; confirm before fixing.
    ret, frame = cap.read()
    M = cv2.getPerspectiveTransform(pts1,referencePoints)
    M2 = cv2.getPerspectiveTransform(pts2,referencePoints)
    cv2.warpPerspective(frame, M2, (width,height), image, borderMode=cv2.BORDER_TRANSPARENT)
    #cv2.warpPerspective(inputimage1, M, (width,height), image, borderMode=cv2.BORDER_TRANSPARENT)
    cv2.imshow("test", image)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("c"):
        calibrating = not calibrating
    if key == ord("f"):
        if fullScreen == False:
            cv2.setWindowProperty("test", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        else:
            cv2.setWindowProperty("test", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
        fullScreen = not fullScreen
    if key == ord("q"):
        break

cv2.destroyAllWindows()
# Dependencies
import requests
from splinter import Browser
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
from flask_pymongo import PyMongo
from pymongo import MongoClient
from bs4 import BeautifulSoup as bs


def get_mars_table():
    """Scrape the Mars facts table and return it as an HTML string."""
    #Get Mars Facts
    mars_facts_url = 'https://space-facts.com/mars/'
    mars_data = pd.read_html(mars_facts_url)
    #convert to html table
    clean_table = mars_data[0].set_index([0])
    clean_table.index.name="Description"
    clean_table = clean_table.rename(columns={1: ""})
    mars_data_table =clean_table.to_html()
    # NOTE(review): the two bare expressions below are notebook leftovers
    # (no effect in a script).
    mars_data_table
    clean_table
    return mars_data_table


def scrape():
    """Scrape Mars news, featured image and hemisphere data into MongoDB.

    Side effects only (writes to the local `mars_db.mars_data` collection);
    returns None -- confirm callers don't expect the scraped dict back.
    """
    mars_facts_url = 'https://space-facts.com/mars/'
    response = requests.get(mars_facts_url)
    # Create BeautifulSoup object; parse with 'html.parser'
    soup = bs(response.text, 'html.parser')
    # Scrape the facts page and collect the headline strong-text plus the
    # sibling text of the first list item.
    mars_news_scraped = soup.find('div', id = 'facts' )
    headline = mars_news_scraped.find('strong').text
    mars_list = mars_news_scraped.find_all('li')[0]
    news_text = mars_list.next_sibling.text

    # Open a Chrome browser session for the JS-rendered pages.
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=False)

    # Get the featured image URL from the JPL mirror.
    image_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
    browser.visit(image_url)
    image_html = browser.html
    image_soup = bs(image_html, 'html.parser')
    featured_image = image_soup.find('div', class_ = 'floating_text_area')
    featured_image_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/' + featured_image.a['href']

    # Get Mars Facts as an HTML table.
    mars_data = pd.read_html(mars_facts_url)
    #convert to html table
    mars_data_table = mars_data[0].to_html()

    mars_db_data = {
        'headline' : headline,
        'headline_text':news_text,
        'featured_image_url' : featured_image_url
    }

    # Connect to MongoDB and replace the previous scrape results.
    conn = "mongodb://localhost:27017"
    client = MongoClient(conn)
    db = client.mars_db
    collection = db.mars_data
    collection.delete_many({})
    # NOTE(review): Collection.update() was removed in PyMongo 4.x; this
    # needs update_one({}, {"$set": mars_db_data}, upsert=True) or
    # replace_one on modern drivers -- confirm the pinned pymongo version.
    collection.update({},mars_db_data, upsert=True)

    # Grab the four hemisphere pages and store title + full image URL each.
    hemispheres_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(hemispheres_url)
    hemisphere_image_url = {}
    short_url = 'https://astrogeology.usgs.gov/'
    short_img_url = 'https://astrogeology.usgs.gov'
    hemispheres_html = browser.html
    hemispheres_soup = bs(hemispheres_html, 'html.parser')
    hemisphere_items = hemispheres_soup.find('div', class_='collapsible results').find_all('div', class_='item')
    for hem_url in hemisphere_items:
        browser.visit(short_url + hem_url.a['href'])
        hemisphere_body = bs(browser.html, 'html.parser').find('body')
        image_title = hemisphere_body.find('div', class_='content').find('h2', class_='title').text
        image_url = hemisphere_body.find('img', class_='wide-image')['src']
        # The dict is reused each iteration; .copy() below snapshots it
        # before insertion so documents don't alias each other.
        hemisphere_image_url['title'] = image_title
        hemisphere_image_url['img_url'] = short_img_url + image_url
        collection.insert_one(hemisphere_image_url.copy())
    browser.quit()
# Strings
#######################################################################################################################
#
#  Anton and Artur are old friends. Today they practice in writing strings. Anton must write each string
#  with length exactly N, based on the alphabet of size M. And Artur, on the contrary, should write each
#  string with length exactly M, based on the alphabet of size N. The guys spend 1 second to write a single
#  string. They start writing at the same time.
#  And now the boys are interested in one question. Is it true that they will finish together? (It is assumed
#  that the guys don't pause during the writing of strings).
#
#  Input
#  First line of the input contains the number of test cases T. It is followed by T test cases.
#  Each test case has 1 line. The line contains two integer numbers N and M separated by a single space.
#
#  Output
#  For each test case output "YES" if Anton and Artur will finish at the same time. Else print "NO"
#
#  Constraints
#  1 <= T <= 50
#  1 <= N, M <= 10^10000
#
#  SAMPLE INPUT
#  3
#  1 1
#  1 3
#  100 500
#
#  SAMPLE OUTPUT
#  YES
#  NO
#  NO
#
#  Explanation
#  In the second case Anton should write three strings. But Artur should write only one string,
#  and he will finish earlier.
#
#######################################################################################################################
from notes.infraestructure.adapter.sqlalchemy import SqlAlchemyAdapter from notes.domain.entity.notes import Notes class NotesSqlAlchemyRepository: def __init__(self): self.__adapter = SqlAlchemyAdapter() self.__adapter.entity = Notes def create(self, notes: Notes): try: return self.__adapter.create(notes) except Exception as e: raise e def get_all(self): return self.__adapter.find_all()
import unittest from google_finance import GoogleFinance class TddInPythonExample_Plotter(unittest.TestCase): def test_savepng(self): gf = GoogleFinance() gf.plot() def test_addidxcolumn(self): gf = GoogleFinance() reshaped = gf.addidxcolumn([ [-1, 240, 239, 240, 231, 3260407], [-1, 240, 239, 240, 231, 3260407], [-1, 240, 239, 240, 231, 3260407], [-1, 240, 239, 240, 231, 3260407], [-1, 240, 239, 240, 231, 3260407], [-1, 240, 239, 240, 231, 3260407], [-1, 240, 239, 240, 231, 3260407], [-1, 240, 239, 240, 231, 3260407], [-1, 240, 239, 240, 231, 3260407] ],1) self.assertEqual(reshaped.shape, (9, 7)) self.assertEqual(reshaped[5,0], 5)
from ScenarioHelper import * def main(): CreateScenaFile( "t1310_1.bin", # FileName "t1310", # MapName "t1310", # Location 0x00BD, # MapIndex "ed7161", 0x00002000, # Flags ("", "", "", "", "", ""), # include 0x00, # PlaceNameNumber 0x1A, # PreInitFunctionIndex b'\x00\xff\xff', # Unknown_51 # Information [440, 441, 705, 859, 1004, 1061, 1187, 1417, 1489, 0, 1539, 0, 3172, 3319, 3376, 3529, 0, 3586, 0, 124, 14, 0, 0], ) BuildStringList(( "t1310_1", # 0 )) ChipFrameInfo(440, 0) # 0 ScpFunction(( "Function_0_1B8", # 00, 0 "Function_1_1B9", # 01, 1 "Function_2_2C1", # 02, 2 "Function_3_35B", # 03, 3 "Function_4_3EC", # 04, 4 "Function_5_425", # 05, 5 "Function_6_4A3", # 06, 6 "Function_7_589", # 07, 7 "Function_8_5D1", # 08, 8 "Function_9_603", # 09, 9 "Function_10_C64", # 0A, 10 "Function_11_CF7", # 0B, 11 "Function_12_D30", # 0C, 12 "Function_13_DC9", # 0D, 13 "Function_14_E02", # 0E, 14 "Function_15_E7C", # 0F, 15 "Function_16_F16", # 10, 16 "Function_17_F8B", # 11, 17 "Function_18_FDC", # 12, 18 "Function_19_10AE", # 13, 19 "Function_20_1136", # 14, 20 "Function_21_11B5", # 15, 21 "Function_22_129C", # 16, 22 "Function_23_12E4", # 17, 23 "Function_24_1322", # 18, 24 "Function_25_1B7B", # 19, 25 "Function_26_1C0E", # 1A, 26 "Function_27_1C47", # 1B, 27 "Function_28_1CF0", # 1C, 28 "Function_29_1D87", # 1D, 29 "Function_30_1DC0", # 1E, 30 "Function_31_1E5E", # 1F, 31 "Function_32_1E5F", # 20, 32 "Function_33_1EC6", # 21, 33 "Function_34_1F5C", # 22, 34 "Function_35_1FAD", # 23, 35 "Function_36_2019", # 24, 36 "Function_37_2034", # 25, 37 "Function_38_2090", # 26, 38 "Function_39_20C2", # 27, 39 "Function_40_2159", # 28, 40 "Function_41_21DE", # 29, 41 "Function_42_2209", # 2A, 42 "Function_43_2231", # 2B, 43 "Function_44_22DE", # 2C, 44 "Function_45_233B", # 2D, 45 "Function_46_23C7", # 2E, 46 "Function_47_24AC", # 2F, 47 "Function_48_24F4", # 30, 48 "Function_49_2538", # 31, 49 "Function_50_2E9B", # 32, 50 "Function_51_2F31", # 33, 51 "Function_52_2F6A", # 34, 52 
"Function_53_2FEC", # 35, 53 "Function_54_307E", # 36, 54 "Function_55_30B7", # 37, 55 "Function_56_311F", # 38, 56 "Function_57_31B8", # 39, 57 "Function_58_31F1", # 3A, 58 "Function_59_3273", # 3B, 59 "Function_60_330B", # 3C, 60 "Function_61_33C6", # 3D, 61 "Function_62_3432", # 3E, 62 "Function_63_3529", # 3F, 63 "Function_64_3585", # 40, 64 "Function_65_35C3", # 41, 65 "Function_66_360B", # 42, 66 "Function_67_36A0", # 43, 67 "Function_68_36DE", # 44, 68 "Function_69_3763", # 45, 69 "Function_70_3F80", # 46, 70 "Function_71_4016", # 47, 71 "Function_72_404F", # 48, 72 "Function_73_40DD", # 49, 73 "Function_74_4175", # 4A, 74 "Function_75_41A8", # 4B, 75 "Function_76_41F0", # 4C, 76 "Function_77_4258", # 4D, 77 "Function_78_4291", # 4E, 78 "Function_79_435F", # 4F, 79 "Function_80_43FD", # 50, 80 "Function_81_4452", # 51, 81 "Function_82_44D7", # 52, 82 "Function_83_45CA", # 53, 83 "Function_84_45FC", # 54, 84 "Function_85_462E", # 55, 85 "Function_86_4693", # 56, 86 "Function_87_4705", # 57, 87 "Function_88_47A0", # 58, 88 )) def Function_0_1B8(): pass label("Function_0_1B8") Return() # Function_0_1B8 end def Function_1_1B9(): pass label("Function_1_1B9") Call(1, 0) SetChrPos(0x11, 24500, -6000, -19000, 0) SetChrPos(0x12, 27500, -6000, -19000, 0) SetChrPos(0x10, 24500, -6000, -13000, 180) SetChrPos(0x13, 27500, -6000, -13000, 180) SetChrPos(0x14, 27500, -4000, -16000, 0) SetChrChipByIndex(0x11, 0x17) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x12, 0x29) SetChrSubChip(0x12, 0x0) SetChrChipByIndex(0x10, 0x18) SetChrSubChip(0x10, 0x0) SetChrChipByIndex(0x13, 0x2E) SetChrSubChip(0x13, 0x0) RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) OP_68(26000, -5000, -16000, 0) MoveCamera(330, 21, 0, 0) OP_6E(650, 0) SetCameraDistance(14000, 0) OP_68(26000, -5000, -16000, 8000) MoveCamera(295, 30, 0, 8000) OP_6E(650, 8000) SetCameraDistance(17000, 8000) FadeToBright(1000, 0) BeginChrThread(0x12, 3, 1, 2) label("loc_2A6") 
Jc((scpexpr(EXPR_GET_RESULT, 0x3), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_2BD") Sleep(1) Jump("loc_2A6") label("loc_2BD") OP_6F(0x79) OP_0D() Return() # Function_1_1B9 end def Function_2_2C1(): pass label("Function_2_2C1") def lambda_2C6(): OP_9D(0xFE, 0x6B6C, 0xFFFFEC78, 0xFFFFB6F4, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_2C6) SetChrFlags(0xFE, 0x20) def lambda_2E8(): label("loc_2E8") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_2E8") QueueWorkItem2(0xFE, 2, lambda_2E8) Sleep(350) SetChrChipByIndex(0xFE, 0x15) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) #C0001 ChrTalk( 0x12, "#6P#7A──前辈!\x02", ) #Auto def lambda_328(): OP_9D(0xFE, 0x5FB4, 0xFFFFF254, 0xFFFFBBA4, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_328) BeginChrThread(0x11, 3, 1, 3) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x29) SetChrSubChip(0xFE, 0x0) Return() # Function_2_2C1 end def Function_3_35B(): pass label("Function_3_35B") Sleep(500) Sound(809, 0, 100, 0) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x26) SetChrSubChip(0xFE, 0x0) def lambda_376(): OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFBD98, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_376) Sleep(600) SetChrSubChip(0xFE, 0x1) Sound(442, 0, 90, 0) #C0002 ChrTalk( 0x11, "#5P#5A哦!\x02", ) #Auto def lambda_3AF(): OP_96(0xFE, 0x5FB4, 0xFFFFEA84, 0xFFFFCC0C, 0x4E20, 0x0) ExitThread() QueueWorkItem(0x14, 1, lambda_3AF) BeginChrThread(0x10, 3, 1, 4) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Return() # Function_3_35B end def Function_4_3EC(): pass label("Function_4_3EC") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) def lambda_3FF(): OP_9D(0xFE, 0x6A40, 0xFFFFEC78, 0xFFFFCD38, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, 
lambda_3FF) BeginChrThread(0x13, 3, 1, 5) Sleep(500) SetChrSubChip(0xFE, 0x0) Return() # Function_4_3EC end def Function_5_425(): pass label("Function_5_425") SetChrFlags(0xFE, 0x20) def lambda_42F(): label("loc_42F") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_42F") QueueWorkItem2(0xFE, 2, lambda_42F) Sleep(350) SetChrChipByIndex(0xFE, 0x1C) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) #C0003 ChrTalk( 0x13, "#12P#6A交给你了!\x02", ) #Auto def lambda_470(): OP_9D(0xFE, 0x5FB4, 0xFFFFF254, 0xFFFFC694, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_470) BeginChrThread(0x10, 3, 1, 6) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x2E) SetChrSubChip(0xFE, 0x0) Return() # Function_5_425 end def Function_6_4A3(): pass label("Function_6_4A3") Sleep(500) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x35) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_4BE(): OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFC568, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_4BE) Sleep(600) #C0004 ChrTalk( 0x10, "#11P#4S#5A呀!!#3S\x02", ) #Auto SetChrSubChip(0xFE, 0x1) OP_82(0x64, 0x0, 0xBB8, 0x96) PlayEffect(0x0, 0xFF, 0xFE, 0x5, 0, 700, 1000, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0) Sound(442, 0, 100, 0) Sound(547, 0, 40, 0) BeginChrThread(0x14, 3, 1, 7) BeginChrThread(0x11, 3, 1, 8) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x18) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) WaitChrThread(0x14, 3) WaitChrThread(0x11, 3) RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Return() # Function_6_4A3 end def Function_7_589(): pass label("Function_7_589") SetChrChip(0x0, 0xFE, 0x1E, 0x12C) OP_96(0xFE, 0x5AD2, 0xFFFFE890, 0xFFFFADF8, 0x4E20, 0x0) Sound(443, 0, 100, 0) Sound(441, 0, 80, 0) OP_9D(0xFE, 0x54F6, 0xFFFFE890, 0xFFFF8FBC, 0x3E8, 0x7D0) SetChrChip(0x1, 0xFE, 
0x0, 0x0) Return() # Function_7_589 end def Function_8_5D1(): pass label("Function_8_5D1") SetChrChipByIndex(0xFE, 0x27) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x7D0) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Return() # Function_8_5D1 end def Function_9_603(): pass label("Function_9_603") OP_50(0x67, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) SetChrPos(0x101, 24500, -6000, -19000, 0) SetChrPos(0x11, 27500, -6000, -19000, 0) SetChrPos(0x12, 24500, -6000, -10000, 180) SetChrPos(0x13, 27500, -6000, -13000, 180) SetChrPos(0x10, 21300, -6000, -16000, 90) SetChrPos(0x14, 24500, -5500, -10200, 0) SetChrFlags(0x14, 0x8) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x11, 0x17) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x13, 0x2E) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x12, 0x9) SetChrSubChip(0x12, 0x0) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(17000, 0) OP_68(26000, -5000, -16000, 12000) MoveCamera(305, 30, 0, 12000) FadeToBright(1000, 0) OP_0D() RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) BeginChrThread(0x12, 3, 1, 10) label("loc_6FB") Jc((scpexpr(EXPR_GET_RESULT, 0x3), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_712") Sleep(1) Jump("loc_6FB") label("loc_712") OP_4B(0x14, 0xFF) RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) FadeToDark(300, 0, 100) OP_0D() Menu( 0, -1, -1, 0, ( "直接扣击\x01", # 0 "托球回传,由兰迪发动攻击\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) FadeToBright(300, 0) OP_0D() OP_4C(0x14, 0xFF) Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_9DD") MoveCamera(315, 25, 0, 1500) SetCameraDistance(15000, 1500) BeginChrThread(0x101, 3, 1, 18) WaitChrThread(0x101, 3) Sound(909, 0, 70, 0) Sleep(500) #C0005 ChrTalk( 
0x10, "#13400F#5P#N出界~!!\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() SetChrChipByIndex(0x101, 0xFF) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x11, 0x8) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x13, 0xA) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x12, 0x9) SetChrSubChip(0x12, 0x0) OP_63(0x101, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) Sleep(1000) #C0006 ChrTalk( 0x101, "#12506F#6P糟糕,太着急了吗……\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() OP_93(0x13, 0xE1, 0x1F4) #C0007 ChrTalk( 0x13, "#12902F呵呵,承让了。\x02", ) CloseMessageWindow() #C0008 ChrTalk( 0x12, "#13006F#5P#N真危险~\x02", ) CloseMessageWindow() OP_93(0x101, 0x5A, 0x1F4) #C0009 ChrTalk( 0x101, "#12500F抱歉!兰迪!\x02", ) CloseMessageWindow() OP_93(0x11, 0x10E, 0x1F4) #C0010 ChrTalk( 0x11, ( "#12800F#12P没事没事!\x01", "接下来就是我们的反击了!\x02", ) ) CloseMessageWindow() FadeToDark(1000, 0, -1) OP_0D() Sound(909, 0, 70, 0) Sleep(500) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x11, 0x17) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x13, 0x2E) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x12, 0x29) SetChrSubChip(0x12, 0x0) SetChrPos(0x101, 24500, -6000, -19000, 0) SetChrPos(0x11, 27500, -6000, -20000, 0) SetChrPos(0x13, 24500, -6000, -12000, 180) SetChrPos(0x12, 27500, -6000, -13000, 180) SetChrFlags(0x14, 0x8) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(16000, 0) SetCameraDistance(17000, 1500) FadeToBright(1000, 0) OP_6F(0x79) OP_0D() #C0011 ChrTalk( 0x10, ( "#13400F#5P……比赛结束!!\x02\x03", "#13409F7比12,\x01", "瓦吉队获胜~!\x02", ) ) CloseMessageWindow() #C0012 ChrTalk( 0x101, "#12506F#6P呼,输了呢……\x02", ) CloseMessageWindow() Jump("loc_C58") label("loc_9DD") OP_2C(0xA5, 0x1) MoveCamera(315, 25, 0, 2000) SetCameraDistance(15000, 2000) BeginChrThread(0x101, 3, 1, 20) WaitChrThread(0x101, 3) Sound(909, 0, 70, 0) Sleep(500) #C0013 ChrTalk( 0x10, "#13405F#5P#N哦哦!厉害啊,兰迪!\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() SetChrChipByIndex(0x101, 0xFF) 
SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x11, 0x8) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x13, 0xA) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x12, 0x9) SetChrSubChip(0x12, 0x0) #C0014 ChrTalk( 0x12, "#13002F#11P#N唔……干得漂亮。\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() #C0015 ChrTalk( 0x13, ( "#12906F#11P哎呀呀,身高差距\x01", "实在是让人头疼呢。\x02", ) ) CloseMessageWindow() OP_93(0x11, 0x10E, 0x1F4) #C0016 ChrTalk( 0x11, ( "#12809F#12P传得好,罗伊德,\x01", "你的判断力果然出色。\x02", ) ) CloseMessageWindow() OP_93(0x101, 0x5A, 0x1F4) #C0017 ChrTalk( 0x101, ( "#12509F#5P哈哈,我们只是暂时领先而已。\x02\x03", "#12500F好!乘胜追击吧!!\x02", ) ) CloseMessageWindow() FadeToDark(1000, 0, -1) OP_0D() Sound(909, 0, 70, 0) Sleep(500) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x11, 0x17) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x13, 0x2E) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x12, 0x29) SetChrSubChip(0x12, 0x0) SetChrPos(0x101, 24500, -6000, -19000, 0) SetChrPos(0x11, 27500, -6000, -20000, 0) SetChrPos(0x13, 24500, -6000, -12000, 180) SetChrPos(0x12, 27500, -6000, -13000, 180) SetChrFlags(0x14, 0x8) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(16000, 0) SetCameraDistance(17000, 1500) FadeToBright(1000, 0) OP_6F(0x79) OP_0D() #C0018 ChrTalk( 0x10, ( "#13400F#5P……比赛结束!!\x02\x03", "#13409F12比8,警察弟弟队获胜~!\x02", ) ) CloseMessageWindow() #C0019 ChrTalk( 0x101, "#12500F#6P好!赢了!!\x02", ) CloseMessageWindow() label("loc_C58") FadeToDark(1000, 0, -1) OP_0D() Return() # Function_9_603 end def Function_10_C64(): pass label("Function_10_C64") ClearChrFlags(0x14, 0x8) Sound(802, 0, 60, 0) def lambda_C74(): OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_C74) SetChrChipByIndex(0xFE, 0x28) SetChrSubChip(0xFE, 0x0) Sleep(1000) Sound(441, 0, 100, 0) SetChrSubChip(0xFE, 0x1) #C0020 ChrTalk( 0x12, "#11P#5A呼!\x02", ) #Auto def lambda_CB6(): OP_9D(0xFE, 0x5FB4, 0xFFFFEA84, 0xFFFFB6F4, 0xBB8, 0x3E8) ExitThread() 
QueueWorkItem(0x14, 1, lambda_CB6) BeginChrThread(0x101, 3, 1, 11) Sleep(500) SetChrChipByIndex(0xFE, 0x9) SetChrSubChip(0xFE, 0x0) OP_9B(0x0, 0xFE, 0x0, 0xBB8, 0xFA0, 0x0) SetChrChipByIndex(0xFE, 0x29) SetChrSubChip(0xFE, 0x0) Return() # Function_10_C64 end def Function_11_CF7(): pass label("Function_11_CF7") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) def lambda_D0A(): OP_9D(0xFE, 0x6B6C, 0xFFFFEE6C, 0xFFFFB5C8, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_D0A) BeginChrThread(0x11, 3, 1, 12) Sleep(500) SetChrSubChip(0xFE, 0x0) Return() # Function_11_CF7 end def Function_12_D30(): pass label("Function_12_D30") SetChrFlags(0xFE, 0x20) def lambda_D3A(): label("loc_D3A") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_D3A") QueueWorkItem2(0xFE, 2, lambda_D3A) Sleep(350) SetChrChipByIndex(0xFE, 0x25) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) #C0021 ChrTalk( 0x11, "#6P#6A哦!\x02", ) #Auto def lambda_D74(): OP_9D(0xFE, 0x6B6C, 0xFFFFEA84, 0xFFFFCC0C, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_D74) BeginChrThread(0x13, 3, 1, 13) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Sleep(1500) SetChrChipByIndex(0xFE, 0x8) SetChrSubChip(0xFE, 0x0) OP_9B(0x0, 0xFE, 0x0, 0x7D0, 0xFA0, 0x0) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Return() # Function_12_D30 end def Function_13_DC9(): pass label("Function_13_DC9") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) def lambda_DDC(): OP_9D(0xFE, 0x5FB4, 0xFFFFEC78, 0xFFFFCD38, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_DDC) BeginChrThread(0x12, 3, 1, 14) Sleep(500) SetChrSubChip(0xFE, 0x0) Return() # Function_13_DC9 end def Function_14_E02(): pass label("Function_14_E02") SetChrFlags(0xFE, 0x20) def lambda_E0C(): label("loc_E0C") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_E0C") QueueWorkItem2(0xFE, 2, lambda_E0C) 
Sleep(350) #C0022 ChrTalk( 0x12, "#11P#8A瓦吉!\x02", ) #Auto SetChrChipByIndex(0xFE, 0x15) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) def lambda_E49(): OP_9D(0xFE, 0x6B6C, 0xFFFFF448, 0xFFFFC75C, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_E49) BeginChrThread(0x13, 3, 1, 15) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x29) SetChrSubChip(0xFE, 0x0) Return() # Function_14_E02 end def Function_15_E7C(): pass label("Function_15_E7C") Sleep(500) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x30) SetChrSubChip(0xFE, 0x0) #C0023 ChrTalk( 0x13, "#11P#5A嗯……!\x02", ) #Auto Sound(809, 0, 100, 0) def lambda_EAB(): OP_9D(0xFE, 0x6B6C, 0xFFFFE890, 0xFFFFC568, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_EAB) Sleep(600) SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) def lambda_ED3(): OP_98(0xFE, 0x0, 0xFFFFFCE0, 0xFFFFF830, 0x2EE0, 0x0) ExitThread() QueueWorkItem(0x14, 1, lambda_ED3) BeginChrThread(0x11, 3, 1, 16) Sleep(100) SetChrSubChip(0xFE, 0x2) Sound(442, 0, 80, 0) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x2E) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Return() # Function_15_E7C end def Function_16_F16(): pass label("Function_16_F16") SetChrChipByIndex(0xFE, 0x27) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) Sound(809, 0, 100, 0) def lambda_F2E(): OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x1388) ExitThread() QueueWorkItem(0xFE, 1, lambda_F2E) WaitChrThread(0x14, 1) def lambda_F4F(): OP_9D(0xFE, 0x5FB4, 0xFFFFEA84, 0xFFFFCD38, 0x3E8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_F4F) BeginChrThread(0x12, 3, 1, 17) Sound(441, 0, 80, 0) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Return() # Function_16_F16 end def Function_17_F8B(): pass label("Function_17_F8B") WaitChrThread(0x14, 1) SetChrFlags(0xFE, 0x20) OP_93(0xFE, 0xB4, 0x0) 
ClearChrFlags(0xFE, 0x20) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) def lambda_FAF(): OP_9D(0xFE, 0x5FB4, 0xFFFFED40, 0xFFFFB5C8, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_FAF) Sleep(300) SetChrSubChip(0xFE, 0x0) Sleep(200) RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Return() # Function_17_F8B end def Function_18_FDC(): pass label("Function_18_FDC") ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x21) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_FF4(): OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFBBA4, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_FF4) Sleep(600) SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) Sound(442, 0, 100, 0) Sound(547, 0, 40, 0) OP_82(0x64, 0x0, 0xBB8, 0x96) PlayEffect(0x0, 0xFF, 0xFE, 0x5, 0, 700, 1000, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0) #C0024 ChrTalk( 0x101, "#5P#6A#4S嘿!!#3S\x02", ) #Auto BeginChrThread(0x14, 3, 1, 19) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) WaitChrThread(0x14, 3) Return() # Function_18_FDC end def Function_19_10AE(): pass label("Function_19_10AE") SetChrFlags(0x13, 0x20) def lambda_10B8(): label("loc_10B8") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_10B8") QueueWorkItem2(0x13, 2, lambda_10B8) SetChrFlags(0x12, 0x20) def lambda_10CF(): label("loc_10CF") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_10CF") QueueWorkItem2(0x12, 2, lambda_10CF) SetChrChip(0x0, 0xFE, 0x1E, 0x12C) OP_96(0xFE, 0x68F6, 0xFFFFE890, 0xFFFFDDBE, 0x4E20, 0x0) Sound(443, 0, 100, 0) Sound(441, 0, 80, 0) OP_9D(0xFE, 0x7A76, 0xFFFFE868, 0x1B58, 0x9C4, 0x3E8) SetChrChip(0x1, 0xFE, 0x0, 0x0) EndChrThread(0x13, 0x2) ClearChrFlags(0x13, 0x20) EndChrThread(0x12, 0x2) ClearChrFlags(0x12, 0x20) Return() # Function_19_10AE end def Function_20_1136(): pass label("Function_20_1136") SetChrFlags(0xFE, 0x20) def lambda_1140(): 
label("loc_1140") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_1140") QueueWorkItem2(0xFE, 2, lambda_1140) Sleep(350) SetChrChipByIndex(0xFE, 0x20) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) #C0025 ChrTalk( 0x101, "#5P#5A拜托了!\x02", ) #Auto def lambda_117E(): OP_9D(0xFE, 0x6B6C, 0xFFFFF448, 0xFFFFBF8C, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_117E) BeginChrThread(0x11, 3, 1, 21) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x11, 3) Return() # Function_20_1136 end def Function_21_11B5(): pass label("Function_21_11B5") Sleep(600) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x26) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_11D0(): OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_11D0) Sleep(600) SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) Sound(442, 0, 100, 0) Sound(547, 0, 40, 0) OP_82(0x64, 0x0, 0xBB8, 0x96) PlayEffect(0x0, 0xFF, 0xFE, 0x5, 0, 700, 1000, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0) #C0026 ChrTalk( 0x11, "#5P#10A#4S噢噢噢噢噢!!#3S\x02", ) #Auto Sleep(600) BeginChrThread(0x14, 3, 1, 22) BeginChrThread(0x13, 3, 1, 23) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) WaitChrThread(0x14, 3) WaitChrThread(0x13, 3) Return() # Function_21_11B5 end def Function_22_129C(): pass label("Function_22_129C") SetChrChip(0x0, 0xFE, 0x1E, 0x12C) OP_96(0xFE, 0x6F9A, 0xFFFFE890, 0xFFFFCCCA, 0x4E20, 0x0) Sound(443, 0, 100, 0) Sound(441, 0, 80, 0) OP_9D(0xFE, 0x7F76, 0xFFFFE7F0, 0x762, 0x1388, 0x3E8) SetChrChip(0x1, 0xFE, 0x0, 0x0) Return() # Function_22_129C end def Function_23_12E4(): pass label("Function_23_12E4") SetChrChipByIndex(0xFE, 0x31) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) Sound(809, 0, 100, 0) OP_9C(0xFE, 0x0, 0x0, 0x0, 
0x5DC, 0x7D0) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x2E) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Return() # Function_23_12E4 end def Function_24_1322(): pass label("Function_24_1322") OP_50(0x68, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) SetChrPos(0x101, 27500, -6000, -19000, 0) SetChrPos(0x12, 24500, -6000, -19000, 0) SetChrPos(0x10, 24500, -6000, -10000, 180) SetChrPos(0x13, 27500, -6000, -13000, 180) SetChrPos(0x11, 21300, -6000, -16000, 90) SetChrPos(0x14, 24500, -5500, -10200, 0) SetChrFlags(0x14, 0x8) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x12, 0x29) SetChrSubChip(0x12, 0x0) SetChrChipByIndex(0x13, 0x2E) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x10, 0x3) SetChrSubChip(0x10, 0x0) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(17000, 0) OP_68(26000, -5000, -16000, 13000) MoveCamera(305, 30, 0, 13000) FadeToBright(1000, 0) OP_0D() RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) BeginChrThread(0x10, 3, 1, 25) label("loc_141A") Jc((scpexpr(EXPR_GET_RESULT, 0x3), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1431") Sleep(1) Jump("loc_141A") label("loc_1431") OP_4B(0x14, 0xFF) RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) FadeToDark(300, 0, 100) OP_0D() Menu( 0, -1, -1, 0, ( "预判为强力扣击,参与阻截\x01", # 0 "判断对方的真意,退至后场\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) FadeToBright(300, 0) OP_0D() OP_4C(0x14, 0xFF) Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_17FF") MoveCamera(315, 25, 0, 2000) SetCameraDistance(15000, 2000) BeginChrThread(0x13, 3, 1, 39) WaitChrThread(0x13, 3) Sound(909, 0, 70, 0) Sleep(500) #C0027 ChrTalk( 0x11, ( "#12809F#5P#N伊莉娅小姐队得分!\x02\x03", "#12803F话说回来,不愧是瓦吉啊……\x01", "竟然击出如此狡诈的球。\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetChrChipByIndex(0x101, 
0xFF) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x12, 0x9) SetChrSubChip(0x12, 0x0) SetChrChipByIndex(0x13, 0xA) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x10, 0x3) SetChrSubChip(0x10, 0x0) OP_93(0x13, 0x10E, 0x1F4) #C0028 ChrTalk( 0x13, ( "#12902F#12P呵呵,别说得这么难听嘛,\x01", "这也是战术的一种。\x02", ) ) CloseMessageWindow() OP_93(0x10, 0x87, 0x1F4) #C0029 ChrTalk( 0x10, ( "#13404F#5P嗯,和他们这种单纯刻板的对手比赛,\x01", "没有比这更好的战术了~\x02", ) ) CloseMessageWindow() OP_63(0x101, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) OP_63(0x12, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) Sleep(1000) OP_93(0x12, 0x5A, 0x1F4) #C0030 ChrTalk( 0x12, ( "#13001F#5P罗、罗伊德警官……\x01", "他们竟敢这样说我们!\x02", ) ) CloseMessageWindow() OP_93(0x101, 0x10E, 0x1F4) #C0031 ChrTalk( 0x101, ( "#12510F#12P唔唔……绝、绝不能输!\x02\x03", "#12501F既然如此,诺艾尔,我们就靠意志来取胜吧!\x01", "无论如何也要接住他们的球!\x02", ) ) CloseMessageWindow() FadeToDark(1000, 0, -1) OP_0D() Sound(909, 0, 70, 0) Sleep(500) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x12, 0x29) SetChrSubChip(0x12, 0x0) SetChrChipByIndex(0x13, 0x2E) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x10, 0x18) SetChrSubChip(0x10, 0x0) SetChrPos(0x101, 27500, -6000, -19000, 0) SetChrPos(0x12, 24500, -6000, -19000, 0) SetChrPos(0x10, 24500, -6000, -13000, 180) SetChrPos(0x13, 27500, -6000, -13000, 180) SetChrFlags(0x14, 0x8) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(16000, 0) SetCameraDistance(17000, 1500) FadeToBright(1000, 0) OP_6F(0x79) OP_0D() #C0032 ChrTalk( 0x11, ( "#12800F#5P……比赛结束!!\x02\x03", "#12809F3比12,\x01", "伊莉娅小姐队获胜!!\x02", ) ) CloseMessageWindow() #C0033 ChrTalk( 0x101, "#12506F#6P呜……还是不行吗……\x02", ) CloseMessageWindow() Jump("loc_1B6F") label("loc_17FF") OP_2C(0xA5, 0x1) MoveCamera(315, 25, 0, 2000) SetCameraDistance(15000, 2000) BeginChrThread(0x13, 3, 1, 43) WaitChrThread(0x13, 3) Sleep(600) Sound(909, 0, 70, 0) Sleep(500) #C0034 ChrTalk( 0x11, ( "#12800F#5P#N罗伊德队得分!\x02\x03", 
"#12809F哈哈,干得不错嘛!\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetChrChipByIndex(0x101, 0xFF) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x12, 0x9) SetChrSubChip(0x12, 0x0) SetChrChipByIndex(0x13, 0xA) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x10, 0x3) SetChrSubChip(0x10, 0x0) #C0035 ChrTalk( 0x13, ( "#12906F#11P哎呀呀,没想到会被你识破。\x02\x03", "#12902F平时那么单纯刻板,\x01", "竟能做出如此出色的判断~\x02", ) ) CloseMessageWindow() #C0036 ChrTalk( 0x101, ( "#12502F#6P以瓦吉的性格来说,\x01", "在那种情况下肯定会采用取巧手段。\x02\x03", "#12504F与其正面对攻,不如反其道而行。\x02", ) ) CloseMessageWindow() OP_93(0x12, 0x5A, 0x1F4) #C0037 ChrTalk( 0x12, "#13000F#5P成功了!罗伊德警官!\x02", ) CloseMessageWindow() OP_93(0x10, 0x5A, 0x1F4) #C0038 ChrTalk( 0x10, ( "#13400F#5P呵呵,似乎有些\x01", "小看他们了呢。\x02", ) ) CloseMessageWindow() #C0039 ChrTalk( 0x13, "#12904F哈哈,看来是这样啊。\x02", ) CloseMessageWindow() OP_93(0x101, 0x10E, 0x1F4) #C0040 ChrTalk( 0x101, ( "#12500F#12P好!诺艾尔!\x01", "我们就保持这种状态,\x01", "把握住比赛的节奏吧!\x02", ) ) CloseMessageWindow() #C0041 ChrTalk( 0x12, "#13009F#5P嗯!明白了!!\x02", ) CloseMessageWindow() FadeToDark(1000, 0, -1) OP_0D() Sound(909, 0, 70, 0) Sleep(500) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x12, 0x29) SetChrSubChip(0x12, 0x0) SetChrChipByIndex(0x13, 0x2E) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x10, 0x18) SetChrSubChip(0x10, 0x0) SetChrPos(0x101, 27500, -6000, -19000, 0) SetChrPos(0x12, 24500, -6000, -19000, 0) SetChrPos(0x10, 24500, -6000, -13000, 180) SetChrPos(0x13, 27500, -6000, -13000, 180) SetChrFlags(0x14, 0x8) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(16000, 0) SetCameraDistance(17000, 1500) FadeToBright(1000, 0) OP_6F(0x79) OP_0D() #C0042 ChrTalk( 0x11, ( "#12800F#5P……比赛结束!!\x02\x03", "#12809F12比11,\x01", "罗伊德队获胜!!\x02", ) ) CloseMessageWindow() #C0043 ChrTalk( 0x101, "#12512F#6P赢、赢了……勉强险胜啊!\x02", ) CloseMessageWindow() label("loc_1B6F") FadeToDark(1000, 0, -1) OP_0D() Return() # Function_24_1322 end def Function_25_1B7B(): pass 
label("Function_25_1B7B") ClearChrFlags(0x14, 0x8) Sound(802, 0, 60, 0) def lambda_1B8B(): OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_1B8B) SetChrChipByIndex(0xFE, 0x32) SetChrSubChip(0xFE, 0x0) Sleep(1000) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) #C0044 ChrTalk( 0x10, "#11P#5A嘿!\x02", ) #Auto def lambda_1BCD(): OP_9D(0xFE, 0x5FB4, 0xFFFFEA84, 0xFFFFB6F4, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_1BCD) BeginChrThread(0x12, 3, 1, 26) Sleep(500) SetChrChipByIndex(0xFE, 0x3) SetChrSubChip(0xFE, 0x0) OP_9B(0x0, 0xFE, 0x0, 0xBB8, 0xFA0, 0x0) SetChrChipByIndex(0xFE, 0x18) SetChrSubChip(0xFE, 0x0) Return() # Function_25_1B7B end def Function_26_1C0E(): pass label("Function_26_1C0E") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) def lambda_1C21(): OP_9D(0xFE, 0x6B6C, 0xFFFFEDA4, 0xFFFFB5C8, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_1C21) BeginChrThread(0x101, 3, 1, 27) Sleep(500) SetChrSubChip(0xFE, 0x0) Return() # Function_26_1C0E end def Function_27_1C47(): pass label("Function_27_1C47") SetChrFlags(0xFE, 0x20) def lambda_1C51(): label("loc_1C51") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_1C51") QueueWorkItem2(0xFE, 2, lambda_1C51) Sleep(350) SetChrChipByIndex(0xFE, 0x20) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 60, 0) #C0045 ChrTalk( 0x101, "#6P#5A诺艾尔!\x02", ) #Auto def lambda_1C8F(): OP_9D(0xFE, 0x5FB4, 0xFFFFF448, 0xFFFFBBA4, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_1C8F) BeginChrThread(0x12, 3, 1, 28) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Sleep(1500) SetChrChipByIndex(0xFE, 0xFF) SetChrSubChip(0xFE, 0x0) OP_93(0xFE, 0x0, 0x1F4) OP_98(0xFE, 0x0, 0x0, 0xFFFFFC18, 0xFA0, 0x0) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Return() # Function_27_1C47 end def Function_28_1CF0(): pass label("Function_28_1CF0") Sleep(500) 
ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x2B) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_1D0B(): OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFBD98, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_1D0B) Sleep(600) SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) Sound(442, 0, 80, 0) #C0046 ChrTalk( 0x12, "#5P#5A嗯!!\x02", ) #Auto def lambda_1D4A(): OP_96(0xFE, 0x6B6C, 0xFFFFEA84, 0xFFFFCC0C, 0x36B0, 0x0) ExitThread() QueueWorkItem(0x14, 1, lambda_1D4A) BeginChrThread(0x13, 3, 1, 29) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x29) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Return() # Function_28_1CF0 end def Function_29_1D87(): pass label("Function_29_1D87") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) def lambda_1D9A(): OP_9D(0xFE, 0x5FB4, 0xFFFFF448, 0xFFFFC75C, 0xC80, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_1D9A) BeginChrThread(0x10, 3, 1, 30) Sleep(500) SetChrSubChip(0xFE, 0x0) Return() # Function_29_1D87 end def Function_30_1DC0(): pass label("Function_30_1DC0") Sleep(500) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x35) SetChrSubChip(0xFE, 0x0) #C0047 ChrTalk( 0x10, "#11P#5A接球!\x02", ) #Auto Sound(809, 0, 100, 0) def lambda_1DED(): OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFC568, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_1DED) Sleep(600) SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) Sound(442, 0, 80, 0) def lambda_1E1B(): OP_96(0xFE, 0x6B6C, 0xFFFFEA84, 0xFFFFB30C, 0x3A98, 0x0) ExitThread() QueueWorkItem(0x14, 1, lambda_1E1B) BeginChrThread(0x12, 3, 1, 31) BeginChrThread(0x101, 3, 1, 32) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x18) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Return() # Function_30_1DC0 end def Function_31_1E5E(): pass label("Function_31_1E5E") Return() # Function_31_1E5E end def Function_32_1E5F(): pass 
label("Function_32_1E5F") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) def lambda_1E72(): OP_9D(0xFE, 0x5FB4, 0xFFFFEC78, 0xFFFFBD98, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_1E72) BeginChrThread(0x12, 3, 1, 33) Sleep(500) SetChrSubChip(0xFE, 0x0) Sleep(500) SetChrChipByIndex(0xFE, 0xFF) SetChrSubChip(0xFE, 0x0) OP_93(0xFE, 0x0, 0x1F4) OP_98(0xFE, 0x0, 0x0, 0x5DC, 0xBB8, 0x0) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Return() # Function_32_1E5F end def Function_33_1EC6(): pass label("Function_33_1EC6") SetChrFlags(0xFE, 0x20) def lambda_1ED0(): label("loc_1ED0") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_1ED0") QueueWorkItem2(0xFE, 2, lambda_1ED0) Sleep(350) SetChrChipByIndex(0xFE, 0x15) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) def lambda_1EFB(): OP_9D(0xFE, 0x6B6C, 0xFFFFEA84, 0xFFFFCC0C, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_1EFB) BeginChrThread(0x13, 3, 1, 34) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x29) SetChrSubChip(0xFE, 0x0) Sleep(1700) SetChrChipByIndex(0xFE, 0x9) SetChrSubChip(0xFE, 0x0) OP_93(0xFE, 0x0, 0x1F4) OP_98(0xFE, 0x5DC, 0x0, 0x0, 0xBB8, 0x0) SetChrChipByIndex(0xFE, 0x29) SetChrSubChip(0xFE, 0x0) Return() # Function_33_1EC6 end def Function_34_1F5C(): pass label("Function_34_1F5C") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) #C0048 ChrTalk( 0x13, "#12P#5A看招吧……!\x02", ) #Auto def lambda_1F87(): OP_9D(0xFE, 0x5FB4, 0xFFFFEC78, 0xFFFFC568, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_1F87) BeginChrThread(0x10, 3, 1, 35) Sleep(500) SetChrSubChip(0xFE, 0x0) Return() # Function_34_1F5C end def Function_35_1FAD(): pass label("Function_35_1FAD") SetChrFlags(0xFE, 0x20) def lambda_1FB7(): label("loc_1FB7") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_1FB7") QueueWorkItem2(0xFE, 2, lambda_1FB7) Sleep(350) SetChrChipByIndex(0xFE, 0x34) 
SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) def lambda_1FE2(): OP_9D(0xFE, 0x6B6C, 0xFFFFF448, 0xFFFFC75C, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_1FE2) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x18) SetChrSubChip(0xFE, 0x0) RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Return() # Function_35_1FAD end def Function_36_2019(): pass label("Function_36_2019") Sleep(500) SetChrChipByIndex(0xFE, 0x3) SetChrSubChip(0xFE, 0x0) OP_93(0xFE, 0xB4, 0x1F4) SetChrChipByIndex(0xFE, 0x18) SetChrSubChip(0xFE, 0x0) Return() # Function_36_2019 end def Function_37_2034(): pass label("Function_37_2034") BeginChrThread(0x10, 3, 1, 36) SetChrChipByIndex(0xFE, 0xFF) SetChrSubChip(0xFE, 0x0) OP_98(0xFE, 0x0, 0x0, 0x5DC, 0xFA0, 0x0) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) SetChrChipByIndex(0xFE, 0x22) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x7D0) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Return() # Function_37_2034 end def Function_38_2090(): pass label("Function_38_2090") SetChrChipByIndex(0xFE, 0x2C) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x7D0) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x29) SetChrSubChip(0xFE, 0x0) Return() # Function_38_2090 end def Function_39_20C2(): pass label("Function_39_20C2") BeginChrThread(0x101, 3, 1, 37) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x30) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_20E0(): OP_9D(0xFE, 0x6B6C, 0xFFFFE890, 0xFFFFC568, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_20E0) Sleep(600) #C0049 ChrTalk( 0x13, "#12P#5A骗你们啦¤\x02", ) #Auto SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) BeginChrThread(0x14, 3, 1, 40) BeginChrThread(0x12, 3, 1, 38) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) Sound(441, 0, 100, 
0) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x2E) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) WaitChrThread(0x14, 3) WaitChrThread(0x101, 3) WaitChrThread(0x12, 3) Return() # Function_39_20C2 end def Function_40_2159(): pass label("Function_40_2159") SetChrChip(0x0, 0xFE, 0x1E, 0x12C) OP_9D(0xFE, 0x639C, 0xFFFFE890, 0xFFFFAC04, 0x7D0, 0x3E8) Sound(443, 0, 40, 0) Sound(441, 0, 80, 0) OP_9D(0xFE, 0x639C, 0xFFFFE890, 0xFFFFA628, 0x3E8, 0x3E8) Sound(441, 0, 60, 0) OP_9D(0xFE, 0x639C, 0xFFFFE890, 0xFFFFA240, 0x1F4, 0x3E8) Sound(441, 0, 40, 0) OP_9D(0xFE, 0x639C, 0xFFFFE890, 0xFFFFA04C, 0xC8, 0x3E8) SetChrChip(0x1, 0xFE, 0x0, 0x0) Return() # Function_40_2159 end def Function_41_21DE(): pass label("Function_41_21DE") BeginChrThread(0x10, 3, 1, 36) SetChrChipByIndex(0xFE, 0xFF) SetChrSubChip(0xFE, 0x0) OP_98(0xFE, 0x0, 0x0, 0xFFFFF830, 0xFA0, 0x0) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Return() # Function_41_21DE end def Function_42_2209(): pass label("Function_42_2209") Sleep(300) SetChrChipByIndex(0xFE, 0x9) SetChrSubChip(0xFE, 0x0) OP_98(0xFE, 0x0, 0x0, 0xFFFFFC18, 0xFA0, 0x0) SetChrChipByIndex(0xFE, 0x29) SetChrSubChip(0xFE, 0x0) Return() # Function_42_2209 end def Function_43_2231(): pass label("Function_43_2231") BeginChrThread(0x101, 3, 1, 41) BeginChrThread(0x12, 3, 1, 42) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x30) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_2255(): OP_9D(0xFE, 0x6B6C, 0xFFFFE890, 0xFFFFC568, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_2255) Sleep(600) SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) #C0050 ChrTalk( 0x13, "#12P#5A什么……!?\x02", ) #Auto def lambda_2295(): OP_9D(0xFE, 0x6B6C, 0xFFFFEA84, 0xFFFFB118, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_2295) BeginChrThread(0x101, 3, 1, 44) Sleep(100) Sound(441, 0, 100, 0) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x2E) 
SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Sleep(5500) Return() # Function_43_2231 end def Function_44_22DE(): pass label("Function_44_22DE") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) def lambda_22F1(): OP_9D(0xFE, 0x6590, 0xFFFFEC78, 0xFFFFB9B0, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_22F1) BeginChrThread(0x12, 3, 1, 45) Sleep(500) SetChrSubChip(0xFE, 0x0) SetChrChipByIndex(0xFE, 0xFF) SetChrSubChip(0xFE, 0x0) OP_98(0xFE, 0x0, 0x0, 0x5DC, 0xFA0, 0x0) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Return() # Function_44_22DE end def Function_45_233B(): pass label("Function_45_233B") SetChrFlags(0xFE, 0x20) def lambda_2345(): label("loc_2345") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_2345") QueueWorkItem2(0xFE, 2, lambda_2345) Sleep(350) SetChrChipByIndex(0xFE, 0x15) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) def lambda_2370(): OP_9D(0xFE, 0x6B6C, 0xFFFFF448, 0xFFFFBBA4, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_2370) BeginChrThread(0x101, 3, 1, 46) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x29) SetChrSubChip(0xFE, 0x0) SetChrChipByIndex(0x10, 0x3) SetChrSubChip(0x10, 0x0) OP_98(0x10, 0x0, 0x0, 0x5DC, 0xFA0, 0x0) SetChrChipByIndex(0x10, 0x18) SetChrSubChip(0x10, 0x0) Return() # Function_45_233B end def Function_46_23C7(): pass label("Function_46_23C7") Sleep(500) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x21) SetChrSubChip(0xFE, 0x0) #C0051 ChrTalk( 0x101, "#5P#5A#4S噢噢噢噢噢!#3S\x02", ) #Auto Sound(809, 0, 100, 0) def lambda_23FF(): OP_9D(0xFE, 0x6B6C, 0xFFFFE890, 0xFFFFBD98, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_23FF) Sleep(600) SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) Sound(442, 0, 100, 0) Sound(547, 0, 40, 0) OP_82(0x64, 0x0, 0xBB8, 0x96) PlayEffect(0x0, 0xFF, 0xFE, 0x5, 0, 700, 1000, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0) BeginChrThread(0x14, 3, 1, 47) 
BeginChrThread(0x13, 3, 1, 48) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) WaitChrThread(0x14, 3) WaitChrThread(0x13, 3) Return() # Function_46_23C7 end def Function_47_24AC(): pass label("Function_47_24AC") SetChrChip(0x0, 0xFE, 0x1E, 0x12C) OP_96(0xFE, 0x6658, 0xFFFFE890, 0xFFFFCE5A, 0x4E20, 0x0) Sound(443, 0, 100, 0) Sound(441, 0, 80, 0) OP_9D(0xFE, 0x774C, 0xFFFFE890, 0xFFFFFA9C, 0x1388, 0x3E8) SetChrChip(0x1, 0xFE, 0x0, 0x0) Return() # Function_47_24AC end def Function_48_24F4(): pass label("Function_48_24F4") SetChrChipByIndex(0xFE, 0x31) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) Sound(809, 0, 100, 0) OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x7D0) Sound(441, 0, 80, 0) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x2E) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Return() # Function_48_24F4 end def Function_49_2538(): pass label("Function_49_2538") OP_50(0x69, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) SetChrPos(0x101, 24500, -6000, -19000, 0) SetChrPos(0x13, 27500, -6000, -22000, 0) SetChrPos(0x11, 24500, -6000, -13000, 180) SetChrPos(0x10, 27500, -6000, -13000, 180) SetChrPos(0x12, 21300, -6000, -16000, 90) SetChrPos(0x14, 27500, -5500, -21800, 0) SetChrFlags(0x14, 0x8) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x13, 0xA) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x10, 0x18) SetChrSubChip(0x10, 0x0) SetChrChipByIndex(0x11, 0x17) SetChrSubChip(0x11, 0x0) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(17000, 0) OP_68(26000, -5000, -16000, 15000) MoveCamera(305, 30, 0, 15000) FadeToBright(1000, 0) OP_0D() RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) BeginChrThread(0x13, 3, 1, 50) label("loc_2630") Jc((scpexpr(EXPR_GET_RESULT, 0x3), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), 
scpexpr(EXPR_END)), "loc_2647") Sleep(1) Jump("loc_2630") label("loc_2647") OP_4B(0x14, 0xFF) RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) FadeToDark(300, 0, 100) OP_0D() Menu( 0, -1, -1, 0, ( "将球强攻向伊莉娅和兰迪之间\x01", # 0 "瞄准后场边界线,击出弧线球\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) FadeToBright(300, 0) OP_0D() OP_4C(0x14, 0xFF) Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_2B08") MoveCamera(315, 25, 0, 2000) SetCameraDistance(15000, 2000) BeginChrThread(0x101, 3, 1, 62) WaitChrThread(0x101, 3) Sound(909, 0, 70, 0) Sleep(500) #C0052 ChrTalk( 0x12, "#13002F#5P#N伊莉娅小姐队得分!\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() SetChrChipByIndex(0x101, 0xFF) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x13, 0xA) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x11, 0x8) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x10, 0x3) SetChrSubChip(0x10, 0x0) OP_93(0x11, 0x5A, 0x1F4) #C0053 ChrTalk( 0x11, ( "#12809F#5P完美!!\x01", "真不愧是伊莉娅小姐!!\x02", ) ) CloseMessageWindow() OP_93(0x10, 0x10E, 0x1F4) #C0054 ChrTalk( 0x10, "#13400F#12P啊哈哈,普普通通啦。\x02", ) CloseMessageWindow() #C0055 ChrTalk( 0x13, ( "#12906F#12P#N哎呀呀,\x01", "竟然选择正面强攻,难道你认为自己\x01", "可以胜过他们二人的身体能力吗?\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() def lambda_27F5(): OP_93(0x11, 0xB4, 0x1F4) ExitThread() QueueWorkItem(0x11, 0, lambda_27F5) Sleep(30) def lambda_2805(): OP_93(0x10, 0xB4, 0x1F4) ExitThread() QueueWorkItem(0x10, 0, lambda_2805) Sleep(30) WaitChrThread(0x11, 0) WaitChrThread(0x10, 0) OP_93(0x101, 0x87, 0x1F4) #C0056 ChrTalk( 0x101, ( "#12506F#5P唉,真丢脸……\x02\x03", "#12505F……话说回来,瓦吉,\x01", "你刚才突然说『边界』,\x01", "到底是什么意思?\x02", ) ) CloseMessageWindow() #C0057 ChrTalk( 0x13, ( "#12900F#12P#N哦,我当时就料到\x01", "伊莉娅小姐会发起拦截了,\x01", "所以觉得正面强攻肯定徒劳无功。\x02\x03", "#12904F『边界』就是边界线……\x01", "也就是说,让你击出弧线吊球,\x01", "使球落到后场边界线处。\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() OP_63(0x101, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) 
Sleep(1000) #C0058 ChrTalk( 0x101, ( "#12506F#5P这……你突然那样说,\x01", "又有谁能听得懂啊!\x02", ) ) CloseMessageWindow() #C0059 ChrTalk( 0x13, ( "#12909F#12P#N啊哈哈,抱歉抱歉。\x02\x03", "#12900F好啦,尽快调整情绪,\x01", "想办法逆转比分吧。\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() FadeToDark(1000, 0, -1) OP_0D() Sound(909, 0, 70, 0) Sleep(500) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x13, 0x2E) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x11, 0x17) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x10, 0x18) SetChrSubChip(0x10, 0x0) SetChrPos(0x101, 24500, -6000, -19000, 0) SetChrPos(0x13, 27500, -6000, -20000, 0) SetChrPos(0x11, 24500, -6000, -12000, 180) SetChrPos(0x10, 27500, -6000, -13000, 180) SetChrFlags(0x14, 0x8) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(16000, 0) SetCameraDistance(17000, 1500) FadeToBright(1000, 0) OP_6F(0x79) OP_0D() #C0060 ChrTalk( 0x12, ( "#13000F#5P……比赛结束!!\x02\x03", "#13009F4比12,\x01", "伊莉娅小姐队获胜!!\x02", ) ) CloseMessageWindow() #C0061 ChrTalk( 0x101, "#12506F#6P呼,完败啊……\x02", ) CloseMessageWindow() Jump("loc_2E8F") label("loc_2B08") OP_2C(0xA5, 0x1) BeginChrThread(0x101, 3, 1, 66) WaitChrThread(0x101, 3) Sound(909, 0, 70, 0) Sleep(500) #C0062 ChrTalk( 0x12, "#13002F#5P#N罗伊德警官队得分!\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() SetChrChipByIndex(0x101, 0xFF) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x13, 0xA) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x11, 0x8) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x10, 0x3) SetChrSubChip(0x10, 0x0) #C0063 ChrTalk( 0x101, "#12500F#6P好……!\x02", ) CloseMessageWindow() #C0064 ChrTalk( 0x10, ( "#13406F#11P哎呀呀~居然是吊球……\x01", "本以为他们肯定会选择正面强攻呢。\x02", ) ) CloseMessageWindow() #C0065 ChrTalk( 0x11, "#12806F#11P唉~被对手识破了呢。\x02", ) CloseMessageWindow() #C0066 ChrTalk( 0x13, "#12902F#6P#N呵呵,干得不错嘛。\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() OP_63(0x101, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1) Sleep(1000) OP_93(0x101, 0x87, 0x1F4) #C0067 ChrTalk( 0x101, ( 
"#12504F#11P……嗯,因为你对我\x01", "喊了暗号──『边界』。\x02\x03", "#12502F我察觉到对方的后场有空位,\x01", "所以立刻改打吊球了。\x02", ) ) CloseMessageWindow() #C0068 ChrTalk( 0x13, ( "#12904F#6P#N呵呵,突然喊出那种暗号,\x01", "原本还有些不安,担心你无法领会呢。\x02\x03", "#12909F这大概就是爱的力量吧?\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() #C0069 ChrTalk( 0x101, ( "#12506F#11P少说蠢话了。\x02\x03", "#12500F好,继续用这种战术扰乱\x01", "伊莉娅小姐他们的视线吧!\x02", ) ) CloseMessageWindow() FadeToDark(1000, 0, -1) OP_0D() Sound(909, 0, 70, 0) Sleep(500) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x13, 0x2E) SetChrSubChip(0x13, 0x0) SetChrChipByIndex(0x11, 0x17) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x10, 0x18) SetChrSubChip(0x10, 0x0) SetChrPos(0x101, 24500, -6000, -19000, 0) SetChrPos(0x13, 27500, -6000, -20000, 0) SetChrPos(0x11, 24500, -6000, -12000, 180) SetChrPos(0x10, 27500, -6000, -13000, 180) SetChrFlags(0x14, 0x8) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(16000, 0) SetCameraDistance(17000, 1500) FadeToBright(1000, 0) OP_6F(0x79) OP_0D() #C0070 ChrTalk( 0x12, ( "#13000F#5P……比赛结束!!\x02\x03", "#13009F12比10,\x01", "罗伊德警官队获胜!!\x02", ) ) CloseMessageWindow() #C0071 ChrTalk( 0x101, "#12509F#6P好!总算赢了!!\x02", ) CloseMessageWindow() label("loc_2E8F") FadeToDark(1000, 0, -1) OP_0D() Return() # Function_49_2538 end def Function_50_2E9B(): pass label("Function_50_2E9B") ClearChrFlags(0x14, 0x8) Sound(802, 0, 60, 0) def lambda_2EAB(): OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_2EAB) SetChrChipByIndex(0xFE, 0x2D) SetChrSubChip(0xFE, 0x0) Sleep(1000) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) #C0072 ChrTalk( 0x13, "#6P#5A呼……!\x02", ) #Auto def lambda_2EF0(): OP_9D(0xFE, 0x6B6C, 0xFFFFEA84, 0xFFFFCC0C, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_2EF0) BeginChrThread(0x10, 3, 1, 51) Sleep(500) SetChrChipByIndex(0xFE, 0xA) SetChrSubChip(0xFE, 0x0) OP_9B(0x0, 0xFE, 0x0, 0xBB8, 0xFA0, 0x0) SetChrChipByIndex(0xFE, 0x2E) 
SetChrSubChip(0xFE, 0x0) Return() # Function_50_2E9B end def Function_51_2F31(): pass label("Function_51_2F31") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) def lambda_2F44(): OP_9D(0xFE, 0x5FB4, 0xFFFFEDA4, 0xFFFFCD38, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_2F44) BeginChrThread(0x11, 3, 1, 52) Sleep(500) SetChrSubChip(0xFE, 0x0) Return() # Function_51_2F31 end def Function_52_2F6A(): pass label("Function_52_2F6A") SetChrFlags(0xFE, 0x20) def lambda_2F74(): label("loc_2F74") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_2F74") QueueWorkItem2(0xFE, 2, lambda_2F74) Sleep(350) SetChrChipByIndex(0xFE, 0x25) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) Sound(441, 0, 60, 0) SetChrSubChip(0xFE, 0x1) def lambda_2F9F(): OP_9D(0xFE, 0x6B6C, 0xFFFFF448, 0xFFFFC75C, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_2F9F) BeginChrThread(0x10, 3, 1, 53) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Sleep(500) SetChrChipByIndex(0xFE, 0x8) SetChrSubChip(0xFE, 0x0) OP_93(0xFE, 0xB4, 0x1F4) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Return() # Function_52_2F6A end def Function_53_2FEC(): pass label("Function_53_2FEC") Sleep(500) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x35) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_3007(): OP_9D(0xFE, 0x6B6C, 0xFFFFE890, 0xFFFFC568, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_3007) Sleep(600) SetChrSubChip(0xFE, 0x1) Sound(442, 0, 80, 0) EndChrThread(0x14, 0x1) #C0073 ChrTalk( 0x10, "#11P#5A接球!\x02", ) #Auto def lambda_3047(): OP_96(0xFE, 0x5FB4, 0xFFFFEA84, 0xFFFFB6F4, 0x3A98, 0x0) ExitThread() QueueWorkItem(0x14, 1, lambda_3047) BeginChrThread(0x101, 3, 1, 54) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x18) SetChrSubChip(0xFE, 0x0) Return() # Function_53_2FEC end def Function_54_307E(): pass 
label("Function_54_307E") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 80, 0) def lambda_3091(): OP_9D(0xFE, 0x6B6C, 0xFFFFED40, 0xFFFFB5C8, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_3091) BeginChrThread(0x13, 3, 1, 55) Sleep(500) SetChrSubChip(0xFE, 0x0) Return() # Function_54_307E end def Function_55_30B7(): pass label("Function_55_30B7") SetChrFlags(0xFE, 0x20) def lambda_30C1(): label("loc_30C1") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_30C1") QueueWorkItem2(0xFE, 2, lambda_30C1) Sleep(350) SetChrChipByIndex(0xFE, 0x1C) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) Sound(441, 0, 80, 0) SetChrSubChip(0xFE, 0x1) def lambda_30EC(): OP_9D(0xFE, 0x5FB4, 0xFFFFF448, 0xFFFFBBA4, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_30EC) BeginChrThread(0x101, 3, 1, 56) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x2E) SetChrSubChip(0xFE, 0x0) Return() # Function_55_30B7 end def Function_56_311F(): pass label("Function_56_311F") Sleep(500) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x21) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_313A(): OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFBD98, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_313A) Sleep(600) SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) Sound(442, 0, 80, 0) #C0074 ChrTalk( 0x101, "#5P#5A回敬你们……!\x02", ) #Auto def lambda_3181(): OP_96(0xFE, 0x5FB4, 0xFFFFEA84, 0xFFFFCC0C, 0x3A98, 0x0) ExitThread() QueueWorkItem(0x14, 1, lambda_3181) BeginChrThread(0x11, 3, 1, 57) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Return() # Function_56_311F end def Function_57_31B8(): pass label("Function_57_31B8") WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) def lambda_31CB(): OP_9D(0xFE, 0x6B6C, 0xFFFFEC78, 0xFFFFC568, 0xBB8, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_31CB) 
BeginChrThread(0x10, 3, 1, 58) Sleep(500) SetChrSubChip(0xFE, 0x0) Return() # Function_57_31B8 end def Function_58_31F1(): pass label("Function_58_31F1") SetChrFlags(0xFE, 0x20) def lambda_31FB(): label("loc_31FB") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_31FB") QueueWorkItem2(0xFE, 2, lambda_31FB) Sleep(350) SetChrChipByIndex(0xFE, 0x34) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) Sound(441, 0, 80, 0) SetChrSubChip(0xFE, 0x1) def lambda_3226(): OP_9D(0xFE, 0x5FB4, 0xFFFFF448, 0xFFFFC75C, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_3226) BeginChrThread(0x11, 3, 1, 59) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x18) SetChrSubChip(0xFE, 0x0) Sleep(500) SetChrChipByIndex(0xFE, 0x3) SetChrSubChip(0xFE, 0x0) OP_93(0xFE, 0xB4, 0x1F4) SetChrChipByIndex(0xFE, 0x18) SetChrSubChip(0xFE, 0x0) Return() # Function_58_31F1 end def Function_59_3273(): pass label("Function_59_3273") Sleep(500) ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x26) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_328E(): OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFC568, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_328E) Sleep(600) SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) #C0075 ChrTalk( 0x11, "#11P#5A看招!\x02", ) #Auto def lambda_32C8(): OP_96(0xFE, 0x5FB4, 0xFFFFF254, 0xFFFFBF8C, 0x2EE0, 0x0) ExitThread() QueueWorkItem(0x14, 1, lambda_32C8) BeginChrThread(0x101, 3, 1, 60) Sleep(100) Sound(442, 0, 100, 0) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Return() # Function_59_3273 end def Function_60_330B(): pass label("Function_60_330B") SetChrChipByIndex(0xFE, 0x22) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) Sound(809, 0, 100, 0) def lambda_3323(): OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x1388) ExitThread() QueueWorkItem(0xFE, 1, lambda_3323) WaitChrThread(0x14, 1) Sound(441, 0, 60, 0) def 
lambda_334A(): OP_9D(0xFE, 0x6B6C, 0xFFFFED40, 0xFFFFB5C8, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_334A) BeginChrThread(0x13, 3, 1, 61) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Sleep(500) #C0076 ChrTalk( 0x13, "#6P#5A边界!\x02", ) #Auto Sleep(500) SetChrFlags(0x10, 0x20) OP_93(0x10, 0xB4, 0x1F4) ClearChrFlags(0x10, 0x20) SetChrChipByIndex(0x10, 0x3) SetChrSubChip(0x10, 0x0) OP_98(0x10, 0xFFFFFA24, 0x0, 0x0, 0xFA0, 0x0) SetChrChipByIndex(0x10, 0x18) SetChrSubChip(0x10, 0x0) Return() # Function_60_330B end def Function_61_33C6(): pass label("Function_61_33C6") SetChrFlags(0xFE, 0x20) def lambda_33D0(): label("loc_33D0") TurnDirection(0xFE, 0x14, 500) Yield() Jump("loc_33D0") QueueWorkItem2(0xFE, 2, lambda_33D0) Sleep(350) SetChrChipByIndex(0xFE, 0x1C) SetChrSubChip(0xFE, 0x0) WaitChrThread(0x14, 1) SetChrSubChip(0xFE, 0x1) Sound(441, 0, 100, 0) def lambda_33FB(): OP_9D(0xFE, 0x5FB4, 0xFFFFF448, 0xFFFFBF8C, 0xAF0, 0x3E8) ExitThread() QueueWorkItem(0x14, 1, lambda_33FB) Sleep(500) EndChrThread(0xFE, 0x2) ClearChrFlags(0xFE, 0x20) SetChrChipByIndex(0xFE, 0x2E) SetChrSubChip(0xFE, 0x0) RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) Return() # Function_61_33C6 end def Function_62_3432(): pass label("Function_62_3432") ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x21) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_344A(): OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFBD98, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_344A) Sleep(600) #C0077 ChrTalk( 0x101, "#5P#5A#4S……呼!#3S\x02", ) #Auto SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) Sound(442, 0, 100, 0) Sound(547, 0, 40, 0) OP_82(0x64, 0x0, 0xBB8, 0x96) PlayEffect(0x0, 0xFF, 0xFE, 0x5, 0, 700, 1000, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0) def lambda_34DF(): OP_96(0xFE, 0x6590, 0xFFFFF060, 0xFFFFC374, 0x2EE0, 0x0) ExitThread() QueueWorkItem(0x14, 1, lambda_34DF) 
BeginChrThread(0x11, 3, 1, 64) Sleep(30) BeginChrThread(0x10, 3, 1, 63) Sleep(100) SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) WaitChrThread(0x10, 3) Return() # Function_62_3432 end def Function_63_3529(): pass label("Function_63_3529") SetChrChipByIndex(0xFE, 0x36) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) Sound(809, 0, 100, 0) def lambda_3541(): OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x1388) ExitThread() QueueWorkItem(0xFE, 1, lambda_3541) WaitChrThread(0x14, 1) Sound(441, 0, 80, 0) BeginChrThread(0x14, 3, 1, 65) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x18) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) WaitChrThread(0x14, 3) Return() # Function_63_3529 end def Function_64_3585(): pass label("Function_64_3585") SetChrChipByIndex(0xFE, 0x27) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x1388) Sound(809, 0, 100, 0) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x17) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) Return() # Function_64_3585 end def Function_65_35C3(): pass label("Function_65_35C3") SetChrChip(0x0, 0xFE, 0x1E, 0x12C) OP_96(0xFE, 0x6FB8, 0xFFFFE890, 0xFFFFBA00, 0x3A98, 0x0) Sound(443, 0, 100, 0) Sound(441, 0, 100, 0) OP_9D(0xFE, 0x7CC4, 0xFFFFE890, 0xFFFFAB50, 0xBB8, 0x3E8) SetChrChip(0x1, 0xFE, 0x0, 0x0) Return() # Function_65_35C3 end def Function_66_360B(): pass label("Function_66_360B") ClearChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x21) SetChrSubChip(0xFE, 0x0) Sound(809, 0, 100, 0) def lambda_3623(): OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFBD98, 0x7D0, 0x3E8) ExitThread() QueueWorkItem(0xFE, 1, lambda_3623) Sleep(600) #C0078 ChrTalk( 0x101, "#6P#5A#4S……嘿!#3S\x02", ) #Auto SetChrSubChip(0xFE, 0x1) EndChrThread(0x14, 0x1) Sound(441, 0, 100, 0) BeginChrThread(0x14, 3, 1, 68) BeginChrThread(0x11, 3, 1, 64) Sleep(30) BeginChrThread(0x10, 3, 1, 67) Sleep(100) 
SetChrSubChip(0xFE, 0x2) Sleep(100) WaitChrThread(0xFE, 1) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x1F) SetChrSubChip(0xFE, 0x0) Sound(30, 0, 100, 0) WaitChrThread(0x14, 3) Return() # Function_66_360B end def Function_67_36A0(): pass label("Function_67_36A0") SetChrChipByIndex(0xFE, 0x36) SetChrSubChip(0xFE, 0x0) ClearChrFlags(0xFE, 0x1) Sound(809, 0, 100, 0) OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x1388) SetChrFlags(0xFE, 0x1) SetChrChipByIndex(0xFE, 0x18) Sound(30, 0, 100, 0) SetChrSubChip(0xFE, 0x0) Return() # Function_67_36A0 end def Function_68_36DE(): pass label("Function_68_36DE") SetChrChip(0x0, 0xFE, 0x1E, 0x12C) OP_9D(0xFE, 0x639C, 0xFFFFE890, 0xFFFFD508, 0x7D0, 0x3E8) Sound(443, 0, 50, 0) Sound(441, 0, 80, 0) OP_9D(0xFE, 0x639C, 0xFFFFE890, 0xFFFFDAE4, 0x3E8, 0x3E8) Sound(441, 0, 80, 0) OP_9D(0xFE, 0x639C, 0xFFFFE890, 0xFFFFDECC, 0x1F4, 0x3E8) Sound(441, 0, 60, 0) OP_9D(0xFE, 0x639C, 0xFFFFE890, 0xFFFFE0C0, 0xC8, 0x3E8) SetChrChip(0x1, 0xFE, 0x0, 0x0) Return() # Function_68_36DE end def Function_69_3763(): pass label("Function_69_3763") OP_50(0x6C, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END))) SetChrPos(0x101, 24500, -6000, -22000, 0) SetChrPos(0x10, 27500, -6000, -19000, 0) SetChrPos(0x11, 24500, -6000, -13000, 180) SetChrPos(0x12, 27500, -6000, -13000, 180) SetChrPos(0x13, 21300, -6000, -16000, 90) SetChrPos(0x14, 24500, -5500, -21800, 0) SetChrFlags(0x14, 0x8) SetChrChipByIndex(0x101, 0xFF) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x10, 0x18) SetChrSubChip(0x10, 0x0) SetChrChipByIndex(0x11, 0x17) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x12, 0x29) SetChrSubChip(0x12, 0x0) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(17000, 0) OP_68(26000, -5000, -16000, 15000) MoveCamera(305, 30, 0, 15000) FadeToBright(1000, 0) OP_0D() RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) BeginChrThread(0x101, 3, 1, 70) label("loc_385B") 
Jc((scpexpr(EXPR_GET_RESULT, 0x3), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3872") Sleep(1) Jump("loc_385B") label("loc_3872") OP_4B(0x14, 0xFF) RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END))) FadeToDark(300, 0, 100) OP_0D() Menu( 0, -1, -1, 0, ( "全力将球托高\x01", # 0 "控制力度托球\x01", # 1 ) ) MenuEnd(0x0) OP_60(0x0) FadeToBright(300, 0) OP_0D() OP_4C(0x14, 0xFF) Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3C56") OP_2C(0xA5, 0x1) MoveCamera(315, 25, 0, 2000) SetCameraDistance(15000, 2000) BeginChrThread(0x101, 3, 1, 81) WaitChrThread(0x101, 3) Sound(909, 0, 70, 0) Sleep(500) #C0079 ChrTalk( 0x13, "#12902F#5P#N呵呵,打得漂亮。\x02", ) CloseMessageWindow() OP_57(0x0) OP_5A() SetChrChipByIndex(0x101, 0xFF) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x10, 0x3) SetChrSubChip(0x10, 0x0) SetChrChipByIndex(0x11, 0x8) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x12, 0x9) SetChrSubChip(0x12, 0x0) OP_63(0x11, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) OP_63(0x12, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) Sleep(1000) #C0080 ChrTalk( 0x11, ( "#12806F真、真不愧是伊莉娅小姐……\x01", "那种身体能力简直就是犯规啊。\x02", ) ) CloseMessageWindow() #C0081 ChrTalk( 0x12, ( "#13006F在那种惊人的高度扣球……\x01", "我们根本不可能挡得住呢。\x02", ) ) CloseMessageWindow() OP_63(0x10, 0x0, 2000, 0x26, 0x27, 0xFA, 0x2) Sleep(1200) #C0082 ChrTalk( 0x10, "#13409F#6P呵呵,随意一击罢了。\x02", ) CloseMessageWindow() OP_93(0x10, 0xE1, 0x1F4) #C0083 ChrTalk( 0x10, ( "#13400F#12P警察弟弟的判断很出色呢,\x01", "竟然把球托得那么高。\x02", ) ) CloseMessageWindow() #C0084 ChrTalk( 0x101, ( "#12500F#5P哈哈,因为我觉得凭伊莉娅小姐\x01", "的身体能力,肯定能接到那一球。\x02", ) ) CloseMessageWindow() #C0085 ChrTalk( 0x10, ( "#13409F#12P呵呵,聪明聪明¤\x02\x03", "#13400F好,我们就保持这种势头,\x01", "一鼓作气结束比赛吧!\x02", ) ) CloseMessageWindow() FadeToDark(1000, 0, -1) OP_0D() Sound(909, 0, 70, 0) Sleep(500) SetChrChipByIndex(0x101, 0x1F) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x10, 
0x18) SetChrSubChip(0x10, 0x0) SetChrChipByIndex(0x11, 0x17) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x12, 0x29) SetChrSubChip(0x12, 0x0) SetChrPos(0x101, 24500, -6000, -19000, 0) SetChrPos(0x10, 27500, -6000, -20000, 0) SetChrPos(0x11, 24500, -6000, -12000, 180) SetChrPos(0x12, 27500, -6000, -13000, 180) SetChrFlags(0x14, 0x8) OP_68(26000, -5000, -16000, 0) MoveCamera(320, 20, 0, 0) OP_6E(650, 0) SetCameraDistance(16000, 0) SetCameraDistance(17000, 1500) FadeToBright(1000, 0) OP_6F(0x79) OP_0D() #C0086 ChrTalk( 0x13, ( "#12900F#5P#N……比赛结束!!\x02\x03", "#12904F12比4,\x01", "罗伊德队获胜!!\x01", "呵呵,辛苦啦。\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() #C0087 ChrTalk( 0x101, "#12509F#6P好!压倒性完胜!!\x02", ) CloseMessageWindow() Jump("loc_3F74") label("loc_3C56") MoveCamera(315, 25, 0, 2000) SetCameraDistance(15000, 2000) BeginChrThread(0x101, 3, 1, 86) WaitChrThread(0x101, 3) Sound(909, 0, 70, 0) Sleep(500) #C0088 ChrTalk( 0x13, ( "#12902F#5P#N呵呵,真遗憾,\x01", "没能把握住机会呢。\x02", ) ) CloseMessageWindow() OP_57(0x0) OP_5A() SetChrChipByIndex(0x101, 0xFF) SetChrSubChip(0x101, 0x0) SetChrChipByIndex(0x10, 0x3) SetChrSubChip(0x10, 0x0) SetChrChipByIndex(0x11, 0x8) SetChrSubChip(0x11, 0x0) SetChrChipByIndex(0x12, 0x9) SetChrSubChip(0x12, 0x0) #C0089 ChrTalk( 0x11, "#12806F呼,好危险……!\x02", ) CloseMessageWindow() #C0090 ChrTalk( 0x12, ( "#13011F不过,弹跳力好惊人啊……\x02\x03", "#13006F如果真能在那种高度击出扣球,\x01", "我们肯定接不下。\x02", ) ) CloseMessageWindow() OP_63(0x101, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) OP_63(0x10, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1) Sound(23, 0, 100, 0) Sleep(1000) OP_93(0x10, 0xE1, 0x1F4) #C0091 ChrTalk( 0x10, "#13406F#12P哎呀呀……抱歉哦,警察弟弟。\x02", ) CloseMessageWindow() #C0092 ChrTalk( 0x101, ( "#12512F哪里,是我的失误……\x01", "没想到你竟然\x01", "能跳那么高。\x02", ) ) CloseMessageWindow() #C0093 ChrTalk( 0x11, ( "#12800F哈哈,看来女神\x01", "站在我们这边呢。\x01", "一鼓作气取得胜利吧!诺艾尔!\x02", ) ) CloseMessageWindow() #C0094 ChrTalk( 0x12, "#13009F是!\x02", ) CloseMessageWindow() FadeToDark(1000, 0, -1) OP_0D() 
# NOTE(review): machine-generated (decompiled) Falcom ED-engine scene script.
# The Function_NN_XXXX bodies below are engine opcode sequences; the function
# names, hex arguments, #C#### string IDs and the in-game dialogue strings are
# decompiler output and must be kept exactly as generated.  OP_xx semantics
# live in the engine — presumably OP_9D/OP_96/OP_9C are movement opcodes and
# Sound/ChrTalk drive audio and dialogue, but confirm against the opcode table.

# -- tail of Function_69_3763: its "def" line lies above this chunk --
    Sound(909, 0, 70, 0)
    Sleep(500)
    SetChrChipByIndex(0x101, 0x1F)
    SetChrSubChip(0x101, 0x0)
    SetChrChipByIndex(0x10, 0x18)
    SetChrSubChip(0x10, 0x0)
    SetChrChipByIndex(0x11, 0x17)
    SetChrSubChip(0x11, 0x0)
    SetChrChipByIndex(0x12, 0x29)
    SetChrSubChip(0x12, 0x0)
    SetChrPos(0x101, 24500, -6000, -19000, 0)
    SetChrPos(0x10, 27500, -6000, -20000, 0)
    SetChrPos(0x11, 24500, -6000, -12000, 180)
    SetChrPos(0x12, 27500, -6000, -13000, 180)
    SetChrFlags(0x14, 0x8)
    OP_68(26000, -5000, -16000, 0)
    MoveCamera(320, 20, 0, 0)
    OP_6E(650, 0)
    SetCameraDistance(16000, 0)
    SetCameraDistance(17000, 1500)
    FadeToBright(1000, 0)
    OP_6F(0x79)
    OP_0D()

    #C0095
    ChrTalk(
        0x13,
        (
            "#12900F#5P……比赛结束!!\x02\x03",
            "#12904F9比12,\x01",
            "兰迪队获胜。\x01",
            "呵呵,辛苦啦。\x02",
        )
    )

    CloseMessageWindow()

    #C0096
    ChrTalk(
        0x101,
        "#12506F#6P呜,输了呢……\x02",
    )

    CloseMessageWindow()

label("loc_3F74")

    FadeToDark(1000, 0, -1)
    OP_0D()
    Return()

    # Function_69_3763 end

# Scene thread: animates 0xFE, then spawns Function_71 on character 0x11.
def Function_70_3F80(): pass

label("Function_70_3F80")

    ClearChrFlags(0x14, 0x8)
    Sound(802, 0, 60, 0)

    def lambda_3F90():
        OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_3F90)
    SetChrChipByIndex(0xFE, 0x1E)
    SetChrSubChip(0xFE, 0x0)
    Sleep(1000)
    SetChrSubChip(0xFE, 0x1)
    Sound(441, 0, 100, 0)

    #C0097
    ChrTalk(
        0x101,
        "#5P#5A……嘿!\x02",
    ) #Auto

    def lambda_3FD5():
        OP_9D(0xFE, 0x5FB4, 0xFFFFEA84, 0xFFFFCC70, 0xBB8, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_3FD5)
    BeginChrThread(0x11, 3, 1, 71)
    Sleep(500)
    SetChrChipByIndex(0xFE, 0xFF)
    SetChrSubChip(0xFE, 0x0)
    OP_9B(0x0, 0xFE, 0x0, 0xBB8, 0xFA0, 0x0)
    SetChrChipByIndex(0xFE, 0x1F)
    SetChrSubChip(0xFE, 0x0)
    Return()

    # Function_70_3F80 end

# Chains to Function_72 on character 0x12 after the 0x14 work item finishes.
def Function_71_4016(): pass

label("Function_71_4016")

    WaitChrThread(0x14, 1)
    Sound(441, 0, 100, 0)
    SetChrSubChip(0xFE, 0x1)

    def lambda_4029():
        OP_9D(0xFE, 0x6AA4, 0xFFFFEC78, 0xFFFFCD38, 0xBB8, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_4029)
    BeginChrThread(0x12, 3, 1, 72)
    Sleep(500)
    SetChrSubChip(0xFE, 0x0)
    Return()

    # Function_71_4016 end

def Function_72_404F(): pass

label("Function_72_404F")

    SetChrFlags(0xFE, 0x20)

    # Busy-loop work item: keep 0xFE facing character 0x14 until EndChrThread.
    def lambda_4059():
        label("loc_4059")
        TurnDirection(0xFE, 0x14, 500)
        Yield()
        Jump("loc_4059")

    QueueWorkItem2(0xFE, 2, lambda_4059)
    Sleep(350)
    SetChrChipByIndex(0xFE, 0x15)
    SetChrSubChip(0xFE, 0x0)
    WaitChrThread(0x14, 1)
    Sound(441, 0, 80, 0)
    SetChrSubChip(0xFE, 0x1)

    def lambda_4084():
        OP_9D(0xFE, 0x5FB4, 0xFFFFF448, 0xFFFFC75C, 0xAF0, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_4084)
    BeginChrThread(0x11, 3, 1, 73)
    Sleep(500)
    EndChrThread(0xFE, 0x2)
    ClearChrFlags(0xFE, 0x20)
    SetChrChipByIndex(0xFE, 0x29)
    SetChrSubChip(0xFE, 0x0)
    SetChrChipByIndex(0x10, 0x3)
    SetChrSubChip(0x10, 0x0)
    OP_93(0xFE, 0xB4, 0x1F4)
    OP_9B(0x1, 0x10, 0x0, 0xFFFFFA24, 0xFA0, 0x0)
    SetChrChipByIndex(0x10, 0x18)
    SetChrSubChip(0x10, 0x0)
    Return()

    # Function_72_404F end

def Function_73_40DD(): pass

label("Function_73_40DD")

    Sleep(500)
    ClearChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x26)
    SetChrSubChip(0xFE, 0x0)

    #C0098
    ChrTalk(
        0x11,
        "#11P#5A速攻!\x02",
    ) #Auto

    Sound(809, 0, 100, 0)

    def lambda_410A():
        OP_9D(0xFE, 0x5FB4, 0xFFFFE890, 0xFFFFC568, 0x7D0, 0x3E8)
        ExitThread()

    QueueWorkItem(0xFE, 1, lambda_410A)
    Sleep(600)
    SetChrSubChip(0xFE, 0x1)
    EndChrThread(0x14, 0x1)

    def lambda_4132():
        OP_96(0xFE, 0x5FB4, 0xFFFFEA84, 0xFFFFB6F4, 0x4E20, 0x0)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_4132)
    BeginChrThread(0x101, 3, 1, 74)
    Sleep(100)
    Sound(442, 0, 80, 0)
    SetChrSubChip(0xFE, 0x2)
    Sleep(100)
    WaitChrThread(0xFE, 1)
    SetChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x17)
    SetChrSubChip(0xFE, 0x0)
    Sound(30, 0, 100, 0)
    Return()

    # Function_73_40DD end

def Function_74_4175(): pass

label("Function_74_4175")

    WaitChrThread(0x14, 1)
    SetChrSubChip(0xFE, 0x1)

    def lambda_4182():
        OP_9D(0xFE, 0x6B6C, 0xFFFFEA84, 0xFFFFB0B4, 0xBB8, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_4182)
    BeginChrThread(0x10, 3, 1, 75)
    Sleep(500)
    SetChrSubChip(0xFE, 0x0)
    Return()

    # Function_74_4175 end

def Function_75_41A8(): pass

label("Function_75_41A8")

    WaitChrThread(0x14, 1)
    SetChrSubChip(0xFE, 0x1)
    Sound(441, 0, 80, 0)

    #C0099
    ChrTalk(
        0x10,
        "#6P#5A嘿!\x02",
    ) #Auto

    def lambda_41CA():
        OP_9D(0xFE, 0x5FB4, 0xFFFFEC78, 0xFFFFB5C8, 0xBB8, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_41CA)
    BeginChrThread(0x101, 3, 1, 76)
    Sleep(500)
    SetChrSubChip(0xFE, 0x0)
    Return()

    # Function_75_41A8 end

def Function_76_41F0(): pass

label("Function_76_41F0")

    SetChrFlags(0xFE, 0x20)

    # Busy-loop work item: keep 0xFE facing character 0x14 until EndChrThread.
    def lambda_41FA():
        label("loc_41FA")
        TurnDirection(0xFE, 0x14, 500)
        Yield()
        Jump("loc_41FA")

    QueueWorkItem2(0xFE, 2, lambda_41FA)
    Sleep(350)
    SetChrChipByIndex(0xFE, 0x20)
    SetChrSubChip(0xFE, 0x0)
    WaitChrThread(0x14, 1)
    SetChrSubChip(0xFE, 0x1)
    Sound(441, 0, 60, 0)

    def lambda_4225():
        OP_9D(0xFE, 0x6B6C, 0xFFFFEA84, 0xFFFFCC0C, 0xBB8, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_4225)
    BeginChrThread(0x12, 3, 1, 77)
    Sleep(500)
    EndChrThread(0xFE, 0x2)
    ClearChrFlags(0xFE, 0x20)
    SetChrChipByIndex(0xFE, 0x1F)
    SetChrSubChip(0xFE, 0x0)
    Return()

    # Function_76_41F0 end

def Function_77_4258(): pass

label("Function_77_4258")

    WaitChrThread(0x14, 1)
    SetChrSubChip(0xFE, 0x1)
    Sound(441, 0, 80, 0)

    def lambda_426B():
        OP_9D(0xFE, 0x5FB4, 0xFFFFEC78, 0xFFFFC568, 0xBB8, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_426B)
    BeginChrThread(0x11, 3, 1, 78)
    Sleep(500)
    SetChrSubChip(0xFE, 0x0)
    Return()

    # Function_77_4258 end

def Function_78_4291(): pass

label("Function_78_4291")

    SetChrFlags(0xFE, 0x20)

    # Busy-loop work item: keep 0xFE facing character 0x14 until EndChrThread.
    def lambda_429B():
        label("loc_429B")
        TurnDirection(0xFE, 0x14, 500)
        Yield()
        Jump("loc_429B")

    QueueWorkItem2(0xFE, 2, lambda_429B)
    Sleep(350)
    SetChrChipByIndex(0xFE, 0x25)
    SetChrSubChip(0xFE, 0x0)
    WaitChrThread(0x14, 1)
    SetChrSubChip(0xFE, 0x1)
    Sound(441, 0, 100, 0)

    #C0100
    ChrTalk(
        0x11,
        "#11P#5A上了!\x02",
    ) #Auto

    def lambda_42D8():
        OP_9D(0xFE, 0x6B6C, 0xFFFFF448, 0xFFFFC75C, 0xAF0, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_42D8)
    BeginChrThread(0x12, 3, 1, 79)
    Sleep(500)
    EndChrThread(0xFE, 0x2)
    ClearChrFlags(0xFE, 0x20)
    SetChrChipByIndex(0xFE, 0x17)
    SetChrSubChip(0xFE, 0x0)
    SetChrChipByIndex(0x10, 0x3)
    SetChrSubChip(0x10, 0x0)
    OP_9B(0x1, 0x10, 0x0, 0x5DC, 0xFA0, 0x0)
    SetChrChipByIndex(0x10, 0x18)
    SetChrSubChip(0x10, 0x0)
    SetChrFlags(0x11, 0x20)
    OP_93(0x11, 0xB4, 0x1F4)
    ClearChrFlags(0x11, 0x20)
    SetChrChipByIndex(0x11, 0x8)
    SetChrSubChip(0x11, 0x0)
    OP_98(0x11, 0x5DC, 0x0, 0x0, 0xFA0, 0x0)
    SetChrChipByIndex(0x11, 0x17)
    SetChrSubChip(0x11, 0x0)
    Return()

    # Function_78_4291 end

def Function_79_435F(): pass

label("Function_79_435F")

    Sleep(500)
    ClearChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x2B)
    SetChrSubChip(0xFE, 0x0)
    Sound(809, 0, 100, 0)

    def lambda_437A():
        OP_9D(0xFE, 0x6B6C, 0xFFFFE890, 0xFFFFC568, 0x7D0, 0x3E8)
        ExitThread()

    QueueWorkItem(0xFE, 1, lambda_437A)
    Sleep(600)
    SetChrSubChip(0xFE, 0x1)
    EndChrThread(0x14, 0x1)

    #C0101
    ChrTalk(
        0x12,
        "#11P#5A这招如何!?\x02",
    ) #Auto

    Sound(442, 0, 80, 0)

    def lambda_43C0():
        OP_96(0xFE, 0x6B6C, 0xFFFFEA84, 0xFFFFB6F4, 0x4E20, 0x0)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_43C0)
    BeginChrThread(0x10, 3, 1, 80)
    Sleep(100)
    SetChrSubChip(0xFE, 0x2)
    Sleep(100)
    WaitChrThread(0xFE, 1)
    SetChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x29)
    SetChrSubChip(0xFE, 0x0)
    Sound(30, 0, 100, 0)
    Return()

    # Function_79_435F end

def Function_80_43FD(): pass

label("Function_80_43FD")

    WaitChrThread(0x14, 1)
    SetChrSubChip(0xFE, 0x1)
    Sound(441, 0, 100, 0)

    def lambda_4410():
        OP_9D(0xFE, 0x5FB4, 0xFFFFEC78, 0xFFFFB5C8, 0xBB8, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_4410)
    SetChrFlags(0x101, 0x20)
    OP_93(0x101, 0x5A, 0x1F4)
    ClearChrFlags(0x101, 0x20)
    Sleep(300)
    SetChrSubChip(0xFE, 0x0)
    Sleep(200)
    EndChrThread(0x101, 0x2)
    # Writes a script variable via the engine expression VM (bytecode literal).
    RunExpression(0x3, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
    Return()

    # Function_80_43FD end

def Function_81_4452(): pass

label("Function_81_4452")

    #C0102
    ChrTalk(
        0x101,
        "#5P#5A伊莉娅小姐!!\x02",
    ) #Auto

    SetChrFlags(0xFE, 0x20)

    # Busy-loop work item: keep 0xFE facing character 0x14 until EndChrThread.
    def lambda_4475():
        label("loc_4475")
        TurnDirection(0xFE, 0x14, 500)
        Yield()
        Jump("loc_4475")

    QueueWorkItem2(0xFE, 2, lambda_4475)
    Sleep(350)
    SetChrChipByIndex(0xFE, 0x20)
    SetChrSubChip(0xFE, 0x0)
    WaitChrThread(0x14, 1)
    SetChrSubChip(0xFE, 0x1)
    Sound(441, 0, 100, 0)

    def lambda_44A0():
        OP_9D(0xFE, 0x6B6C, 0xFFFFF448, 0xFFFFBBA4, 0xCE4, 0x3E8)
        ExitThread()

    QueueWorkItem(0x14, 1, lambda_44A0)
    BeginChrThread(0x10, 3, 1, 82)
    Sleep(500)
    EndChrThread(0xFE, 0x2)
    ClearChrFlags(0xFE, 0x20)
    SetChrChipByIndex(0xFE, 0x1F)
    SetChrSubChip(0xFE, 0x0)
    WaitChrThread(0x10, 3)
    Return()

    # Function_81_4452 end

# Spawns the three reaction threads (85, 83, 84) and plays the hit effect.
def Function_82_44D7(): pass

label("Function_82_44D7")

    Sleep(500)
    ClearChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x35)
    SetChrSubChip(0xFE, 0x0)

    #C0103
    ChrTalk(
        0x10,
        "#5P#5A#4S噢啊啊啊啊啊!!#3S\x02",
    ) #Auto

    Sound(809, 0, 100, 0)

    def lambda_4513():
        OP_9D(0xFE, 0x6B6C, 0xFFFFE890, 0xFFFFBD98, 0xAF0, 0x3E8)
        ExitThread()

    QueueWorkItem(0xFE, 1, lambda_4513)
    Sleep(700)
    SetChrSubChip(0xFE, 0x1)
    EndChrThread(0x14, 0x1)
    Sound(442, 0, 100, 0)
    Sound(547, 0, 40, 0)
    OP_82(0x64, 0x0, 0xBB8, 0x96)
    PlayEffect(0x0, 0xFF, 0xFE, 0x5, 0, 700, 1000, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
    BeginChrThread(0x14, 3, 1, 85)
    BeginChrThread(0x11, 3, 1, 83)
    BeginChrThread(0x12, 3, 1, 84)
    Sleep(100)
    SetChrSubChip(0xFE, 0x2)
    Sleep(100)
    WaitChrThread(0xFE, 1)
    SetChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x18)
    SetChrSubChip(0xFE, 0x0)
    Sound(30, 0, 100, 0)
    WaitChrThread(0x14, 3)
    WaitChrThread(0x11, 3)
    WaitChrThread(0x12, 3)
    Return()

    # Function_82_44D7 end

def Function_83_45CA(): pass

label("Function_83_45CA")

    SetChrChipByIndex(0xFE, 0x27)
    SetChrSubChip(0xFE, 0x0)
    ClearChrFlags(0xFE, 0x1)
    OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x7D0)
    SetChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x17)
    SetChrSubChip(0xFE, 0x0)
    Return()

    # Function_83_45CA end

# Mirror of Function_83 with different chip indices (presumably the other
# character's dodge animation — confirm against chip table).
def Function_84_45FC(): pass

label("Function_84_45FC")

    SetChrChipByIndex(0xFE, 0x2C)
    SetChrSubChip(0xFE, 0x0)
    ClearChrFlags(0xFE, 0x1)
    OP_9C(0xFE, 0x0, 0x0, 0x0, 0x5DC, 0x7D0)
    SetChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x29)
    SetChrSubChip(0xFE, 0x0)
    Return()

    # Function_84_45FC end

def Function_85_462E(): pass

label("Function_85_462E")

    SetChrChip(0x0, 0xFE, 0x1E, 0x12C)
    OP_96(0xFE, 0x64D2, 0xFFFFE890, 0xFFFFD29C, 0x61A8, 0x0)
    Sound(443, 0, 100, 0)
    Sound(441, 0, 80, 0)
    OP_9D(0xFE, 0x54A6, 0xFFFFE890, 0x46A, 0x7D0, 0x3E8)
    Sound(441, 0, 80, 0)
    OP_9D(0xFE, 0x48DA, 0xFFFFE890, 0x26B6, 0x3E8, 0x3E8)
    SetChrChip(0x1, 0xFE, 0x0, 0x0)
    Return()

    # Function_85_462E end

def Function_86_4693(): pass

label("Function_86_4693")

    #C0104
    ChrTalk(
        0x101,
        "#5P#5A伊莉娅小姐!!\x02",
    ) #Auto

    SetChrFlags(0xFE, 0x20)

    # Busy-loop work item: keep 0xFE facing character 0x14 until EndChrThread.
    def lambda_46B6():
        label("loc_46B6")
        TurnDirection(0xFE, 0x14, 500)
        Yield()
        Jump("loc_46B6")

    QueueWorkItem2(0xFE, 2, lambda_46B6)
    Sleep(350)
    SetChrChipByIndex(0xFE, 0x20)
    SetChrSubChip(0xFE, 0x0)
    WaitChrThread(0x14, 1)
    SetChrSubChip(0xFE, 0x1)
    Sound(441, 0, 60, 0)
    BeginChrThread(0x14, 3, 1, 88)
    BeginChrThread(0x10, 3, 1, 87)
    Sleep(500)
    EndChrThread(0xFE, 0x2)
    ClearChrFlags(0xFE, 0x20)
    SetChrChipByIndex(0xFE, 0x1F)
    SetChrSubChip(0xFE, 0x0)
    WaitChrThread(0x14, 3)
    WaitChrThread(0x10, 3)
    Return()

    # Function_86_4693 end

def Function_87_4705(): pass

label("Function_87_4705")

    Sleep(500)
    ClearChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x35)
    SetChrSubChip(0xFE, 0x0)

    #C0105
    ChrTalk(
        0x10,
        "#5P#20A#4S噢啊啊……#3S哎?哎呀!\x02",
    ) #Auto

    Sound(809, 0, 100, 0)

    def lambda_4745():
        OP_9D(0xFE, 0x6B6C, 0xFFFFE890, 0xFFFFBD98, 0xAF0, 0x3E8)
        ExitThread()

    QueueWorkItem(0xFE, 1, lambda_4745)
    Sleep(700)
    SetChrSubChip(0xFE, 0x1)
    Sound(590, 0, 100, 0)
    BeginChrThread(0x11, 3, 1, 83)
    BeginChrThread(0x12, 3, 1, 84)
    Sleep(100)
    SetChrSubChip(0xFE, 0x2)
    Sleep(100)
    WaitChrThread(0xFE, 1)
    SetChrFlags(0xFE, 0x1)
    SetChrChipByIndex(0xFE, 0x18)
    SetChrSubChip(0xFE, 0x0)
    Sound(30, 0, 100, 0)
    WaitChrThread(0x11, 3)
    WaitChrThread(0x12, 3)
    Return()

    # Function_87_4705 end

def Function_88_47A0(): pass

label("Function_88_47A0")

    Sound(443, 0, 40, 0)
    Sound(441, 0, 80, 0)
    OP_9D(0xFE, 0x6B6C, 0xFFFFE890, 0xFFFFBD98, 0x898, 0x3E8)
    Sound(441, 0, 60, 0)
    OP_9D(0xFE, 0x6F54, 0xFFFFE890, 0xFFFFC091, 0x384, 0x3E8)
    Sound(441, 0, 60, 0)
    OP_9D(0xFE, 0x717A, 0xFFFFE890, 0xFFFFC1E4, 0x12C, 0x3E8)
    Return()

    # Function_88_47A0 end

SaveToFile()

Try(main)
import nltk from urllib.request import urlopen from nltk import word_tokenize Single_PMID = "24964572" PMIDs = ["28483577", "24964572", "27283605"] Pubtator_Info_URL = "https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/pubtator?pmids=24964572" Pubtator_Info = urlopen(Pubtator_Info_URL, None, timeout=100000) Title = [] Abstract = [] Bioconcept_Gene = [] Bioconcept_Disease = [] Bioconcept_Chemical = [] Bioconcept_Mutation = [] Bioconcept_Species = [] Bioconcept_Cellline = [] while True: Info_line = Pubtator_Info.readline() if not Info_line: break #Abstract tokenization if str(Info_line).__contains__("|a|"): print(Info_line) Abstract = str(Info_line).replace('\\n','').\ replace(str(Single_PMID),"").\ replace("|a|","").lower().\ replace('b"','').\ replace('"','').\ split(".") print(Abstract) #Abstract를 tokenization 진행 #tokens = word_tokenize(str(Abstract)) #print(tokens) #text = nltk.Text(tokens) #text.concordance("Garlic", 100, 100) print("\n") print("\n") # 두 키워드를 포함하는 sentence 추출 for info in Abstract: if str(info).__contains__(str("garlic").lower()) and str(info).__contains__(str("cancer").lower()): #print("Extraction") #print(str(info).strip()) Data = str(info).strip() print(Data) data_tokens = word_tokenize(str(Data)) print(data_tokens) inter_data_token = data_tokens[int(data_tokens.index('garlic'))+1:int(data_tokens.index('cancer'))-1] #Tokenization 진행 후 관련된 단어를 포함하는지에 대한 여부로 두 keyword 사이의 연관성을 비교 분석 key = "treatment" for inter_data in inter_data_token: print(inter_data) if str(inter_data) == str(key): correlation = True elif str(inter_data) == str(key): correlation = False print(correlation) """ #Bioconcept filtering (Gene, Disease, Chemical, Mutation, Species, Celline) #if str(Info_line).__contains__("Species"): # print(str(Info_line)) """
if 'c' in 'Python': print 'YES' else: print 'NO' #http://www.hacksparrow.com/python-check-if-a-character-or-substring-is-in-a-string.html
# -*- coding: utf-8 -*-
# cython: language_level=3, always_allow_keywords=True


## Copyright 2007-2018 by LivingLogic AG, Bayreuth/Germany
## Copyright 2007-2018 by Walter Dörwald
##
## All Rights Reserved
##
## See ll/xist/__init__.py for the license


"""
This namespace module implements Atom 1.0 as specified by :rfc:`4287`.
"""


from ll.xist import xsc, sims
from ll.xist.ns import html


__docformat__ = "reStructuredText"


xmlns = "http://www.w3.org/2005/Atom"


# NOTE: the class names below mirror the Atom element names exactly, so some
# of them (e.g. ``id``, ``type``) intentionally shadow Python builtins.
class feed(xsc.Element):
    """
    The :class:`feed` element is the document (i.e., top-level) element of an
    Atom Feed Document, acting as a container for metadata and data associated
    with the feed.
    """
    xmlns = xmlns


class entry(xsc.Element):
    """
    The :class:`entry` element represents an individual entry, acting as a
    container for metadata and data associated with the entry.
    """
    xmlns = xmlns


class content(xsc.Element):
    """
    The :class:`content` element either contains or links to the content of
    the :class:`entry`.
    """
    xmlns = xmlns

    class Attrs(xsc.Element.Attrs):
        class type(xsc.TextAttr): pass
        class src(xsc.URLAttr): pass


class author(xsc.Element):
    """
    The :class:`author` element indicates the author of the :class:`entry`
    or :class:`feed`.
    """
    xmlns = xmlns


class category(xsc.Element):
    """
    The :class:`category` element conveys information about a category
    associated with an :class:`entry` or :class:`feed`.
    """
    xmlns = xmlns

    class Attrs(xsc.Element.Attrs):
        class term(xsc.TextAttr): required = True
        class scheme(xsc.URLAttr): pass
        class label(xsc.TextAttr): pass


class contributor(xsc.Element):
    """
    The :class:`contributor` element indicates a person or other entity who
    contributed to the :class:`entry` or :class:`feed`.
    """
    xmlns = xmlns


class generator(xsc.Element):
    """
    The :class:`generator` element's content identifies the agent used to
    generate a feed, for debugging and other purposes.
    """
    xmlns = xmlns

    class Attrs(xsc.Element.Attrs):
        class uri(xsc.URLAttr): pass
        class version(xsc.TextAttr): pass


class icon(xsc.Element):
    """
    The :class:`icon` element's content is an IRI reference that identifies
    an image that provides iconic visual identification for a feed.
    """
    xmlns = xmlns


class id(xsc.Element):
    """
    The :class:`id` element conveys a permanent, universally unique
    identifier for an :class:`entry` or :class:`feed`.
    """
    xmlns = xmlns


class link(xsc.Element):
    """
    The :class:`link` element defines a reference from an :class:`entry` or
    :class:`feed` to a Web resource.
    """
    xmlns = xmlns

    class Attrs(xsc.Element.Attrs):
        class href(xsc.URLAttr): required = True
        class rel(xsc.TextAttr): pass
        class type(xsc.TextAttr): pass
        class hreflang(xsc.TextAttr): pass
        class title(xsc.TextAttr): pass
        class length(xsc.TextAttr): pass


class logo(xsc.Element):
    """
    The :class:`logo` element's content is an IRI reference that identifies
    an image that provides visual identification for a :class:`feed`.
    """
    xmlns = xmlns


class published(xsc.Element):
    """
    The :class:`published` element indicates an instant in time associated
    with an event early in the life cycle of the :class:`entry`.
    """
    xmlns = xmlns


class rights(xsc.Element):
    """
    The :class:`rights` element contains text that conveys information about
    rights held in and over an :class:`entry` or :class:`feed`.
    """
    xmlns = xmlns

    class Attrs(xsc.Element.Attrs):
        class type(xsc.TextAttr): pass


class source(xsc.Element):
    """
    If an :class:`entry` is copied from one :class:`feed` into another
    :class:`feed`, then the source :class:`feed`'s metadata (all child
    elements of :class:`feed` other than the :class:`entry` elements) may be
    preserved within the copied entry by adding a :class:`source` child
    element, if it is not already present in the :class:`entry`, and
    including some or all of the source :class:`feed`'s Metadata elements as
    the :class:`source` element's children.
    """
    xmlns = xmlns


class subtitle(xsc.Element):
    """
    The :class:`subtitle` element contains text that conveys a human-readable
    description or subtitle for a :class:`feed`.
    """
    xmlns = xmlns

    class Attrs(xsc.Element.Attrs):
        class type(xsc.TextAttr): pass


class summary(xsc.Element):
    """
    The :class:`summary` element contains text that conveys a short summary,
    abstract, or excerpt of an entry.
    """
    xmlns = xmlns

    class Attrs(xsc.Element.Attrs):
        class type(xsc.TextAttr): pass


class title(xsc.Element):
    """
    The :class:`title` element contains text that conveys a human-readable
    title for an :class:`entry` or :class:`feed`.
    """
    xmlns = xmlns

    class Attrs(xsc.Element.Attrs):
        class type(xsc.TextAttr): pass


class updated(xsc.Element):
    """
    The :class:`updated` element contains a date indicating the most recent
    instant in time when an :class:`entry` or :class:`feed` was modified in a
    way the publisher considers significant.
    """
    xmlns = xmlns


class email(xsc.Element):
    """
    The :class:`email` element's content conveys an e-mail address associated
    with the person.
    """
    xmlns = xmlns


class uri(xsc.Element):
    """
    The :class:`uri` element's content conveys an IRI associated with the
    person.
    """
    xmlns = xmlns


class name(xsc.Element):
    """
    The :class:`name` element's content conveys a human-readable name for the
    person.
    """
    xmlns = xmlns


# Content models: which children (or text) each element may contain.
link.model = \
category.model = sims.Empty()
content.model = sims.ElementsOrText(html.div)
source.model = sims.ElementsOrText(author, category, contributor, generator, icon, id, link, logo, rights, subtitle, title, updated)
feed.model = sims.Elements(author, category, contributor, generator, icon, logo, id, link, rights, subtitle, title, updated, entry)
entry.model = sims.Elements(author, category, content, contributor, id, link, published, rights, source, summary, title, updated)
contributor.model = \
author.model = sims.Elements(name, uri, email)
title.model = \
summary.model = \
subtitle.model = \
rights.model = sims.ElementsOrText(html.div)
updated.model = \
published.model = \
logo.model = \
id.model = \
icon.model = \
generator.model = \
email.model = \
uri.model = \
name.model = sims.NoElements()