code: string, length 22 to 1.05M
apis: sequence, length 1 to 3.31k
extract_api: string, length 75 to 3.25M
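Each row pairs a raw source file (code) with the list of fully qualified APIs it calls (apis) and a Python-literal list of per-call records (extract_api). Judging from the rows below, each record appears to hold the character span of the call, the fully qualified API name, the call expression, a repr of its positional and keyword arguments, the normalized call text, the span of the callee, a boolean flag, and the resolving import statement; this field layout is inferred from the data, not a documented schema. A minimal parsing sketch, using one record copied verbatim from the gamayun scraper row further down:

import ast

# One extract_api record copied from the gamayun scraper row below.
raw = ("[((1110, 1145), 'gamayun.gamayun_utils.run_gamayun_script_logic', "
       "'run_gamayun_script_logic', (['job_logic'], {}), '(job_logic)\\n', "
       "(1134, 1145), False, "
       "'from gamayun.gamayun_utils import run_gamayun_script_logic\\n')]")

# The column is a Python literal, so ast.literal_eval can decode it safely.
# Field names below are assumptions read off the data; the meaning of `flag` is unknown.
for span, api, call_name, args, call_text, callee_span, flag, import_stmt in ast.literal_eval(raw):
    print(api, span, import_stmt.strip())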
from django.conf import settings
from django.db import models
from .validators import validate_file_extension

# Create your models here.
class NormalProject(models.Model):
    name = models.CharField(max_length=100, null=False, unique=True)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='owner_normalproject', on_delete=models.CASCADE)
    project_members = models.ManyToManyField(settings.AUTH_USER_MODEL)
    public_status = models.BooleanField(default=0)
    collab_status = models.BooleanField(default=0)
    timestamp = models.DateTimeField(auto_now_add=True)

    def is_public(self):
        return bool(self.public_status)

    def is_collab(self):
        return bool(self.collab_status)

    def get_project_members(self):
        return self.project_members

    def get_owner(self):
        return self.owner

    def set_status_public(self, status):
        self.public_status = status

    def set_status_collab(self, status):
        self.collab_status = status

    def __str__(self):
        return str(self.name)


class Document(models.Model):
    file = models.FileField(blank=False, null=False, upload_to='mediafiles/', validators=[validate_file_extension])
    name = models.CharField(max_length=100, null=False, unique=True)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='owner_document', on_delete=models.CASCADE)
    project = models.ForeignKey(NormalProject, related_name='project_document', on_delete=models.CASCADE)
    tagged_doc = models.CharField(max_length=100)
    timestamp = models.DateTimeField(auto_now_add=True)

    def set_tagged_doc(self, file_url):
        self.tagged_doc = file_url

    def set_file(self, file_url):
        self.file = file_url


class NormalMetadata(models.Model):
    name = models.CharField(max_length=100, null=False, unique=True)
    project = models.ForeignKey(NormalProject, related_name='project_normalMetadata', on_delete=models.CASCADE)


class DocumentNormalMetadataRelation(models.Model):
    metadata = models.ForeignKey(NormalMetadata, related_name='metadata', on_delete=models.CASCADE)
    document = models.ForeignKey(Document, related_name='document', on_delete=models.CASCADE)
    data = models.CharField(max_length=100, blank=True, null=True)


class ParallelRelation(models.Model):
    doc_one = models.ManyToManyField(Document, related_name='doc_one')
    doc_two = models.ManyToManyField(Document, related_name='doc_two')


class ParallelProject(models.Model):
    name = models.CharField(max_length=100, null=False, unique=True)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='owner_parallelproject', on_delete=models.CASCADE)
    project_members = models.ManyToManyField(settings.AUTH_USER_MODEL)
    relations = models.ManyToManyField(ParallelRelation)
    public_status = models.BooleanField(default=0)
    timestamp = models.DateTimeField(auto_now_add=True)

    def is_public(self):
        return bool(self.public_status)

    def get_project_members(self):
        return self.project_members

    def __str__(self):
        return str(self.name)


class ParallelMetadata(models.Model):
    name = models.CharField(max_length=100, null=False)
    project = models.ForeignKey(ParallelProject, related_name='project_parallelmetadata', on_delete=models.CASCADE)


class DocumentParallelMetadaRelation(models.Model):  # TODO: incomplete
    metadata = models.ForeignKey(ParallelMetadata, related_name="parallelmetadata", on_delete=models.CASCADE)
    relation = models.ForeignKey(ParallelRelation, related_name="relation", on_delete=models.CASCADE)
    data = models.CharField(max_length=100, blank=True, null=True)
[ "django.db.models.FileField", "django.db.models.ManyToManyField", "django.db.models.CharField", "django.db.models.ForeignKey", "django.db.models.BooleanField", "django.db.models.DateTimeField" ]
[((183, 240), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(False)', 'unique': '(True)'}), '(max_length=100, null=False, unique=True)\n', (199, 240), False, 'from django.db import models\n'), ((253, 363), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'related_name': '"""owner_normalproject"""', 'on_delete': 'models.CASCADE'}), "(settings.AUTH_USER_MODEL, related_name=\n 'owner_normalproject', on_delete=models.CASCADE)\n", (270, 363), False, 'from django.db import models\n'), ((381, 429), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (403, 429), False, 'from django.db import models\n'), ((450, 480), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(0)'}), '(default=0)\n', (469, 480), False, 'from django.db import models\n'), ((501, 531), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(0)'}), '(default=0)\n', (520, 531), False, 'from django.db import models\n'), ((548, 587), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (568, 587), False, 'from django.db import models\n'), ((1092, 1200), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(False)', 'null': '(False)', 'upload_to': '"""mediafiles/"""', 'validators': '[validate_file_extension]'}), "(blank=False, null=False, upload_to='mediafiles/',\n validators=[validate_file_extension])\n", (1108, 1200), False, 'from django.db import models\n'), ((1208, 1265), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(False)', 'unique': '(True)'}), '(max_length=100, null=False, unique=True)\n', (1224, 1265), False, 'from django.db import models\n'), ((1278, 1382), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'related_name': '"""owner_document"""', 'on_delete': 'models.CASCADE'}), "(settings.AUTH_USER_MODEL, related_name='owner_document',\n on_delete=models.CASCADE)\n", (1295, 1382), False, 'from django.db import models\n'), ((1392, 1488), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NormalProject'], {'related_name': '"""project_document"""', 'on_delete': 'models.CASCADE'}), "(NormalProject, related_name='project_document', on_delete\n =models.CASCADE)\n", (1409, 1488), False, 'from django.db import models\n'), ((1501, 1533), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1517, 1533), False, 'from django.db import models\n'), ((1550, 1589), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1570, 1589), False, 'from django.db import models\n'), ((1778, 1835), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(False)', 'unique': '(True)'}), '(max_length=100, null=False, unique=True)\n', (1794, 1835), False, 'from django.db import models\n'), ((1850, 1951), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NormalProject'], {'related_name': '"""project_normalMetadata"""', 'on_delete': 'models.CASCADE'}), "(NormalProject, related_name='project_normalMetadata',\n on_delete=models.CASCADE)\n", (1867, 1951), False, 'from django.db import models\n'), ((2016, 2105), 'django.db.models.ForeignKey', 'models.ForeignKey', (['NormalMetadata'], {'related_name': '"""metadata"""', 'on_delete': 'models.CASCADE'}), 
"(NormalMetadata, related_name='metadata', on_delete=models\n .CASCADE)\n", (2033, 2105), False, 'from django.db import models\n'), ((2116, 2194), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Document'], {'related_name': '"""document"""', 'on_delete': 'models.CASCADE'}), "(Document, related_name='document', on_delete=models.CASCADE)\n", (2133, 2194), False, 'from django.db import models\n'), ((2206, 2261), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (2222, 2261), False, 'from django.db import models\n'), ((2315, 2371), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Document'], {'related_name': '"""doc_one"""'}), "(Document, related_name='doc_one')\n", (2337, 2371), False, 'from django.db import models\n'), ((2386, 2442), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Document'], {'related_name': '"""doc_two"""'}), "(Document, related_name='doc_two')\n", (2408, 2442), False, 'from django.db import models\n'), ((2492, 2549), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(False)', 'unique': '(True)'}), '(max_length=100, null=False, unique=True)\n', (2508, 2549), False, 'from django.db import models\n'), ((2562, 2674), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'related_name': '"""owner_parallelproject"""', 'on_delete': 'models.CASCADE'}), "(settings.AUTH_USER_MODEL, related_name=\n 'owner_parallelproject', on_delete=models.CASCADE)\n", (2579, 2674), False, 'from django.db import models\n'), ((2692, 2740), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (2714, 2740), False, 'from django.db import models\n'), ((2757, 2797), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['ParallelRelation'], {}), '(ParallelRelation)\n', (2779, 2797), False, 'from django.db import models\n'), ((2818, 2848), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(0)'}), '(default=0)\n', (2837, 2848), False, 'from django.db import models\n'), ((2865, 2904), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2885, 2904), False, 'from django.db import models\n'), ((3147, 3191), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(False)'}), '(max_length=100, null=False)\n', (3163, 3191), False, 'from django.db import models\n'), ((3206, 3311), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ParallelProject'], {'related_name': '"""project_parallelmetadata"""', 'on_delete': 'models.CASCADE'}), "(ParallelProject, related_name='project_parallelmetadata',\n on_delete=models.CASCADE)\n", (3223, 3311), False, 'from django.db import models\n'), ((3382, 3480), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ParallelMetadata'], {'related_name': '"""parallelmetadata"""', 'on_delete': 'models.CASCADE'}), "(ParallelMetadata, related_name='parallelmetadata',\n on_delete=models.CASCADE)\n", (3399, 3480), False, 'from django.db import models\n'), ((3492, 3583), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ParallelRelation'], {'related_name': '"""relation"""', 'on_delete': 'models.CASCADE'}), "(ParallelRelation, related_name='relation', on_delete=\n models.CASCADE)\n", (3509, 3583), False, 'from django.db import models\n'), ((3590, 3645), 
'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (3606, 3645), False, 'from django.db import models\n')]
import numpy as np import pandas as pd import torch import torch.utils.data import torch.optim as optim from torch.optim import Adam from torch.nn import functional as F from torch.nn import (Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss,SmoothL1Loss) from model.synthesizer.transformer import ImageTransformer,DataTransformer from tqdm import tqdm class Classifier(Module): def __init__(self,input_dim, dis_dims,st_ed): super(Classifier,self).__init__() dim = input_dim-(st_ed[1]-st_ed[0]) seq = [] self.str_end = st_ed for item in list(dis_dims): seq += [ Linear(dim, item), LeakyReLU(0.2), Dropout(0.5) ] dim = item if (st_ed[1]-st_ed[0])==1: seq += [Linear(dim, 1)] elif (st_ed[1]-st_ed[0])==2: seq += [Linear(dim, 1),Sigmoid()] else: seq += [Linear(dim,(st_ed[1]-st_ed[0]))] self.seq = Sequential(*seq) def forward(self, input): label=None if (self.str_end[1]-self.str_end[0])==1: label = input[:, self.str_end[0]:self.str_end[1]] else: label = torch.argmax(input[:, self.str_end[0]:self.str_end[1]], axis=-1) new_imp = torch.cat((input[:,:self.str_end[0]],input[:,self.str_end[1]:]),1) if ((self.str_end[1]-self.str_end[0])==2) | ((self.str_end[1]-self.str_end[0])==1): return self.seq(new_imp).view(-1), label else: return self.seq(new_imp), label def apply_activate(data, output_info): data_t = [] st = 0 for item in output_info: if item[1] == 'tanh': ed = st + item[0] data_t.append(torch.tanh(data[:, st:ed])) st = ed elif item[1] == 'softmax': ed = st + item[0] data_t.append(F.gumbel_softmax(data[:, st:ed], tau=0.2)) st = ed return torch.cat(data_t, dim=1) def get_st_ed(target_col_index,output_info): st = 0 c= 0 tc= 0 for item in output_info: if c==target_col_index: break if item[1]=='tanh': st += item[0] elif item[1] == 'softmax': st += item[0] c+=1 tc+=1 ed= st+output_info[tc][0] return (st,ed) def random_choice_prob_index_sampling(probs,col_idx): option_list = [] for i in col_idx: pp = probs[i] option_list.append(np.random.choice(np.arange(len(probs[i])), p=pp)) return np.array(option_list).reshape(col_idx.shape) def random_choice_prob_index(a, axis=1): r = np.expand_dims(np.random.rand(a.shape[1 - axis]), axis=axis) return (a.cumsum(axis=axis) > r).argmax(axis=axis) def maximum_interval(output_info): max_interval = 0 for item in output_info: max_interval = max(max_interval, item[0]) return max_interval class Cond(object): def __init__(self, data, output_info): self.model = [] st = 0 counter = 0 for item in output_info: if item[1] == 'tanh': st += item[0] continue elif item[1] == 'softmax': ed = st + item[0] counter += 1 self.model.append(np.argmax(data[:, st:ed], axis=-1)) st = ed self.interval = [] self.n_col = 0 self.n_opt = 0 st = 0 self.p = np.zeros((counter, maximum_interval(output_info))) self.p_sampling = [] for item in output_info: if item[1] == 'tanh': st += item[0] continue elif item[1] == 'softmax': ed = st + item[0] tmp = np.sum(data[:, st:ed], axis=0) tmp_sampling = np.sum(data[:, st:ed], axis=0) tmp = np.log(tmp + 1) tmp = tmp / np.sum(tmp) tmp_sampling = tmp_sampling / np.sum(tmp_sampling) self.p_sampling.append(tmp_sampling) self.p[self.n_col, :item[0]] = tmp self.interval.append((self.n_opt, item[0])) self.n_opt += item[0] self.n_col += 1 st = ed self.interval = np.asarray(self.interval) def sample_train(self, batch): if self.n_col == 0: return None batch = batch idx = np.random.choice(np.arange(self.n_col), batch) vec = np.zeros((batch, self.n_opt), dtype='float32') mask = np.zeros((batch, 
self.n_col), dtype='float32') mask[np.arange(batch), idx] = 1 opt1prime = random_choice_prob_index(self.p[idx]) for i in np.arange(batch): vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1 return vec, mask, idx, opt1prime def sample(self, batch): if self.n_col == 0: return None batch = batch idx = np.random.choice(np.arange(self.n_col), batch) vec = np.zeros((batch, self.n_opt), dtype='float32') opt1prime = random_choice_prob_index_sampling(self.p_sampling,idx) for i in np.arange(batch): vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1 return vec def cond_loss(data, output_info, c, m): loss = [] st = 0 st_c = 0 for item in output_info: if item[1] == 'tanh': st += item[0] continue elif item[1] == 'softmax': ed = st + item[0] ed_c = st_c + item[0] tmp = F.cross_entropy( data[:, st:ed], torch.argmax(c[:, st_c:ed_c], dim=1), reduction='none') loss.append(tmp) st = ed st_c = ed_c loss = torch.stack(loss, dim=1) return (loss * m).sum() / data.size()[0] class Sampler(object): def __init__(self, data, output_info): super(Sampler, self).__init__() self.data = data self.model = [] self.n = len(data) st = 0 for item in output_info: if item[1] == 'tanh': st += item[0] continue elif item[1] == 'softmax': ed = st + item[0] tmp = [] for j in range(item[0]): tmp.append(np.nonzero(data[:, st + j])[0]) self.model.append(tmp) st = ed def sample(self, n, col, opt): if col is None: idx = np.random.choice(np.arange(self.n), n) return self.data[idx] idx = [] for c, o in zip(col, opt): idx.append(np.random.choice(self.model[c][o])) return self.data[idx] class Discriminator(Module): def __init__(self, side, layers): super(Discriminator, self).__init__() self.side = side info = len(layers)-2 self.seq = Sequential(*layers) self.seq_info = Sequential(*layers[:info]) def forward(self, input): return (self.seq(input)), self.seq_info(input) class Generator(Module): def __init__(self, side, layers): super(Generator, self).__init__() self.side = side self.seq = Sequential(*layers) def forward(self, input_): return self.seq(input_) def determine_layers_disc(side, num_channels): assert side >= 4 and side <= 32 layer_dims = [(1, side), (num_channels, side // 2)] while layer_dims[-1][1] > 3 and len(layer_dims) < 4: layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2)) layers_D = [] for prev, curr in zip(layer_dims, layer_dims[1:]): layers_D += [ Conv2d(prev[0], curr[0], 4, 2, 1, bias=False), BatchNorm2d(curr[0]), LeakyReLU(0.2, inplace=True) ] print() layers_D += [ Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0), Sigmoid() ] return layers_D def determine_layers_gen(side, random_dim, num_channels): assert side >= 4 and side <= 32 layer_dims = [(1, side), (num_channels, side // 2)] while layer_dims[-1][1] > 3 and len(layer_dims) < 4: layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2)) layers_G = [ ConvTranspose2d( random_dim, layer_dims[-1][0], layer_dims[-1][1], 1, 0, output_padding=0, bias=False) ] for prev, curr in zip(reversed(layer_dims), reversed(layer_dims[:-1])): layers_G += [ BatchNorm2d(prev[0]), ReLU(True), ConvTranspose2d(prev[0], curr[0], 4, 2, 1, output_padding=0, bias=True) ] return layers_G def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') != -1: init.normal_(m.weight.data, 1.0, 0.02) init.constant_(m.bias.data, 0) class CTABGANSynthesizer: def __init__(self, class_dim=(256, 256, 256, 256), random_dim=100, num_channels=64, l2scale=1e-5, batch_size=500, epochs=1): 
self.random_dim = random_dim self.class_dim = class_dim self.num_channels = num_channels self.dside = None self.gside = None self.l2scale = l2scale self.batch_size = batch_size self.epochs = epochs self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") def fit(self, train_data=pd.DataFrame, categorical=[], mixed={}, type={}): problem_type = None target_index=None if type: problem_type = list(type.keys())[0] if problem_type: target_index = train_data.columns.get_loc(type[problem_type]) self.transformer = DataTransformer(train_data=train_data, categorical_list=categorical, mixed_dict=mixed) self.transformer.fit() train_data = self.transformer.transform(train_data.values) data_sampler = Sampler(train_data, self.transformer.output_info) data_dim = self.transformer.output_dim self.cond_generator = Cond(train_data, self.transformer.output_info) sides = [4, 8, 16, 24, 32] col_size_d = data_dim + self.cond_generator.n_opt for i in sides: if i * i >= col_size_d: self.dside = i break sides = [4, 8, 16, 24, 32] col_size_g = data_dim for i in sides: if i * i >= col_size_g: self.gside = i break layers_G = determine_layers_gen(self.gside, self.random_dim+self.cond_generator.n_opt, self.num_channels) layers_D = determine_layers_disc(self.dside, self.num_channels) self.generator = Generator(self.gside, layers_G).to(self.device) discriminator = Discriminator(self.dside, layers_D).to(self.device) optimizer_params = dict(lr=2e-4, betas=(0.5, 0.9), eps=1e-3, weight_decay=self.l2scale) optimizerG = Adam(self.generator.parameters(), **optimizer_params) optimizerD = Adam(discriminator.parameters(), **optimizer_params) st_ed = None classifier=None optimizerC= None if target_index != None: st_ed= get_st_ed(target_index,self.transformer.output_info) classifier = Classifier(data_dim,self.class_dim,st_ed).to(self.device) optimizerC = optim.Adam(classifier.parameters(),**optimizer_params) self.generator.apply(weights_init) discriminator.apply(weights_init) self.Gtransformer = ImageTransformer(self.gside) self.Dtransformer = ImageTransformer(self.dside) steps_per_epoch = max(1, len(train_data) // self.batch_size) for i in tqdm(range(self.epochs)): for _ in range(steps_per_epoch): noisez = torch.randn(self.batch_size, self.random_dim, device=self.device) condvec = self.cond_generator.sample_train(self.batch_size) c, m, col, opt = condvec c = torch.from_numpy(c).to(self.device) m = torch.from_numpy(m).to(self.device) noisez = torch.cat([noisez, c], dim=1) noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1) perm = np.arange(self.batch_size) np.random.shuffle(perm) real = data_sampler.sample(self.batch_size, col[perm], opt[perm]) c_perm = c[perm] real = torch.from_numpy(real.astype('float32')).to(self.device) fake = self.generator(noisez) faket = self.Gtransformer.inverse_transform(fake) fakeact = apply_activate(faket, self.transformer.output_info) fake_cat = torch.cat([fakeact, c], dim=1) real_cat = torch.cat([real, c_perm], dim=1) real_cat_d = self.Dtransformer.transform(real_cat) fake_cat_d = self.Dtransformer.transform(fake_cat) optimizerD.zero_grad() y_real,_ = discriminator(real_cat_d) y_fake,_ = discriminator(fake_cat_d) loss_d = (-(torch.log(y_real + 1e-4).mean()) - (torch.log(1. 
- y_fake + 1e-4).mean())) loss_d.backward() optimizerD.step() noisez = torch.randn(self.batch_size, self.random_dim, device=self.device) condvec = self.cond_generator.sample_train(self.batch_size) c, m, col, opt = condvec c = torch.from_numpy(c).to(self.device) m = torch.from_numpy(m).to(self.device) noisez = torch.cat([noisez, c], dim=1) noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1) optimizerG.zero_grad() fake = self.generator(noisez) faket = self.Gtransformer.inverse_transform(fake) fakeact = apply_activate(faket, self.transformer.output_info) fake_cat = torch.cat([fakeact, c], dim=1) fake_cat = self.Dtransformer.transform(fake_cat) y_fake,info_fake = discriminator(fake_cat) cross_entropy = cond_loss(faket, self.transformer.output_info, c, m) _,info_real = discriminator(real_cat_d) g = -(torch.log(y_fake + 1e-4).mean()) + cross_entropy g.backward(retain_graph=True) loss_mean = torch.norm(torch.mean(info_fake.view(self.batch_size,-1), dim=0) - torch.mean(info_real.view(self.batch_size,-1), dim=0), 1) loss_std = torch.norm(torch.std(info_fake.view(self.batch_size,-1), dim=0) - torch.std(info_real.view(self.batch_size,-1), dim=0), 1) loss_info = loss_mean + loss_std loss_info.backward() optimizerG.step() if problem_type: fake = self.generator(noisez) faket = self.Gtransformer.inverse_transform(fake) fakeact = apply_activate(faket, self.transformer.output_info) real_pre, real_label = classifier(real) fake_pre, fake_label = classifier(fakeact) c_loss = CrossEntropyLoss() if (st_ed[1] - st_ed[0])==1: c_loss= SmoothL1Loss() real_label = real_label.type_as(real_pre) fake_label = fake_label.type_as(fake_pre) real_label = torch.reshape(real_label,real_pre.size()) fake_label = torch.reshape(fake_label,fake_pre.size()) elif (st_ed[1] - st_ed[0])==2: c_loss = BCELoss() real_label = real_label.type_as(real_pre) fake_label = fake_label.type_as(fake_pre) loss_cc = c_loss(real_pre, real_label) loss_cg = c_loss(fake_pre, fake_label) optimizerG.zero_grad() loss_cg.backward() optimizerG.step() optimizerC.zero_grad() loss_cc.backward() optimizerC.step() def sample(self, n): self.generator.eval() output_info = self.transformer.output_info steps = n // self.batch_size + 1 data = [] for i in range(steps): noisez = torch.randn(self.batch_size, self.random_dim, device=self.device) condvec = self.cond_generator.sample(self.batch_size) c = condvec c = torch.from_numpy(c).to(self.device) noisez = torch.cat([noisez, c], dim=1) noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1) fake = self.generator(noisez) faket = self.Gtransformer.inverse_transform(fake) fakeact = apply_activate(faket,output_info) data.append(fakeact.detach().cpu().numpy()) data = np.concatenate(data, axis=0) result = self.transformer.inverse_transform(data) return result[0:n]
[ "torch.nn.Dropout", "numpy.sum", "numpy.argmax", "torch.argmax", "torch.cat", "torch.randn", "torch.nn.init.constant_", "numpy.arange", "torch.nn.BCELoss", "model.synthesizer.transformer.ImageTransformer", "torch.nn.Linear", "numpy.random.choice", "torch.log", "numpy.random.shuffle", "numpy.asarray", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.cuda.is_available", "torch.nn.LeakyReLU", "numpy.concatenate", "torch.nn.Sigmoid", "torch.tanh", "torch.from_numpy", "torch.nn.ReLU", "torch.stack", "torch.nn.ConvTranspose2d", "torch.nn.Sequential", "numpy.log", "numpy.zeros", "torch.nn.CrossEntropyLoss", "numpy.nonzero", "model.synthesizer.transformer.DataTransformer", "torch.nn.init.normal_", "numpy.array", "numpy.random.rand", "torch.nn.SmoothL1Loss", "torch.nn.functional.gumbel_softmax" ]
[((2100, 2124), 'torch.cat', 'torch.cat', (['data_t'], {'dim': '(1)'}), '(data_t, dim=1)\n', (2109, 2124), False, 'import torch\n'), ((6050, 6074), 'torch.stack', 'torch.stack', (['loss'], {'dim': '(1)'}), '(loss, dim=1)\n', (6061, 6074), False, 'import torch\n'), ((1104, 1120), 'torch.nn.Sequential', 'Sequential', (['*seq'], {}), '(*seq)\n', (1114, 1120), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((1426, 1496), 'torch.cat', 'torch.cat', (['(input[:, :self.str_end[0]], input[:, self.str_end[1]:])', '(1)'], {}), '((input[:, :self.str_end[0]], input[:, self.str_end[1]:]), 1)\n', (1435, 1496), False, 'import torch\n'), ((2803, 2836), 'numpy.random.rand', 'np.random.rand', (['a.shape[1 - axis]'], {}), '(a.shape[1 - axis])\n', (2817, 2836), True, 'import numpy as np\n'), ((4504, 4529), 'numpy.asarray', 'np.asarray', (['self.interval'], {}), '(self.interval)\n', (4514, 4529), True, 'import numpy as np\n'), ((4725, 4771), 'numpy.zeros', 'np.zeros', (['(batch, self.n_opt)'], {'dtype': '"""float32"""'}), "((batch, self.n_opt), dtype='float32')\n", (4733, 4771), True, 'import numpy as np\n'), ((4787, 4833), 'numpy.zeros', 'np.zeros', (['(batch, self.n_col)'], {'dtype': '"""float32"""'}), "((batch, self.n_col), dtype='float32')\n", (4795, 4833), True, 'import numpy as np\n'), ((4952, 4968), 'numpy.arange', 'np.arange', (['batch'], {}), '(batch)\n', (4961, 4968), True, 'import numpy as np\n'), ((5275, 5321), 'numpy.zeros', 'np.zeros', (['(batch, self.n_opt)'], {'dtype': '"""float32"""'}), "((batch, self.n_opt), dtype='float32')\n", (5283, 5321), True, 'import numpy as np\n'), ((5423, 5439), 'numpy.arange', 'np.arange', (['batch'], {}), '(batch)\n', (5432, 5439), True, 'import numpy as np\n'), ((7200, 7219), 'torch.nn.Sequential', 'Sequential', (['*layers'], {}), '(*layers)\n', (7210, 7219), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((7244, 7270), 'torch.nn.Sequential', 'Sequential', (['*layers[:info]'], {}), '(*layers[:info])\n', (7254, 7270), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((7507, 7526), 'torch.nn.Sequential', 'Sequential', (['*layers'], {}), '(*layers)\n', (7517, 7526), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8144, 8197), 'torch.nn.Conv2d', 'Conv2d', (['layer_dims[-1][0]', '(1)', 'layer_dims[-1][1]', '(1)', '(0)'], {}), '(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0)\n', (8150, 8197), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8208, 8217), 'torch.nn.Sigmoid', 'Sigmoid', ([], {}), '()\n', (8215, 8217), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8561, 8666), 'torch.nn.ConvTranspose2d', 'ConvTranspose2d', (['random_dim', 'layer_dims[-1][0]', 'layer_dims[-1][1]', '(1)', '(0)'], {'output_padding': '(0)', 'bias': '(False)'}), '(random_dim, layer_dims[-1][0], 
layer_dims[-1][1], 1, 0,\n output_padding=0, bias=False)\n', (8576, 8666), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((9063, 9101), 'torch.nn.init.normal_', 'init.normal_', (['m.weight.data', '(0.0)', '(0.02)'], {}), '(m.weight.data, 0.0, 0.02)\n', (9075, 9101), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((10196, 10286), 'model.synthesizer.transformer.DataTransformer', 'DataTransformer', ([], {'train_data': 'train_data', 'categorical_list': 'categorical', 'mixed_dict': 'mixed'}), '(train_data=train_data, categorical_list=categorical,\n mixed_dict=mixed)\n', (10211, 10286), False, 'from model.synthesizer.transformer import ImageTransformer, DataTransformer\n'), ((12066, 12094), 'model.synthesizer.transformer.ImageTransformer', 'ImageTransformer', (['self.gside'], {}), '(self.gside)\n', (12082, 12094), False, 'from model.synthesizer.transformer import ImageTransformer, DataTransformer\n'), ((12130, 12158), 'model.synthesizer.transformer.ImageTransformer', 'ImageTransformer', (['self.dside'], {}), '(self.dside)\n', (12146, 12158), False, 'from model.synthesizer.transformer import ImageTransformer, DataTransformer\n'), ((18112, 18140), 'numpy.concatenate', 'np.concatenate', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (18126, 18140), True, 'import numpy as np\n'), ((1334, 1398), 'torch.argmax', 'torch.argmax', (['input[:, self.str_end[0]:self.str_end[1]]'], {'axis': '(-1)'}), '(input[:, self.str_end[0]:self.str_end[1]], axis=-1)\n', (1346, 1398), False, 'import torch\n'), ((2693, 2714), 'numpy.array', 'np.array', (['option_list'], {}), '(option_list)\n', (2701, 2714), True, 'import numpy as np\n'), ((4680, 4701), 'numpy.arange', 'np.arange', (['self.n_col'], {}), '(self.n_col)\n', (4689, 4701), True, 'import numpy as np\n'), ((5230, 5251), 'numpy.arange', 'np.arange', (['self.n_col'], {}), '(self.n_col)\n', (5239, 5251), True, 'import numpy as np\n'), ((7973, 8018), 'torch.nn.Conv2d', 'Conv2d', (['prev[0]', 'curr[0]', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(prev[0], curr[0], 4, 2, 1, bias=False)\n', (7979, 8018), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8032, 8052), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['curr[0]'], {}), '(curr[0])\n', (8043, 8052), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8066, 8094), 'torch.nn.LeakyReLU', 'LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (8075, 8094), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8793, 8813), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['prev[0]'], {}), '(prev[0])\n', (8804, 8813), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8827, 8837), 'torch.nn.ReLU', 'ReLU', (['(True)'], {}), '(True)\n', (8831, 8837), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, 
Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((8851, 8922), 'torch.nn.ConvTranspose2d', 'ConvTranspose2d', (['prev[0]', 'curr[0]', '(4)', '(2)', '(1)'], {'output_padding': '(0)', 'bias': '(True)'}), '(prev[0], curr[0], 4, 2, 1, output_padding=0, bias=True)\n', (8866, 8922), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((9155, 9193), 'torch.nn.init.normal_', 'init.normal_', (['m.weight.data', '(1.0)', '(0.02)'], {}), '(m.weight.data, 1.0, 0.02)\n', (9167, 9193), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((9202, 9232), 'torch.nn.init.constant_', 'init.constant_', (['m.bias.data', '(0)'], {}), '(m.bias.data, 0)\n', (9216, 9232), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((17507, 17572), 'torch.randn', 'torch.randn', (['self.batch_size', 'self.random_dim'], {'device': 'self.device'}), '(self.batch_size, self.random_dim, device=self.device)\n', (17518, 17572), False, 'import torch\n'), ((17736, 17765), 'torch.cat', 'torch.cat', (['[noisez, c]'], {'dim': '(1)'}), '([noisez, c], dim=1)\n', (17745, 17765), False, 'import torch\n'), ((719, 736), 'torch.nn.Linear', 'Linear', (['dim', 'item'], {}), '(dim, item)\n', (725, 736), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((754, 768), 'torch.nn.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (763, 768), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((786, 798), 'torch.nn.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (793, 798), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((900, 914), 'torch.nn.Linear', 'Linear', (['dim', '(1)'], {}), '(dim, 1)\n', (906, 914), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((1887, 1913), 'torch.tanh', 'torch.tanh', (['data[:, st:ed]'], {}), '(data[:, st:ed])\n', (1897, 1913), False, 'import torch\n'), ((4847, 4863), 'numpy.arange', 'np.arange', (['batch'], {}), '(batch)\n', (4856, 4863), True, 'import numpy as np\n'), ((6816, 6833), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (6825, 6833), True, 'import numpy as np\n'), ((6947, 6981), 'numpy.random.choice', 'np.random.choice', (['self.model[c][o]'], {}), '(self.model[c][o])\n', (6963, 6981), True, 'import numpy as np\n'), ((9819, 9844), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9842, 9844), False, 'import torch\n'), ((12376, 12441), 'torch.randn', 'torch.randn', (['self.batch_size', 'self.random_dim'], {'device': 'self.device'}), '(self.batch_size, self.random_dim, device=self.device)\n', (12387, 12441), False, 'import torch\n'), ((12697, 12726), 'torch.cat', 'torch.cat', (['[noisez, c]'], {'dim': '(1)'}), '([noisez, c], 
dim=1)\n', (12706, 12726), False, 'import torch\n'), ((12871, 12897), 'numpy.arange', 'np.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (12880, 12897), True, 'import numpy as np\n'), ((12914, 12937), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (12931, 12937), True, 'import numpy as np\n'), ((13412, 13442), 'torch.cat', 'torch.cat', (['[fakeact, c]'], {'dim': '(1)'}), '([fakeact, c], dim=1)\n', (13421, 13442), False, 'import torch\n'), ((13470, 13502), 'torch.cat', 'torch.cat', (['[real, c_perm]'], {'dim': '(1)'}), '([real, c_perm], dim=1)\n', (13479, 13502), False, 'import torch\n'), ((14041, 14106), 'torch.randn', 'torch.randn', (['self.batch_size', 'self.random_dim'], {'device': 'self.device'}), '(self.batch_size, self.random_dim, device=self.device)\n', (14052, 14106), False, 'import torch\n'), ((14379, 14408), 'torch.cat', 'torch.cat', (['[noisez, c]'], {'dim': '(1)'}), '([noisez, c], dim=1)\n', (14388, 14408), False, 'import torch\n'), ((14769, 14799), 'torch.cat', 'torch.cat', (['[fakeact, c]'], {'dim': '(1)'}), '([fakeact, c], dim=1)\n', (14778, 14799), False, 'import torch\n'), ((982, 996), 'torch.nn.Linear', 'Linear', (['dim', '(1)'], {}), '(dim, 1)\n', (988, 996), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((997, 1006), 'torch.nn.Sigmoid', 'Sigmoid', ([], {}), '()\n', (1004, 1006), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((1042, 1074), 'torch.nn.Linear', 'Linear', (['dim', '(st_ed[1] - st_ed[0])'], {}), '(dim, st_ed[1] - st_ed[0])\n', (1048, 1074), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((2026, 2067), 'torch.nn.functional.gumbel_softmax', 'F.gumbel_softmax', (['data[:, st:ed]'], {'tau': '(0.2)'}), '(data[:, st:ed], tau=0.2)\n', (2042, 2067), True, 'from torch.nn import functional as F\n'), ((3958, 3988), 'numpy.sum', 'np.sum', (['data[:, st:ed]'], {'axis': '(0)'}), '(data[:, st:ed], axis=0)\n', (3964, 3988), True, 'import numpy as np\n'), ((4022, 4052), 'numpy.sum', 'np.sum', (['data[:, st:ed]'], {'axis': '(0)'}), '(data[:, st:ed], axis=0)\n', (4028, 4052), True, 'import numpy as np\n'), ((4080, 4095), 'numpy.log', 'np.log', (['(tmp + 1)'], {}), '(tmp + 1)\n', (4086, 4095), True, 'import numpy as np\n'), ((5897, 5933), 'torch.argmax', 'torch.argmax', (['c[:, st_c:ed_c]'], {'dim': '(1)'}), '(c[:, st_c:ed_c], dim=1)\n', (5909, 5933), False, 'import torch\n'), ((16165, 16183), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (16181, 16183), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((17679, 17698), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (17695, 17698), False, 'import torch\n'), ((3465, 3499), 'numpy.argmax', 'np.argmax', (['data[:, st:ed]'], {'axis': '(-1)'}), '(data[:, st:ed], axis=-1)\n', (3474, 3499), True, 'import numpy as np\n'), ((4126, 4137), 'numpy.sum', 'np.sum', (['tmp'], {}), '(tmp)\n', (4132, 4137), True, 'import numpy as np\n'), ((4184, 4204), 'numpy.sum', 'np.sum', (['tmp_sampling'], {}), '(tmp_sampling)\n', (4190, 4204), True, 'import 
numpy as np\n'), ((12580, 12599), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (12596, 12599), False, 'import torch\n'), ((12636, 12655), 'torch.from_numpy', 'torch.from_numpy', (['m'], {}), '(m)\n', (12652, 12655), False, 'import torch\n'), ((14262, 14281), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (14278, 14281), False, 'import torch\n'), ((14318, 14337), 'torch.from_numpy', 'torch.from_numpy', (['m'], {}), '(m)\n', (14334, 14337), False, 'import torch\n'), ((16287, 16301), 'torch.nn.SmoothL1Loss', 'SmoothL1Loss', ([], {}), '()\n', (16299, 16301), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((13888, 13920), 'torch.log', 'torch.log', (['(1.0 - y_fake + 0.0001)'], {}), '(1.0 - y_fake + 0.0001)\n', (13897, 13920), False, 'import torch\n'), ((16722, 16731), 'torch.nn.BCELoss', 'BCELoss', ([], {}), '()\n', (16729, 16731), False, 'from torch.nn import Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Conv2d, ConvTranspose2d, BatchNorm2d, Sigmoid, init, BCELoss, CrossEntropyLoss, SmoothL1Loss\n'), ((6610, 6637), 'numpy.nonzero', 'np.nonzero', (['data[:, st + j]'], {}), '(data[:, st + j])\n', (6620, 6637), True, 'import numpy as np\n'), ((13852, 13878), 'torch.log', 'torch.log', (['(y_real + 0.0001)'], {}), '(y_real + 0.0001)\n', (13861, 13878), False, 'import torch\n'), ((15144, 15170), 'torch.log', 'torch.log', (['(y_fake + 0.0001)'], {}), '(y_fake + 0.0001)\n', (15153, 15170), False, 'import torch\n')]
import requests
import json
from bs4 import BeautifulSoup
from gamayun.gamayun_utils import report_result_with_maps_only
from gamayun.gamayun_utils import report_error
from gamayun.gamayun_utils import run_gamayun_script_logic


def parse_single_entry(entry):
    # test if this entry contains comment (if it doesn't it is an ad so we skip it)
    if entry.find("a", class_="comments") is not None:
        result = dict()
        result["title"] = entry.find("a", class_="title").text
        result["link"] = entry.find("a", class_="title")["href"]
        result["comments_link"] = entry.find("a", class_="comments")["href"]
        return result
    else:
        return None


def job_logic():
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux i686; rv:81.0) Gecko/20100101 Firefox/81.0'}
    page = requests.get(url="https://old.reddit.com/r/programming/", headers=headers)
    soup = BeautifulSoup(page.content, 'html.parser')
    result = [x for x in [parse_single_entry(entry) for entry in soup.find_all("div", class_="top-matter")] if x is not None]
    report_result_with_maps_only(result)


run_gamayun_script_logic(job_logic)
[ "bs4.BeautifulSoup", "gamayun.gamayun_utils.report_result_with_maps_only", "gamayun.gamayun_utils.run_gamayun_script_logic", "requests.get" ]
[((1110, 1145), 'gamayun.gamayun_utils.run_gamayun_script_logic', 'run_gamayun_script_logic', (['job_logic'], {}), '(job_logic)\n', (1134, 1145), False, 'from gamayun.gamayun_utils import run_gamayun_script_logic\n'), ((807, 881), 'requests.get', 'requests.get', ([], {'url': '"""https://old.reddit.com/r/programming/"""', 'headers': 'headers'}), "(url='https://old.reddit.com/r/programming/', headers=headers)\n", (819, 881), False, 'import requests\n'), ((897, 939), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (910, 939), False, 'from bs4 import BeautifulSoup\n'), ((1072, 1108), 'gamayun.gamayun_utils.report_result_with_maps_only', 'report_result_with_maps_only', (['result'], {}), '(result)\n', (1100, 1108), False, 'from gamayun.gamayun_utils import report_result_with_maps_only\n')]
""" util.py Some utility functions """ import os import numpy as np from sklearn.neighbors import BallTree, radius_neighbors_graph import networkx as nx __all__ = ["ORCA_PATH", "pbc", "orbits", "weights", "compute_graph"] ORCA_PATH = os.path.abspath(os.path.abspath(__file__) + "../../../orca/orca.exe") def pbc(x0, x1, dims): delta = np.abs(x0 - x1) delta = np.where(delta > 0.5 * dims, delta - dims, delta) return np.sqrt((delta**2).sum(axis=-1)) orbits = np.array([ 1, 2, 2, 2, 3, 4, 3, 3, 4, 3, 4, 4, 4, 4, 3, 4, 6, 5, 4, 5, 6, 6, 4, 4, 4, 5, 7, 4, 6, 6, 7, 4, 6, 6, 6, 5, 6, 7, 7, 5, 7, 6, 7, 6, 5, 5, 6, 8, 7, 6, 6, 8, 6, 9, 5, 6, 4, 6, 6, 7, 8, 6, 6, 8, 7, 6, 7, 7, 8, 5, 6, 6, 4 ], dtype=np.float) weights = 1. - np.log(orbits) / np.log(73.) def compute_graph(X, r_cut, **kwargs): if kwargs["dims"] is not None: BT = BallTree(X, metric=kwargs["metric"], dims=kwargs["dims"]) else: BT = BallTree(X, metric=kwargs["metric"]) rng_con = radius_neighbors_graph(BT, r_cut, n_jobs=1, mode='connectivity') A = np.matrix(rng_con.toarray()) G = nx.from_numpy_matrix(A) return G
[ "os.path.abspath", "numpy.abs", "networkx.from_numpy_matrix", "sklearn.neighbors.radius_neighbors_graph", "numpy.log", "numpy.where", "numpy.array", "sklearn.neighbors.BallTree" ]
[((477, 734), 'numpy.array', 'np.array', (['[1, 2, 2, 2, 3, 4, 3, 3, 4, 3, 4, 4, 4, 4, 3, 4, 6, 5, 4, 5, 6, 6, 4, 4, 4,\n 5, 7, 4, 6, 6, 7, 4, 6, 6, 6, 5, 6, 7, 7, 5, 7, 6, 7, 6, 5, 5, 6, 8, 7,\n 6, 6, 8, 6, 9, 5, 6, 4, 6, 6, 7, 8, 6, 6, 8, 7, 6, 7, 7, 8, 5, 6, 6, 4]'], {'dtype': 'np.float'}), '([1, 2, 2, 2, 3, 4, 3, 3, 4, 3, 4, 4, 4, 4, 3, 4, 6, 5, 4, 5, 6, 6,\n 4, 4, 4, 5, 7, 4, 6, 6, 7, 4, 6, 6, 6, 5, 6, 7, 7, 5, 7, 6, 7, 6, 5, 5,\n 6, 8, 7, 6, 6, 8, 6, 9, 5, 6, 4, 6, 6, 7, 8, 6, 6, 8, 7, 6, 7, 7, 8, 5,\n 6, 6, 4], dtype=np.float)\n', (485, 734), True, 'import numpy as np\n'), ((344, 359), 'numpy.abs', 'np.abs', (['(x0 - x1)'], {}), '(x0 - x1)\n', (350, 359), True, 'import numpy as np\n'), ((372, 421), 'numpy.where', 'np.where', (['(delta > 0.5 * dims)', '(delta - dims)', 'delta'], {}), '(delta > 0.5 * dims, delta - dims, delta)\n', (380, 421), True, 'import numpy as np\n'), ((1016, 1080), 'sklearn.neighbors.radius_neighbors_graph', 'radius_neighbors_graph', (['BT', 'r_cut'], {'n_jobs': '(1)', 'mode': '"""connectivity"""'}), "(BT, r_cut, n_jobs=1, mode='connectivity')\n", (1038, 1080), False, 'from sklearn.neighbors import BallTree, radius_neighbors_graph\n'), ((1126, 1149), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['A'], {}), '(A)\n', (1146, 1149), True, 'import networkx as nx\n'), ((253, 278), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (268, 278), False, 'import os\n'), ((766, 780), 'numpy.log', 'np.log', (['orbits'], {}), '(orbits)\n', (772, 780), True, 'import numpy as np\n'), ((783, 795), 'numpy.log', 'np.log', (['(73.0)'], {}), '(73.0)\n', (789, 795), True, 'import numpy as np\n'), ((884, 941), 'sklearn.neighbors.BallTree', 'BallTree', (['X'], {'metric': "kwargs['metric']", 'dims': "kwargs['dims']"}), "(X, metric=kwargs['metric'], dims=kwargs['dims'])\n", (892, 941), False, 'from sklearn.neighbors import BallTree, radius_neighbors_graph\n'), ((965, 1001), 'sklearn.neighbors.BallTree', 'BallTree', (['X'], {'metric': "kwargs['metric']"}), "(X, metric=kwargs['metric'])\n", (973, 1001), False, 'from sklearn.neighbors import BallTree, radius_neighbors_graph\n')]
from panda3d.core import Point3, TransformState, LQuaternion from panda3d.core import Camera, PerspectiveLens, OrthographicLens, CS_default, CS_zup_right, CS_yup_right, CS_zup_left, CS_yup_left, CS_invalid from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles from panda3d.core import GeomNode, PandaNode, NodePath, ModelRoot from panda3d.core import BamFile, BamWriter, Filename, Notify from panda3d.core import CollisionPolygon, CollisionNode import bpy import bmesh from mathutils.geometry import distance_point_to_plane ostream = Notify.out() list_object_support = {'MESH': False, 'PERSP': False, 'ORTHO': False, 'CAMERA':True} def show_message_box(message = "", title = "Message Box", icon = 'INFO'): def draw(self, context): self.layout.label(text = message) bpy.context.window_manager.popup_menu(draw, title = title, icon = icon) def checkcreate_dirs(path_project_save): # Проверяем существует ли директория, если нет то создаем. if not os.path.exists(path_project_save): try: os.makedirs(path_project_save) except OSError as error: #print(error) pass def bam_writer_file(path_save, obj): file = BamFile() file.openWrite(Filename.fromOsSpecific(path_save + '.bam')) writer: BamWriter = file.getWriter() writer.writeObject(obj) writer.flush() file.close() def conversion_transform(obj): pos = Point3(*obj.matrix_world.translation) quat = LQuaternion(*obj.matrix_world.to_quaternion()) scale = Point3(*obj.matrix_world.to_scale()) transform = TransformState.make_pos_quat_scale(pos, quat, scale) return transform def get_format(obj): color = False texcoord = False # Создаем новый массив. geom_vertex_format = GeomVertexArrayFormat() # Создаем колонку для вершин. geom_vertex_format.add_column("vertex", 3, Geom.NT_float32, Geom.C_point) geom_vertex_format.add_column("normal", 3, Geom.NT_float32, Geom.C_normal) # Проверка есть ли цвета вершин у объекта. if obj.data.vertex_colors.active: color = True # Создаем колонку для цвета c именем по умолчанию. geom_vertex_format.add_column("color", 4, Geom.NT_uint8, Geom.C_color) # Так же создаем дополнительные колонки. for col in obj.data.vertex_colors: # Если имя не совподает с активным. if not col.name == obj.data.vertex_colors.active.name: geom_vertex_format.add_column('color.{}'.format(col.name), 4, Geom.NT_uint8, Geom.C_color) # Проверка есть ли активные текстурные координаты у объекта. if obj.data.uv_layers.active: texcoord = True # Создаем колонку для координат c именем по умолчанию. geom_vertex_format.add_column("texcoord", 2, Geom.NT_float32, Geom.C_texcoord) # Так же создаем дополнительные колонки. for uv in obj.data.uv_layers: # Если имя не совподает с активным. if not uv.name == obj.data.uv_layers.active.name: geom_vertex_format.add_column('texcoord.{}'.format(uv.name), 2, Geom.NT_float32, Geom.C_texcoord) # Создаем формат. my_format = GeomVertexFormat() my_format.addArray(geom_vertex_format) # Регистрируем формат. end_format = GeomVertexFormat.registerFormat(my_format) return end_format, color, texcoord def geom_create(obj): geom_vertex_format = get_format(obj) color = geom_vertex_format[1] texcoord = geom_vertex_format[2] vdata = GeomVertexData(obj.data.name, geom_vertex_format[0], Geom.UHStatic) vdata.set_num_rows(len(obj.data.vertices)) vertex_position = GeomVertexWriter(vdata, 'vertex') normal_vertex = GeomVertexWriter(vdata, 'normal') # Если используются цвета вершин. 
if color: color_vertex_list = {'color': GeomVertexWriter(vdata, 'color')} # Так же создаем дополнительные слои. for col in obj.data.vertex_colors: # Если имя не совподает с активным. if not col.name == obj.data.vertex_colors.active.name: color_vertex_list[col.name] = GeomVertexWriter(vdata, 'color.{}'.format(col.name)) # Если используются координаты текстур. if texcoord: texcoord_vertex_list = {'texcoord': GeomVertexWriter(vdata, 'texcoord')} # Так же создаем дополнительные слои. for uv in obj.data.uv_layers: # Если имя не совподает с активным. if not uv.name == obj.data.uv_layers.active.name: texcoord_vertex_list[uv.name] = GeomVertexWriter(vdata, 'texcoord.{}'.format(uv.name)) # Запишем порядок треугольников. prim = GeomTriangles(Geom.UHStatic) prim.makeIndexed() prim.setIndexType(Geom.NT_uint32) mesh = obj.data mesh.calc_loop_triangles() # Сюда записиваются индексы обработаных вершин. list_vertext = {} # Проходим по треугольниуам. for triangle in mesh.loop_triangles: # Обработка первой вершины. if not triangle.loops[0] in list_vertext: vertex_position.set_row(triangle.loops[0]) normal_vertex.set_row(triangle.loops[0]) vertex_position.add_data3(obj.data.vertices[triangle.vertices[0]].co[0], obj.data.vertices[triangle.vertices[0]].co[1], obj.data.vertices[triangle.vertices[0]].co[2]) if triangle.use_smooth: normal_vertex.add_data3(obj.data.vertices[triangle.vertices[0]].normal[0], obj.data.vertices[triangle.vertices[0]].normal[1], obj.data.vertices[triangle.vertices[0]].normal[2]) else: normal_vertex.add_data3(triangle.normal[0], triangle.normal[1], triangle.normal[2]) if texcoord: for name in texcoord_vertex_list: texcoord_vertex_list[name].set_row(triangle.loops[0]) if name == 'texcoord': texcoord_vertex_list[name].addData2(obj.data.uv_layers.active.data[triangle.loops[0]].uv[0], obj.data.uv_layers.active.data[triangle.loops[0]].uv[1]) else: texcoord_vertex_list[name].addData2(obj.data.uv_layers[name].data[triangle.loops[0]].uv[0], obj.data.uv_layers[name].data[triangle.loops[0]].uv[1]) if color: for name in color_vertex_list: color_vertex_list[name].set_row(triangle.loops[0]) if name == 'color': color_vertex_list[name].addData4(obj.data.vertex_colors.active.data[triangle.loops[0]].color[0], obj.data.vertex_colors.active.data[triangle.loops[0]].color[1], obj.data.vertex_colors.active.data[triangle.loops[0]].color[2], obj.data.vertex_colors.active.data[triangle.loops[0]].color[3]) else: color_vertex_list[name].addData4(obj.data.vertex_colors[name].data[triangle.loops[0]].color[0], obj.data.vertex_colors[name].data[triangle.loops[0]].color[1], obj.data.vertex_colors[name].data[triangle.loops[0]].color[2], obj.data.vertex_colors[name].data[triangle.loops[0]].color[3]) list_vertext[triangle.loops[0]] = None # Обработка второй вершины. 
if not triangle.loops[1] in list_vertext: vertex_position.set_row(triangle.loops[1]) normal_vertex.set_row(triangle.loops[1]) vertex_position.add_data3(obj.data.vertices[triangle.vertices[1]].co[0], obj.data.vertices[triangle.vertices[1]].co[1], obj.data.vertices[triangle.vertices[1]].co[2]) if triangle.use_smooth: normal_vertex.add_data3(obj.data.vertices[triangle.vertices[1]].normal[0], obj.data.vertices[triangle.vertices[1]].normal[1], obj.data.vertices[triangle.vertices[1]].normal[2]) else: normal_vertex.add_data3(triangle.normal[0], triangle.normal[1], triangle.normal[2]) if texcoord: for name in texcoord_vertex_list: texcoord_vertex_list[name].set_row(triangle.loops[1]) if name == 'texcoord': texcoord_vertex_list[name].addData2(obj.data.uv_layers.active.data[triangle.loops[1]].uv[0], obj.data.uv_layers.active.data[triangle.loops[1]].uv[1]) else: texcoord_vertex_list[name].addData2(obj.data.uv_layers[name].data[triangle.loops[1]].uv[0], obj.data.uv_layers[name].data[triangle.loops[1]].uv[1]) if color: for name in color_vertex_list: color_vertex_list[name].set_row(triangle.loops[1]) if name == 'color': color_vertex_list[name].addData4(obj.data.vertex_colors.active.data[triangle.loops[1]].color[0], obj.data.vertex_colors.active.data[triangle.loops[1]].color[1], obj.data.vertex_colors.active.data[triangle.loops[1]].color[2], obj.data.vertex_colors.active.data[triangle.loops[1]].color[3]) else: color_vertex_list[name].addData4(obj.data.vertex_colors[name].data[triangle.loops[1]].color[0], obj.data.vertex_colors[name].data[triangle.loops[1]].color[1], obj.data.vertex_colors[name].data[triangle.loops[1]].color[2], obj.data.vertex_colors[name].data[triangle.loops[1]].color[3]) list_vertext[triangle.loops[1]] = None # Обработка третьей вершины. if not triangle.loops[2] in list_vertext: vertex_position.set_row(triangle.loops[2]) normal_vertex.set_row(triangle.loops[2]) vertex_position.add_data3(obj.data.vertices[triangle.vertices[2]].co[0], obj.data.vertices[triangle.vertices[2]].co[1], obj.data.vertices[triangle.vertices[2]].co[2]) if triangle.use_smooth: normal_vertex.add_data3(obj.data.vertices[triangle.vertices[2]].normal[0], obj.data.vertices[triangle.vertices[2]].normal[1], obj.data.vertices[triangle.vertices[2]].normal[2]) else: normal_vertex.add_data3(triangle.normal[0], triangle.normal[1], triangle.normal[2]) if texcoord: for name in texcoord_vertex_list: texcoord_vertex_list[name].set_row(triangle.loops[2]) if name == 'texcoord': texcoord_vertex_list[name].addData2(obj.data.uv_layers.active.data[triangle.loops[2]].uv[0], obj.data.uv_layers.active.data[triangle.loops[2]].uv[1]) else: texcoord_vertex_list[name].addData2(obj.data.uv_layers[name].data[triangle.loops[2]].uv[0], obj.data.uv_layers[name].data[triangle.loops[2]].uv[1]) if color: for name in color_vertex_list: color_vertex_list[name].set_row(triangle.loops[2]) if name == 'color': color_vertex_list[name].addData4(obj.data.vertex_colors.active.data[triangle.loops[2]].color[0], obj.data.vertex_colors.active.data[triangle.loops[2]].color[1], obj.data.vertex_colors.active.data[triangle.loops[2]].color[2], obj.data.vertex_colors.active.data[triangle.loops[2]].color[3]) else: color_vertex_list[name].addData4(obj.data.vertex_colors[name].data[triangle.loops[2]].color[0], obj.data.vertex_colors[name].data[triangle.loops[2]].color[1], obj.data.vertex_colors[name].data[triangle.loops[2]].color[2], obj.data.vertex_colors[name].data[triangle.loops[2]].color[3]) list_vertext[triangle.loops[2]] = None # Добавляем вершины в примитив. 
prim.addVertices(triangle.loops[0], triangle.loops[1], triangle.loops[2]) prim.closePrimitive() geom = Geom(vdata) geom.addPrimitive(prim) return geom def select_not_quad(obj): not_quad = [] for poly in obj.data.polygons: if len(poly.vertices) >= 5: not_quad.append(poly) for i in obj.data.vertices: i.select=False for i in obj.data.edges: i.select=False for i in obj.data.polygons: i.select = False for poly in not_quad: poly.select = True bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.select_mode(type="FACE") def check_coplanar(obj, poly): status = False # Если вершины три, это значит полигон автоматически копланарен. if len(poly.vertices) == 3: status = True elif len(poly.vertices) >= 3: v1 = obj.data.vertices[poly.vertices[1]].co - obj.data.vertices[poly.vertices[0]].co v2 = obj.data.vertices[poly.vertices[2]].co - obj.data.vertices[poly.vertices[0]].co for index in poly.vertices[3:]: if abs(distance_point_to_plane(obj.data.vertices[index].co, obj.data.vertices[poly.vertices[0]].co, v1.cross(v2))) < 1e-6: status = True else: status = False return status def select_not_coplanar(obj): not_coplanar = [] for poly in obj.data.polygons: if not check_coplanar(obj, poly): not_coplanar.append(poly) for i in obj.data.vertices: i.select=False for i in obj.data.edges: i.select=False for i in obj.data.polygons: i.select = False for poly in not_coplanar: poly.select = True bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.select_mode(type="FACE") def triangle_poly(poly, obj): trangle = {} triangulator3 = Triangulator3() index_tr = 0 for index in poly.vertices: triangulator3.add_polygon_vertex(index_tr) triangulator3.add_vertex(*obj.data.vertices[index].co) index_tr += 1 triangulator3.triangulate() for i in range(triangulator3.getNumTriangles()): v0 = triangulator3.get_vertex(triangulator3.get_triangle_v0(i)) v1 = triangulator3.get_vertex(triangulator3.get_triangle_v1(i)) v2 = triangulator3.get_vertex(triangulator3.get_triangle_v2(i)) trangle[i] = ((v0[0], v0[1], v0[2]), (v1[0], v1[1], v1[2]), (v2[0], v2[1], v2[2])) return trangle def add_polygons_to_dict(dict_named, poly, obj): # Если нет такого ключа в словаре. if not obj.data.materials[poly.material_index].name in dict_named: # Дабавляем ключ и список. dict_named[obj.data.materials[poly.material_index].name] = [poly] else: # Если есть такой ключ, добавляем к списку. dict_named[obj.data.materials[poly.material_index].name].append(poly) def colnode_add_dict(collision_node_dict, quad, name): if name in collision_node_dict: collision_node_dict[name].add_solid(quad) else: collision_node = CollisionNode(name) collision_node.add_solid(quad) collision_node_dict[name] = collision_node def collision_polygon_create(obj, scene): named_triangles = {} named_coplanar = {} named_not_coplanar = {} named_not_quad = {} triangles = [] coplanar = [] not_coplanar = [] not_quad = [] # Перебираем полигоны объекта. for poly in obj.data.polygons: # Если список материалов не пуст. if len(obj.data.materials) >= 1: # Если есть слот материала и он содержит имя, рассортировываем их по словарям под этим именем. if hasattr(obj.data.materials[poly.material_index], 'name'): # Если полигон из трех вершин, проверка на компланарность не нужна. if len(poly.vertices) == 3: for index in poly.vertices[2:]: add_polygons_to_dict(named_triangles, poly, obj) # Если у полигона четыре вершины, необходимо проверить на компланарность. 
elif len(poly.vertices) == 4: # Если полигон компланарный if check_coplanar(obj, poly): add_polygons_to_dict(named_coplanar, poly, obj) else: add_polygons_to_dict(named_not_coplanar, poly, obj) # Если у полигона более четырех вершин, необходимо разбить на треугольники. elif len(poly.vertices) >= 4: add_polygons_to_dict(named_not_quad, poly, obj) # Если нет материала, то рассортировываем по спискам else: # Если полигон из трех вершин, проверка на компланарность не нужна. if len(poly.vertices) == 3: for index in poly.vertices[2:]: triangles.append(poly) # Если у полигона четыре вершины, необходимо проверить на компланарность. elif len(poly.vertices) == 4: if check_coplanar(obj, poly): coplanar.append(poly) else: not_coplanar.append(poly) # Если у полигона более четырех вершин, необходимо разбить на треугольники. elif len(poly.vertices) >= 4: not_quad.append(poly) else: # Если полигон из трех вершин, проверка на компланарность не нужна. if len(poly.vertices) == 3: for index in poly.vertices[2:]: triangles.append(poly) # Если у полигона четыре вершины, необходимо проверить на компланарность. elif len(poly.vertices) == 4: if check_coplanar(obj, poly): coplanar.append(poly) else: not_coplanar.append(poly) # Если у полигона более четырех вершин, необходимо разбить на треугольники. elif len(poly.vertices) >= 4: not_quad.append(poly) ######################## ######################## group = NodePath(obj.name) collision_node_dict = {} vertext_quad = [] # Создаем полигоны столкновения из треугольников. for name in named_triangles: for poly in named_triangles[name]: for index in poly.vertices: vertext_quad.append(Point3(*obj.data.vertices[index].co)) colnode_add_dict(collision_node_dict, CollisionPolygon(vertext_quad[0], vertext_quad[1], vertext_quad[2]), name) vertext_quad = [] # Создаем полигоны столкновения из компланарных прямольников. for name in named_coplanar: for poly in named_coplanar[name]: for index in poly.vertices: vertext_quad.append(Point3(*obj.data.vertices[index].co)) colnode_add_dict(collision_node_dict, CollisionPolygon(vertext_quad[0], vertext_quad[1], vertext_quad[2], vertext_quad[3]), name) vertext_quad = [] # Создаем полигоны столкновения из некомпланарных прямольников. for name in named_not_coplanar: # Нужно разбить некомпланарные полигоны, на треугольники. for poly in named_not_coplanar[name]: for vertext in triangle_poly(poly, obj).values(): colnode_add_dict(collision_node_dict, CollisionPolygon(vertext[0], vertext[1], vertext[2]), name) # Создаем полигоны столкновения из многоугольников. for name in named_not_quad: # Нужно разбить многоугольники на треугольники. 
for poly in named_not_quad[name]: for vertext in triangle_poly(poly, obj).values(): colnode_add_dict(collision_node_dict, CollisionPolygon(vertext[0], vertext[1], vertext[2]), name) for collision_node in collision_node_dict.values(): from_mask = '{}{}{}{}{}{}{}{}'.format(obj.data.materials[collision_node.name].hatcher.from_mask_1.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_2.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_3.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_4.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_5.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_6.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_7.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.from_mask_8.decode('utf-8')) collision_node.setFromCollideMask(int(from_mask, 2)) into_mask = '{}{}{}{}{}{}{}{}'.format(obj.data.materials[collision_node.name].hatcher.into_mask_1.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_2.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_3.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_4.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_5.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_6.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_7.decode('utf-8'), obj.data.materials[collision_node.name].hatcher.into_mask_8.decode('utf-8')) collision_node.setIntoCollideMask(int(into_mask, 2)) node_path = NodePath(collision_node) node_path.reparentTo(group) if obj.data.materials[collision_node.name].hatcher.visibility_collision_polygons: node_path.show() collision_node = CollisionNode(obj.name) # Создаем полигоны столкновения из треугольников. for poly in triangles: for index in poly.vertices: vertext_quad.append(Point3(*obj.data.vertices[index].co)) quad = CollisionPolygon(vertext_quad[0], vertext_quad[1], vertext_quad[2]) collision_node.add_solid(quad) vertext_quad = [] # Создаем полигоны столкновения из компланарных прямольников. for poly in coplanar: for index in poly.vertices: vertext_quad.append(Point3(*obj.data.vertices[index].co)) quad = CollisionPolygon(vertext_quad[0], vertext_quad[1], vertext_quad[2], vertext_quad[3]) collision_node.add_solid(quad) vertext_quad = [] # Нужно разбить некомпланарные полигоны, на треугольники. for poly in not_coplanar: for vertext in triangle_poly(poly, obj).values(): quad = CollisionPolygon(vertext[0], vertext[1], vertext[2]) collision_node.add_solid(quad) # Нужно разбить полигоны у которых более четырех сторон на треугольники. 
for poly in not_quad: for vertext in triangle_poly(poly, obj).values(): quad = CollisionPolygon(vertext[0], vertext[1], vertext[2]) collision_node.add_solid(quad) from_mask = '{}{}{}{}{}{}{}{}'.format(obj.hatcher.from_mask_1.decode('utf-8'), obj.hatcher.from_mask_2.decode('utf-8'), obj.hatcher.from_mask_3.decode('utf-8'), obj.hatcher.from_mask_4.decode('utf-8'), obj.hatcher.from_mask_5.decode('utf-8'), obj.hatcher.from_mask_6.decode('utf-8'), obj.hatcher.from_mask_7.decode('utf-8'), obj.hatcher.from_mask_8.decode('utf-8')) collision_node.setFromCollideMask(int(from_mask, 2)) into_mask = '{}{}{}{}{}{}{}{}'.format(obj.hatcher.into_mask_1.decode('utf-8'), obj.hatcher.into_mask_2.decode('utf-8'), obj.hatcher.into_mask_3.decode('utf-8'), obj.hatcher.into_mask_4.decode('utf-8'), obj.hatcher.into_mask_5.decode('utf-8'), obj.hatcher.into_mask_6.decode('utf-8'), obj.hatcher.into_mask_7.decode('utf-8'), obj.hatcher.into_mask_8.decode('utf-8')) collision_node.setIntoCollideMask(int(into_mask, 2)) # Если полигон столкновения содержит тела. if collision_node.getNumSolids() >= 1: node_path = NodePath(collision_node) node_path.reparentTo(group) # Если стоит флажок показывать полигон столкновения. if obj.hatcher.visibility_collision_polygons: node_path.show() return group.node().getChild(0) def geom_node_create(obj, scene): geom = geom_create(obj) geom_node = GeomNode(obj.data.name) geom_node.addGeom(geom) return geom_node def camera_create(obj, scene): frame_size = obj.data.view_frame(scene = scene) if obj.data.type == 'PERSP': lens = PerspectiveLens() if obj.data.type == 'ORTHO': lens = OrthographicLens() lens.set_film_size(abs(frame_size[0][0]) + abs(frame_size[1][0]), abs(frame_size[0][1]) + abs(frame_size[1][1])) lens.set_focal_length(abs(frame_size[0][2])) lens.set_near_far(obj.data.clip_start, obj.data.clip_end) if obj.hatcher.coordinate_system == "CS_default": lens.set_coordinate_system(CS_default) if obj.hatcher.coordinate_system == "CS_zup_right": lens.set_coordinate_system(CS_zup_right) if obj.hatcher.coordinate_system == "CS_yup_right": lens.set_coordinate_system(CS_yup_right) if obj.hatcher.coordinate_system == "CS_zup_left": lens.set_coordinate_system(CS_zup_left) if obj.hatcher.coordinate_system == "CS_yup_left": lens.set_coordinate_system(CS_yup_left) if obj.hatcher.coordinate_system == "CS_invalid": lens.set_coordinate_system(CS_invalid) camera = Camera(obj.data.name) camera.active = obj.hatcher.camera_active bit = '{}{}{}{}{}{}{}{}'.format(obj.hatcher.draw_mask_1.decode('utf-8'), obj.hatcher.draw_mask_2.decode('utf-8'), obj.hatcher.draw_mask_3.decode('utf-8'), obj.hatcher.draw_mask_4.decode('utf-8'), obj.hatcher.draw_mask_5.decode('utf-8'), obj.hatcher.draw_mask_6.decode('utf-8'), obj.hatcher.draw_mask_7.decode('utf-8'), obj.hatcher.draw_mask_8.decode('utf-8')) camera.camera_mask = int(bit, 2) camera.set_lens(lens) return camera def build_hierarchy(obj, scene): # Узел для формирование иерархии root = NodePath("root") # Выполним рекурсию, для поиска всех. def recurse(obj, parent): # Переменая которая содережит функцию необходимую для экспорта данного типа объекта. create_object = None # Если объект является сеткой. if obj.type == "MESH": if obj.hatcher.type_mesh == "Render": create_object = geom_node_create if obj.hatcher.type_mesh == "Collision": create_object = collision_polygon_create # Если объект является источником цвета. if obj.type == "LIGHT": create_object = "LIGHT" # Если объект является камерой. 
if obj.type == "CAMERA": if obj.data.type != 'PANO': create_object = camera_create # Если есть родитель. if not parent: npp = NodePath(create_object(obj, scene)) #npp.setName(obj.name) #npp.show() npp.reparentTo(root) npp.set_transform(root, conversion_transform(obj)) else: # Если нет родителя. np = NodePath(create_object(obj, scene)) #np.setName(obj.name) #np.show() # Проверяем есть ли такой объект в иерархии. result = root.find('**/{}'.format(parent.name)) if result: np.reparentTo(result) np.set_transform(root, conversion_transform(obj)) else: np.reparentTo(root) np.set_transform(root, conversion_transform(obj)) # Проходим по детям. for child in obj.children: recurse(child, obj) recurse(obj, obj.parent) return root.node().getChild(0) import os from datetime import datetime class ExportObject(bpy.types.Operator): bl_idname = "ui.export_object" bl_label = "Generator_object" def execute(self, context): start_time = datetime.now() context.view_layer.update() # Перебираем список выбранных объектов. for obj in context.selected_objects: # Объединяем путь проекта и относительную директорию модели. path_project_save = os.path.join(context.scene.hatcher.ful_path_project, obj.hatcher.rel_path_object) # Проверяем существует ли директория, если нет то создаем. checkcreate_dirs(path_project_save) # Объединяем путь директории и имя файла. path_save = os.path.join(path_project_save, obj.name) node = build_hierarchy(obj, context.scene) root = ModelRoot('{}.bam'.format(obj.name)) root.add_child(node) bam_writer_file(path_save, root) show_message_box('Export object: {} completed, time: {}'.format(obj.name, datetime.now() - start_time), "Message") return {'FINISHED'} class ExportScene(bpy.types.Operator): bl_idname = "ui.export_scene" bl_label = "Generator_scene" def execute(self, context): start_time = datetime.now() context.view_layer.update() # Объединяем путь проекта и относительную директорию сцены. path_project_save = os.path.join(context.scene.hatcher.ful_path_project, context.scene.hatcher.rel_path_scene) # Проверяем существует ли директория, если нет то создаем. checkcreate_dirs(path_project_save) # Создаем корень для объединения. root = ModelRoot('{}.bam'.format(context.scene.name)) # Пройдем по всем объектом в сцене. for obj in context.scene.objects: # Нас интересуют объекты только без родителя. if not obj.parent: # Проверим есть ли данный тип объекта среди поддерживаемых. if obj.type in list_object_support: # Если есть ли подтип. if list_object_support[obj.type]: if not obj.data.type == 'PANO': node = build_hierarchy(obj, context.scene) root.add_child(node) else: node = build_hierarchy(obj, context.scene) root.add_child(node) # Объединяем путь директории и имя сцены. path_save = os.path.join(path_project_save, context.scene.name) bam_writer_file(path_save, root) show_message_box('Export scene, completed, time: {}'.format(datetime.now() - start_time), "Message") return {'FINISHED'} class ExportSelected(bpy.types.Operator): bl_idname = "ui.export_selected" bl_label = "Generator_selected" def execute(self, context): start_time = datetime.now() context.view_layer.update() # Объединяем путь проекта и относительную директорию сцены. path_project_save = os.path.join(context.scene.hatcher.ful_path_project, context.scene.hatcher.rel_path_other) # Проверяем существует ли директория, если нет то создаем. checkcreate_dirs(path_project_save) # Если поле имени файла заполнено, то объеденяем в один файл. if not context.scene.hatcher.file_name_selected == '': # Создаем корень для объединения. 
root = ModelRoot('{}.bam'.format(context.scene.hatcher.file_name_selected)) # Перебираем список выбранных объектов. for obj in context.selected_objects: # Проверим есть ли данный тип объекта среди поддерживаемых. if obj.type in list_object_support: # Если есть ли подтип. if list_object_support[obj.type]: if not obj.data.type == 'PANO': node = build_hierarchy(obj, context.scene) root.add_child(node) else: node = build_hierarchy(obj, context.scene) root.add_child(node) # Объединяем путь директории и имя файла. path_save = os.path.join(path_project_save, context.scene.hatcher.file_name_selected) bam_writer_file(path_save, root) # Если нет, то раздельно. else: # Перебираем список выбранных объектов. for obj in context.selected_objects: # Проверим есть ли данный тип объекта среди поддерживаемых. if obj.type in list_object_support: # Если есть ли подтип. if list_object_support[obj.type]: if not obj.data.type == 'PANO': node = build_hierarchy(obj, context.scene) # Объединяем путь директории и имя файла. path_save = os.path.join(path_project_save, obj.name) bam_writer_file(path_save, node) else: node = build_hierarchy(obj, context.scene) # Объединяем путь директории и имя файла. path_save = os.path.join(path_project_save, obj.name) bam_writer_file(path_save, node) show_message_box('Export selected, completed, time: {}'.format(datetime.now() - start_time), "Message") return {'FINISHED'} class CheckingCoplanarity(bpy.types.Operator): bl_idname = "ui.check_coplanarity" bl_label = "Checking_coplanarity" def execute(self, context): select_not_coplanar(context.object) return {'FINISHED'} class CheckingQuad(bpy.types.Operator): bl_idname = "ui.check_quad" bl_label = "Checking_quad" def execute(self, context): select_not_quad(context.object) return {'FINISHED'}
[ "panda3d.core.GeomVertexWriter", "panda3d.core.CollisionNode", "panda3d.core.GeomVertexFormat", "panda3d.core.GeomVertexFormat.registerFormat", "panda3d.core.GeomVertexData", "panda3d.core.Point3", "panda3d.core.PerspectiveLens", "os.path.join", "panda3d.core.CollisionPolygon", "panda3d.core.Filename.fromOsSpecific", "panda3d.core.Camera", "panda3d.core.GeomVertexArrayFormat", "os.path.exists", "bpy.context.window_manager.popup_menu", "bpy.ops.mesh.select_mode", "panda3d.core.Triangulator3", "panda3d.core.GeomTriangles", "datetime.datetime.now", "panda3d.core.OrthographicLens", "panda3d.core.Geom", "panda3d.core.NodePath", "os.makedirs", "panda3d.core.GeomNode", "panda3d.core.TransformState.make_pos_quat_scale", "bpy.ops.object.mode_set", "panda3d.core.Notify.out", "panda3d.core.BamFile" ]
[((618, 630), 'panda3d.core.Notify.out', 'Notify.out', ([], {}), '()\n', (628, 630), False, 'from panda3d.core import BamFile, BamWriter, Filename, Notify\n'), ((869, 936), 'bpy.context.window_manager.popup_menu', 'bpy.context.window_manager.popup_menu', (['draw'], {'title': 'title', 'icon': 'icon'}), '(draw, title=title, icon=icon)\n', (906, 936), False, 'import bpy\n'), ((1275, 1284), 'panda3d.core.BamFile', 'BamFile', ([], {}), '()\n', (1282, 1284), False, 'from panda3d.core import BamFile, BamWriter, Filename, Notify\n'), ((1499, 1536), 'panda3d.core.Point3', 'Point3', (['*obj.matrix_world.translation'], {}), '(*obj.matrix_world.translation)\n', (1505, 1536), False, 'from panda3d.core import Point3, TransformState, LQuaternion\n'), ((1660, 1712), 'panda3d.core.TransformState.make_pos_quat_scale', 'TransformState.make_pos_quat_scale', (['pos', 'quat', 'scale'], {}), '(pos, quat, scale)\n', (1694, 1712), False, 'from panda3d.core import Point3, TransformState, LQuaternion\n'), ((1851, 1874), 'panda3d.core.GeomVertexArrayFormat', 'GeomVertexArrayFormat', ([], {}), '()\n', (1872, 1874), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((3268, 3286), 'panda3d.core.GeomVertexFormat', 'GeomVertexFormat', ([], {}), '()\n', (3284, 3286), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((3375, 3417), 'panda3d.core.GeomVertexFormat.registerFormat', 'GeomVertexFormat.registerFormat', (['my_format'], {}), '(my_format)\n', (3406, 3417), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((3609, 3676), 'panda3d.core.GeomVertexData', 'GeomVertexData', (['obj.data.name', 'geom_vertex_format[0]', 'Geom.UHStatic'], {}), '(obj.data.name, geom_vertex_format[0], Geom.UHStatic)\n', (3623, 3676), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((3747, 3780), 'panda3d.core.GeomVertexWriter', 'GeomVertexWriter', (['vdata', '"""vertex"""'], {}), "(vdata, 'vertex')\n", (3763, 3780), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((3801, 3834), 'panda3d.core.GeomVertexWriter', 'GeomVertexWriter', (['vdata', '"""normal"""'], {}), "(vdata, 'normal')\n", (3817, 3834), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((4754, 4782), 'panda3d.core.GeomTriangles', 'GeomTriangles', (['Geom.UHStatic'], {}), '(Geom.UHStatic)\n', (4767, 4782), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((12779, 12790), 'panda3d.core.Geom', 'Geom', (['vdata'], {}), '(vdata)\n', (12783, 12790), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((13214, 13250), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (13237, 13250), False, 'import bpy\n'), ((13255, 13292), 'bpy.ops.mesh.select_mode', 'bpy.ops.mesh.select_mode', ([], {'type': '"""FACE"""'}), "(type='FACE')\n", (13279, 13292), False, 'import bpy\n'), 
((14380, 14416), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (14403, 14416), False, 'import bpy\n'), ((14421, 14458), 'bpy.ops.mesh.select_mode', 'bpy.ops.mesh.select_mode', ([], {'type': '"""FACE"""'}), "(type='FACE')\n", (14445, 14458), False, 'import bpy\n'), ((14529, 14544), 'panda3d.core.Triangulator3', 'Triangulator3', ([], {}), '()\n', (14542, 14544), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((18851, 18869), 'panda3d.core.NodePath', 'NodePath', (['obj.name'], {}), '(obj.name)\n', (18859, 18869), False, 'from panda3d.core import GeomNode, PandaNode, NodePath, ModelRoot\n'), ((22512, 22535), 'panda3d.core.CollisionNode', 'CollisionNode', (['obj.name'], {}), '(obj.name)\n', (22525, 22535), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((25170, 25193), 'panda3d.core.GeomNode', 'GeomNode', (['obj.data.name'], {}), '(obj.data.name)\n', (25178, 25193), False, 'from panda3d.core import GeomNode, PandaNode, NodePath, ModelRoot\n'), ((26346, 26367), 'panda3d.core.Camera', 'Camera', (['obj.data.name'], {}), '(obj.data.name)\n', (26352, 26367), False, 'from panda3d.core import Camera, PerspectiveLens, OrthographicLens, CS_default, CS_zup_right, CS_yup_right, CS_zup_left, CS_yup_left, CS_invalid\n'), ((26985, 27001), 'panda3d.core.NodePath', 'NodePath', (['"""root"""'], {}), "('root')\n", (26993, 27001), False, 'from panda3d.core import GeomNode, PandaNode, NodePath, ModelRoot\n'), ((1058, 1091), 'os.path.exists', 'os.path.exists', (['path_project_save'], {}), '(path_project_save)\n', (1072, 1091), False, 'import os\n'), ((1304, 1347), 'panda3d.core.Filename.fromOsSpecific', 'Filename.fromOsSpecific', (["(path_save + '.bam')"], {}), "(path_save + '.bam')\n", (1327, 1347), False, 'from panda3d.core import BamFile, BamWriter, Filename, Notify\n'), ((15742, 15761), 'panda3d.core.CollisionNode', 'CollisionNode', (['name'], {}), '(name)\n', (15755, 15761), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((22309, 22333), 'panda3d.core.NodePath', 'NodePath', (['collision_node'], {}), '(collision_node)\n', (22317, 22333), False, 'from panda3d.core import GeomNode, PandaNode, NodePath, ModelRoot\n'), ((22739, 22806), 'panda3d.core.CollisionPolygon', 'CollisionPolygon', (['vertext_quad[0]', 'vertext_quad[1]', 'vertext_quad[2]'], {}), '(vertext_quad[0], vertext_quad[1], vertext_quad[2])\n', (22755, 22806), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((23086, 23174), 'panda3d.core.CollisionPolygon', 'CollisionPolygon', (['vertext_quad[0]', 'vertext_quad[1]', 'vertext_quad[2]', 'vertext_quad[3]'], {}), '(vertext_quad[0], vertext_quad[1], vertext_quad[2],\n vertext_quad[3])\n', (23102, 23174), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((24842, 24866), 'panda3d.core.NodePath', 'NodePath', (['collision_node'], {}), '(collision_node)\n', (24850, 24866), False, 'from panda3d.core import GeomNode, PandaNode, NodePath, ModelRoot\n'), ((25378, 25395), 'panda3d.core.PerspectiveLens', 'PerspectiveLens', ([], {}), '()\n', (25393, 25395), False, 'from panda3d.core import Camera, PerspectiveLens, OrthographicLens, CS_default, CS_zup_right, CS_yup_right, CS_zup_left, CS_yup_left, CS_invalid\n'), ((25445, 25463), 'panda3d.core.OrthographicLens', 'OrthographicLens', ([], {}), '()\n', (25461, 25463), False, 'from panda3d.core import Camera, PerspectiveLens, 
OrthographicLens, CS_default, CS_zup_right, CS_yup_right, CS_zup_left, CS_yup_left, CS_invalid\n'), ((28911, 28925), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28923, 28925), False, 'from datetime import datetime\n'), ((30006, 30020), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (30018, 30020), False, 'from datetime import datetime\n'), ((30155, 30250), 'os.path.join', 'os.path.join', (['context.scene.hatcher.ful_path_project', 'context.scene.hatcher.rel_path_scene'], {}), '(context.scene.hatcher.ful_path_project, context.scene.hatcher.\n rel_path_scene)\n', (30167, 30250), False, 'import os\n'), ((31684, 31698), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (31696, 31698), False, 'from datetime import datetime\n'), ((31833, 31928), 'os.path.join', 'os.path.join', (['context.scene.hatcher.ful_path_project', 'context.scene.hatcher.rel_path_other'], {}), '(context.scene.hatcher.ful_path_project, context.scene.hatcher.\n rel_path_other)\n', (31845, 31928), False, 'import os\n'), ((1118, 1148), 'os.makedirs', 'os.makedirs', (['path_project_save'], {}), '(path_project_save)\n', (1129, 1148), False, 'import os\n'), ((3926, 3958), 'panda3d.core.GeomVertexWriter', 'GeomVertexWriter', (['vdata', '"""color"""'], {}), "(vdata, 'color')\n", (3942, 3958), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((4370, 4405), 'panda3d.core.GeomVertexWriter', 'GeomVertexWriter', (['vdata', '"""texcoord"""'], {}), "(vdata, 'texcoord')\n", (4386, 4405), False, 'from panda3d.core import GeomVertexArrayFormat, Geom, GeomVertexFormat, GeomVertexData, GeomVertexWriter, Triangulator3, GeomTriangles\n'), ((23406, 23458), 'panda3d.core.CollisionPolygon', 'CollisionPolygon', (['vertext[0]', 'vertext[1]', 'vertext[2]'], {}), '(vertext[0], vertext[1], vertext[2])\n', (23422, 23458), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((23683, 23735), 'panda3d.core.CollisionPolygon', 'CollisionPolygon', (['vertext[0]', 'vertext[1]', 'vertext[2]'], {}), '(vertext[0], vertext[1], vertext[2])\n', (23699, 23735), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((29162, 29248), 'os.path.join', 'os.path.join', (['context.scene.hatcher.ful_path_project', 'obj.hatcher.rel_path_object'], {}), '(context.scene.hatcher.ful_path_project, obj.hatcher.\n rel_path_object)\n', (29174, 29248), False, 'import os\n'), ((29443, 29484), 'os.path.join', 'os.path.join', (['path_project_save', 'obj.name'], {}), '(path_project_save, obj.name)\n', (29455, 29484), False, 'import os\n'), ((33025, 33098), 'os.path.join', 'os.path.join', (['path_project_save', 'context.scene.hatcher.file_name_selected'], {}), '(path_project_save, context.scene.hatcher.file_name_selected)\n', (33037, 33098), False, 'import os\n'), ((19218, 19285), 'panda3d.core.CollisionPolygon', 'CollisionPolygon', (['vertext_quad[0]', 'vertext_quad[1]', 'vertext_quad[2]'], {}), '(vertext_quad[0], vertext_quad[1], vertext_quad[2])\n', (19234, 19285), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((19628, 19716), 'panda3d.core.CollisionPolygon', 'CollisionPolygon', (['vertext_quad[0]', 'vertext_quad[1]', 'vertext_quad[2]', 'vertext_quad[3]'], {}), '(vertext_quad[0], vertext_quad[1], vertext_quad[2],\n vertext_quad[3])\n', (19644, 19716), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((22686, 22722), 'panda3d.core.Point3', 'Point3', 
(['*obj.data.vertices[index].co'], {}), '(*obj.data.vertices[index].co)\n', (22692, 22722), False, 'from panda3d.core import Point3, TransformState, LQuaternion\n'), ((23033, 23069), 'panda3d.core.Point3', 'Point3', (['*obj.data.vertices[index].co'], {}), '(*obj.data.vertices[index].co)\n', (23039, 23069), False, 'from panda3d.core import Point3, TransformState, LQuaternion\n'), ((31273, 31324), 'os.path.join', 'os.path.join', (['path_project_save', 'context.scene.name'], {}), '(path_project_save, context.scene.name)\n', (31285, 31324), False, 'import os\n'), ((19130, 19166), 'panda3d.core.Point3', 'Point3', (['*obj.data.vertices[index].co'], {}), '(*obj.data.vertices[index].co)\n', (19136, 19166), False, 'from panda3d.core import Point3, TransformState, LQuaternion\n'), ((19540, 19576), 'panda3d.core.Point3', 'Point3', (['*obj.data.vertices[index].co'], {}), '(*obj.data.vertices[index].co)\n', (19546, 19576), False, 'from panda3d.core import Point3, TransformState, LQuaternion\n'), ((20083, 20135), 'panda3d.core.CollisionPolygon', 'CollisionPolygon', (['vertext[0]', 'vertext[1]', 'vertext[2]'], {}), '(vertext[0], vertext[1], vertext[2])\n', (20099, 20135), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((20446, 20498), 'panda3d.core.CollisionPolygon', 'CollisionPolygon', (['vertext[0]', 'vertext[1]', 'vertext[2]'], {}), '(vertext[0], vertext[1], vertext[2])\n', (20462, 20498), False, 'from panda3d.core import CollisionPolygon, CollisionNode\n'), ((31444, 31458), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (31456, 31458), False, 'from datetime import datetime\n'), ((34239, 34253), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (34251, 34253), False, 'from datetime import datetime\n'), ((29775, 29789), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (29787, 29789), False, 'from datetime import datetime\n'), ((34068, 34109), 'os.path.join', 'os.path.join', (['path_project_save', 'obj.name'], {}), '(path_project_save, obj.name)\n', (34080, 34109), False, 'import os\n'), ((33770, 33811), 'os.path.join', 'os.path.join', (['path_project_save', 'obj.name'], {}), '(path_project_save, obj.name)\n', (33782, 33811), False, 'import os\n')]
# -*- coding: utf-8 -*- import pandas as pd import plotly.graph_objs as go import requests from base64 import b64encode as be from dash_html_components import Th, Tr, Td, A from datetime import datetime, timedelta from flask import request from folium import Map from operator import itemgetter from os.path import join, dirname, realpath from random import randint from requests.auth import HTTPBasicAuth from .maputils import create_dcircle_marker, create_tcircle_marker from .utils import ( api_request_to_json, json_to_dataframe, starttime_str_to_seconds, ) TMP = join(dirname(realpath(__file__)), '../tmp/') LCL = join(dirname(realpath(__file__)), '../images/') def get_rsam(ch, st): j = api_request_to_json(f'rsam?channel={ch}&starttime={st}') data = [] d = pd.DataFrame(j['records'][ch]) if not d.empty: d.set_index('date', inplace=True) data = [go.Scatter( x=d.index, y=d.rsam, mode='markers', marker=dict(size=4) )] return { 'data': data, 'layout': { 'margin': { 't': 30 }, 'xaxis': { 'range': [d.index.min(), d.index.max()] }, 'yaxis': { 'range': [d.rsam.min() - 20, 2 * d.rsam.mean()] } } } def get_tilt(ch, st): j = api_request_to_json(f'tilt?channel={ch}&starttime={st}') d = pd.DataFrame(j['records'][ch]) traces = [] if not d.empty: d.set_index('date', inplace=True) traces.append({ 'x': d.index, 'y': d['radial'], 'name': f"radial {j['used_azimuth']:.1f}" }) traces.append({ 'x': d.index, 'y': d['tangential'], 'name': f"tangential {j['tangential_azimuth']:.1f}" }) return { 'data': traces, 'layout': { 'margin': { 't': 30 } } } def get_rtnet(ch, st): j = api_request_to_json(f'rtnet?channel={ch}&starttime={st}') d = pd.DataFrame(j['records'][ch]) traces = [] if not d.empty: d.set_index('date', inplace=True) traces.append({ 'x': d.index, 'y': d.east, 'name': 'East', 'mode': 'markers', 'marker': dict( size=4 ) }) traces.append({ 'x': d.index, 'y': d.north, 'name': 'North', 'mode': 'markers', 'marker': dict( size=4 ) }) traces.append({ 'x': d.index, 'y': d.up, 'name': 'Up', 'mode': 'markers', 'marker': dict( size=4 ) }) return { 'data': traces, 'layout': { 'margin': { 't': 30 } } } def get_and_store_hypos(geo, st, current_data): if is_data_needed(st, current_data): return get_hypos(geo, st).to_json() else: return current_data def is_data_needed(st, data): if not data: return True now = datetime.now() olddata = pd.read_json(data) mindate = olddata.date.min() maxdate = olddata.date.max() td = now - mindate # Requested more than is currently stored? 
seconds = starttime_str_to_seconds(st) if seconds > (td.days * 86400 + td.seconds): return True # Data is old td = now - maxdate if (td.seconds / 60) > 10: return True return False def get_hypos(geo, st): j = api_request_to_json(f'hypocenter?geo={geo}&starttime={st}') d = pd.DataFrame(j['records']) if not d.empty: d['date'] = d['date'].str.slice(stop=-2) d['date'] = pd.to_datetime(d['date']) d.reset_index(drop=True, inplace=True) return d def get_hypos_map(st, kind, data, region): filename = f'{TMP}hypos{randint(0,9999):04d}.html' d = json_to_dataframe(st, data) m = None if region == 'kism': m = Map(location=[19.41, -155.27], min_zoom=12, max_zoom=15, zoom_start=13, tiles='Stamen Terrain') elif region == 'lerz': m = Map(location=[19.43, -154.88], min_zoom=11, max_zoom=15, zoom_start=11, tiles='Stamen Terrain') if kind == 'T': mid = d.date.min() mad = d.date.max() d.apply(create_tcircle_marker, arg=(m, mid, mad), axis=1) elif kind == 'A': d.apply(create_dcircle_marker, args=(m,), axis=1) m.save(filename) return open(filename, 'r').read() def get_hypos_legend(kind): encoded_img = None if kind == 'A': encoded_img = be(open(f'{LCL}dlegend.png', 'rb').read()) elif kind == 'T': encoded_img = be(open(f'{LCL}tlegend.png', 'rb').read()) return f"data:image/jpg;base64,{encoded_img.decode('utf8')}" def get_hypos_table(st, data): d = json_to_dataframe(st, data) if not d.empty: d.sort_values('date', inplace=True) return d.to_dict('records') def get_hypo_counts(st, data): d = json_to_dataframe(st, data) data = [] if not d.empty: d.sort_values('date', inplace=True) d['moment'] = d.prefMag.apply(lambda x: pow(10.0, 16.0 + ((3.0 * x)/2.0))) d['cmoment'] = d.moment.cumsum() bins = d.groupby(pd.Grouper(freq='60min', key='date')).count() data = [go.Bar( { 'x': bins.index, 'y': bins.depth, 'name': 'Count' }), go.Scatter( { 'x': d.date, 'y': d.cmoment, 'name': 'Moment', 'yaxis': 'y2' })] return { 'data': data, 'layout': { 'margin': { 't': 30 }, 'showlegend': False, 'yaxis': { 'title': 'Earthquakes per Hour' }, 'yaxis2': { 'title': 'Cumulative Moment (dyn-cm)', 'showgrid': False, 'overlaying': 'y', 'side': 'right' } } } def get_spectrogram(src): now = datetime.utcnow() d = now.timetuple().tm_yday tm = now - timedelta(minutes=now.minute % 10, seconds=now.second, microseconds=now.microsecond) if 'ipensive' in src: t = '%d%s%s-%s%s' % (now.year, str(now.month).zfill(2), str(now.day).zfill(2), str(tm.hour).zfill(2), str(tm.minute).zfill(2)) else: t = '%d%s-%s%s' % (now.year, str(d).zfill(3), str(tm.hour).zfill(2), str(tm.minute).zfill(2)) return src.format(now.year, d, t) def get_helicorder(ch): url = f'a=plot&o=png&tz=Pacific/Honolulu&w=900&h=636&n=1&x.0=75&y.0=20' \ f'&w.0=750&h.0=576&mh.0=900&chCnt.0=1' \ f'&src.0=hvo_seismic_winston_helicorders&st.0=-28800000&et.0=N' \ f'&chNames.0={ch}&dataTypes.0=275.000000&tc.0=15&barMult.0=3' \ f'&sc.0=T&plotSeparately.0=false' encoded_img = be(open(get_valve_plot(url), 'rb').read()) return f"data:image/jpg;base64,{encoded_img.decode('utf8')}" def get_tiltv(region): chs = '' if region == 'kism': chs = '18,20' elif region == 'merz': chs = '15,16' url = f'a=plot&o=png&tz=Pacific/Honolulu&w=900&h=1740&n=1&x.0=75&y.0=20' \ f'&w.0=750&h.0=240&mh.0=900&chCnt.0=7&src.0=hvo_def_tilt' \ f'&st.0=-28800000&et.0=N&lg.0=true&ch.0={chs}' \ f'&dataTypes.0=NaN&plotType.0=tv&rk.0=1&ds.0=None&dsInt.0=&sdt.0=' \ f'&az.0=n&azval.0=&linetype.0=l&ysLMin.0=&ysLMax.0=&ysRMin.0=' \ f'&ysRMax.0=&despike_period.0=&filter_arg1.0=&filter_arg2.0=' \ f'&despike.0=F&detrend.0=F&dmo_fl.0=0&filter_arg3.0=' \ 
f'&dmo_arithmetic.0=None&dmo_arithmetic_value.0=&dmo_db.0=0' \ f'&debias_period.0=&radial.0=T&tangential.0=T&xTilt.0=F&yTilt.0=F' \ f'&magnitude.0=F&azimuth.0=F&holeTemp.0=F&boxTemp.0=F&instVolt.0=F' \ f'&rainfall.0=F&vs.0=&plotSeparately.0=false' encoded_img = be(open(get_valve_plot(url), 'rb').read()) return f"data:image/jpg;base64,{encoded_img.decode('utf8')}" def get_valve_plot(itm): filename = f'{TMP}valve{randint(0,9999):04d}.jpg' url = f'https://hvovalve.wr.usgs.gov/valve3/valve3.jsp?{itm}' u = request.authorization.username p = request.authorization.password r = requests.get(url, auth=HTTPBasicAuth(u, p)) with open(filename, 'wb') as f: f.write(r.content) return filename def get_ash3d_img(): url = ('https://volcanoes.usgs.gov/vsc/captures/ash3d/' '332010_1008443_D_deposit.gif') return url def get_logs(max_rows=20): p = api_request_to_json('logs')['posts'] headers = ['Post', 'Author', 'Date'] d = sorted(p, key=itemgetter('date'), reverse=True) link = 'https://hvointernal.wr.usgs.gov/hvo_logs/read?id={}' return [[Tr([Th(col) for col in headers])] + [Tr([ Td(A(href=link.format(d[i]['id']), children='%s' % d[i]['subject'], target='_blank')), Td(children='%s' % d[i]['user']), Td(children='%s' % d[i]['date']) ]) for i in range(0, max_rows)]] def get_so2emissions(ch, st): j = api_request_to_json(f'so2emissions?channel={ch}&starttime={st}') data = [] d = pd.DataFrame(j['records'][ch]) if not d.empty: d.set_index('date', inplace=True) data = [go.Scatter( x=d.index, y=d.so2, mode='markers', marker=dict(size=10) )] return { 'data': data, 'layout': { 'margin': { 't': 30 } } } def get_nps_so2(ch, st): j = api_request_to_json(f'npsadvisory?channel={ch}&starttime={st}') data = [] d = pd.DataFrame(j['records'][ch]) if not d.empty: d.set_index('date', inplace=True) data = [go.Scatter( x=d.index, y=d.avgso2, mode='markers', marker=dict(size=6) )] return { 'data': data, 'layout': { 'margin': { 't': 30 }, 'yaxis': { 'exponentformat': 'none' } } } def get_nps_wind(ch, st): url = (f'npsadvisory?channel={ch}&starttime={st}&series=windspeed,winddir') j = api_request_to_json(url) data = [] d = pd.DataFrame(j['records'][ch]) if not d.empty: d.set_index('date', inplace=True) data = [go.Scatter( x=d.index, y=d.windspeed, name='Wind Speed', mode='markers', marker=dict(size=6) ), go.Scatter( x=d.index, y=d.winddir, name='Wind Dir', yaxis='y2', mode='markers', marker=dict(size=6) )] return { 'data': data, 'layout': { 'margin': { 't': 30 }, 'yaxis': { 'title': 'Windspeed (m/s)' }, 'yaxis2': { 'title': 'Wind Direction (deg)', 'showgrid': False, 'overlaying': 'y', 'side': 'right' } } }
[ "pandas.DataFrame", "dash_html_components.Td", "random.randint", "plotly.graph_objs.Scatter", "os.path.realpath", "pandas.read_json", "datetime.datetime.utcnow", "pandas.to_datetime", "datetime.timedelta", "folium.Map", "pandas.Grouper", "dash_html_components.Th", "requests.auth.HTTPBasicAuth", "operator.itemgetter", "datetime.datetime.now", "plotly.graph_objs.Bar" ]
[((792, 822), 'pandas.DataFrame', 'pd.DataFrame', (["j['records'][ch]"], {}), "(j['records'][ch])\n", (804, 822), True, 'import pandas as pd\n'), ((1455, 1485), 'pandas.DataFrame', 'pd.DataFrame', (["j['records'][ch]"], {}), "(j['records'][ch])\n", (1467, 1485), True, 'import pandas as pd\n'), ((2102, 2132), 'pandas.DataFrame', 'pd.DataFrame', (["j['records'][ch]"], {}), "(j['records'][ch])\n", (2114, 2132), True, 'import pandas as pd\n'), ((3226, 3240), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3238, 3240), False, 'from datetime import datetime, timedelta\n'), ((3255, 3273), 'pandas.read_json', 'pd.read_json', (['data'], {}), '(data)\n', (3267, 3273), True, 'import pandas as pd\n'), ((3733, 3759), 'pandas.DataFrame', 'pd.DataFrame', (["j['records']"], {}), "(j['records'])\n", (3745, 3759), True, 'import pandas as pd\n'), ((6351, 6368), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6366, 6368), False, 'from datetime import datetime, timedelta\n'), ((9662, 9692), 'pandas.DataFrame', 'pd.DataFrame', (["j['records'][ch]"], {}), "(j['records'][ch])\n", (9674, 9692), True, 'import pandas as pd\n'), ((10153, 10183), 'pandas.DataFrame', 'pd.DataFrame', (["j['records'][ch]"], {}), "(j['records'][ch])\n", (10165, 10183), True, 'import pandas as pd\n'), ((10767, 10797), 'pandas.DataFrame', 'pd.DataFrame', (["j['records'][ch]"], {}), "(j['records'][ch])\n", (10779, 10797), True, 'import pandas as pd\n'), ((595, 613), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (603, 613), False, 'from os.path import join, dirname, realpath\n'), ((646, 664), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (654, 664), False, 'from os.path import join, dirname, realpath\n'), ((3849, 3874), 'pandas.to_datetime', 'pd.to_datetime', (["d['date']"], {}), "(d['date'])\n", (3863, 3874), True, 'import pandas as pd\n'), ((4121, 4220), 'folium.Map', 'Map', ([], {'location': '[19.41, -155.27]', 'min_zoom': '(12)', 'max_zoom': '(15)', 'zoom_start': '(13)', 'tiles': '"""Stamen Terrain"""'}), "(location=[19.41, -155.27], min_zoom=12, max_zoom=15, zoom_start=13,\n tiles='Stamen Terrain')\n", (4124, 4220), False, 'from folium import Map\n'), ((6416, 6505), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(now.minute % 10)', 'seconds': 'now.second', 'microseconds': 'now.microsecond'}), '(minutes=now.minute % 10, seconds=now.second, microseconds=now.\n microsecond)\n', (6425, 6505), False, 'from datetime import datetime, timedelta\n'), ((4008, 4024), 'random.randint', 'randint', (['(0)', '(9999)'], {}), '(0, 9999)\n', (4015, 4024), False, 'from random import randint\n'), ((4272, 4371), 'folium.Map', 'Map', ([], {'location': '[19.43, -154.88]', 'min_zoom': '(11)', 'max_zoom': '(15)', 'zoom_start': '(11)', 'tiles': '"""Stamen Terrain"""'}), "(location=[19.43, -154.88], min_zoom=11, max_zoom=15, zoom_start=11,\n tiles='Stamen Terrain')\n", (4275, 4371), False, 'from folium import Map\n'), ((5514, 5573), 'plotly.graph_objs.Bar', 'go.Bar', (["{'x': bins.index, 'y': bins.depth, 'name': 'Count'}"], {}), "({'x': bins.index, 'y': bins.depth, 'name': 'Count'})\n", (5520, 5573), True, 'import plotly.graph_objs as go\n'), ((5670, 5744), 'plotly.graph_objs.Scatter', 'go.Scatter', (["{'x': d.date, 'y': d.cmoment, 'name': 'Moment', 'yaxis': 'y2'}"], {}), "({'x': d.date, 'y': d.cmoment, 'name': 'Moment', 'yaxis': 'y2'})\n", (5680, 5744), True, 'import plotly.graph_objs as go\n'), ((8502, 8518), 'random.randint', 'randint', (['(0)', '(9999)'], {}), '(0, 9999)\n', (8509, 
8518), False, 'from random import randint\n'), ((8703, 8722), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['u', 'p'], {}), '(u, p)\n', (8716, 8722), False, 'from requests.auth import HTTPBasicAuth\n'), ((9085, 9103), 'operator.itemgetter', 'itemgetter', (['"""date"""'], {}), "('date')\n", (9095, 9103), False, 'from operator import itemgetter\n'), ((5452, 5488), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""60min"""', 'key': '"""date"""'}), "(freq='60min', key='date')\n", (5462, 5488), True, 'import pandas as pd\n'), ((9201, 9208), 'dash_html_components.Th', 'Th', (['col'], {}), '(col)\n', (9203, 9208), False, 'from dash_html_components import Th, Tr, Td, A\n'), ((9408, 9440), 'dash_html_components.Td', 'Td', ([], {'children': "('%s' % d[i]['user'])"}), "(children='%s' % d[i]['user'])\n", (9410, 9440), False, 'from dash_html_components import Th, Tr, Td, A\n'), ((9457, 9489), 'dash_html_components.Td', 'Td', ([], {'children': "('%s' % d[i]['date'])"}), "(children='%s' % d[i]['date'])\n", (9459, 9489), False, 'from dash_html_components import Th, Tr, Td, A\n')]
#!/usr/bin/python # coding:utf8 """ @author: <NAME> @time: 2019-12-07 20:51 """ import os import re import json import tensorflow as tf import tokenization os.environ["CUDA_VISIBLE_DEVICES"] = "0" vocab_file = "./vocab.txt" tokenizer_ = tokenization.FullTokenizer(vocab_file=vocab_file) label2id = json.loads(open("./label2id.json").read()) id2label = [k for k, v in label2id.items()] def process_one_example_p(tokenizer, text, max_seq_len=128): textlist = list(text) tokens = [] # labels = [] for i, word in enumerate(textlist): token = tokenizer.tokenize(word) # print(token) tokens.extend(token) if len(tokens) >= max_seq_len - 1: tokens = tokens[0:(max_seq_len - 2)] # labels = labels[0:(max_seq_len - 2)] ntokens = [] segment_ids = [] label_ids = [] ntokens.append("[CLS]") # 句子开始设置CLS 标志 segment_ids.append(0) for i, token in enumerate(tokens): ntokens.append(token) segment_ids.append(0) # label_ids.append(label2id[labels[i]]) ntokens.append("[SEP]") segment_ids.append(0) input_ids = tokenizer.convert_tokens_to_ids(ntokens) input_mask = [1] * len(input_ids) while len(input_ids) < max_seq_len: input_ids.append(0) input_mask.append(0) segment_ids.append(0) label_ids.append(0) ntokens.append("**NULL**") assert len(input_ids) == max_seq_len assert len(input_mask) == max_seq_len assert len(segment_ids) == max_seq_len feature = (input_ids, input_mask, segment_ids) return feature def load_model(model_folder): # We retrieve our checkpoint fullpath try: checkpoint = tf.train.get_checkpoint_state(model_folder) input_checkpoint = checkpoint.model_checkpoint_path print("[INFO] input_checkpoint:", input_checkpoint) except Exception as e: input_checkpoint = model_folder print("[INFO] Model folder", model_folder, repr(e)) # We clear devices to allow TensorFlow to control on which device it will load operations clear_devices = True tf.reset_default_graph() # We import the meta graph and retrieve a Saver saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices) # We start a session and restore the graph weights sess_ = tf.Session() saver.restore(sess_, input_checkpoint) # opts = sess_.graph.get_operations() # for v in opts: # print(v.name) return sess_ model_path = "./ner_bert_base/" sess = load_model(model_path) input_ids = sess.graph.get_tensor_by_name("input_ids:0") input_mask = sess.graph.get_tensor_by_name("input_mask:0") # is_training segment_ids = sess.graph.get_tensor_by_name("segment_ids:0") # fc/dense/Relu cnn_block/Reshape keep_prob = sess.graph.get_tensor_by_name("keep_prob:0") p = sess.graph.get_tensor_by_name("loss/ReverseSequence_1:0") def predict(text): data = [text] # 逐个分成 最大62长度的 text 进行 batch 预测 features = [] for i in data: feature = process_one_example_p(tokenizer_, i, max_seq_len=64) features.append(feature) feed = {input_ids: [feature[0] for feature in features], input_mask: [feature[1] for feature in features], segment_ids: [feature[2] for feature in features], keep_prob: 1.0 } [probs] = sess.run([p], feed) result = [] for index, prob in enumerate(probs): for v in prob[1:len(data[index]) + 1]: result.append(id2label[int(v)]) print(result) labels = {} start = None index = 0 for w, t in zip("".join(data), result): if re.search("^[BS]", t): if start is not None: label = result[index - 1][2:] if labels.get(label): te_ = text[start:index] # print(te_, labels) labels[label][te_] = [[start, index - 1]] else: te_ = text[start:index] # print(te_, labels) labels[label] = {te_: [[start, index - 1]]} start = index # print(start) if re.search("^O", t): if start is not 
None: # print(start) label = result[index - 1][2:] if labels.get(label): te_ = text[start:index] # print(te_, labels) labels[label][te_] = [[start, index - 1]] else: te_ = text[start:index] # print(te_, labels) labels[label] = {te_: [[start, index - 1]]} # else: # print(start, labels) start = None index += 1 if start is not None: # print(start) label = result[start][2:] if labels.get(label): te_ = text[start:index] # print(te_, labels) labels[label][te_] = [[start, index - 1]] else: te_ = text[start:index] # print(te_, labels) labels[label] = {te_: [[start, index - 1]]} # print(labels) return labels def submit(path): data = [] for line in open(path): if not line.strip(): continue _ = json.loads(line.strip()) res = predict(_["text"]) data.append(json.dumps({"label": res}, ensure_ascii=False)) open("ner_predict.json", "w").write("\n".join(data)) if __name__ == "__main__": text_ = "梅塔利斯在乌克兰联赛、杯赛及联盟杯中保持9场不败,状态相当出色;" res_ = predict(text_) print(res_) submit("data/thuctc_valid.json")
[ "tensorflow.train.import_meta_graph", "tensorflow.reset_default_graph", "tokenization.FullTokenizer", "tensorflow.Session", "json.dumps", "re.search", "tensorflow.train.get_checkpoint_state" ]
[((239, 288), 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'vocab_file'}), '(vocab_file=vocab_file)\n', (265, 288), False, 'import tokenization\n'), ((2103, 2127), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2125, 2127), True, 'import tensorflow as tf\n'), ((2192, 2280), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(input_checkpoint + '.meta')"], {'clear_devices': 'clear_devices'}), "(input_checkpoint + '.meta', clear_devices=\n clear_devices)\n", (2218, 2280), True, 'import tensorflow as tf\n'), ((2344, 2356), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2354, 2356), True, 'import tensorflow as tf\n'), ((1688, 1731), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['model_folder'], {}), '(model_folder)\n', (1717, 1731), True, 'import tensorflow as tf\n'), ((3662, 3683), 're.search', 're.search', (['"""^[BS]"""', 't'], {}), "('^[BS]', t)\n", (3671, 3683), False, 'import re\n'), ((4185, 4203), 're.search', 're.search', (['"""^O"""', 't'], {}), "('^O', t)\n", (4194, 4203), False, 'import re\n'), ((5390, 5436), 'json.dumps', 'json.dumps', (["{'label': res}"], {'ensure_ascii': '(False)'}), "({'label': res}, ensure_ascii=False)\n", (5400, 5436), False, 'import json\n')]
import os

import torch


def GERF_loss(GT, pred, args):
    mask = (GT < args.maxdisp) & (GT >= 0)
    # print(mask.size(), GT.size(), pred.size())
    count = len(torch.nonzero(mask))
    # print(count)
    if count == 0:
        count = 1
    return torch.sum(torch.sqrt(torch.pow(GT[mask] - pred[mask], 2) + 4) / 2 - 1) / count
[ "torch.nonzero", "torch.pow" ]
[((163, 182), 'torch.nonzero', 'torch.nonzero', (['mask'], {}), '(mask)\n', (176, 182), False, 'import torch\n'), ((272, 307), 'torch.pow', 'torch.pow', (['(GT[mask] - pred[mask])', '(2)'], {}), '(GT[mask] - pred[mask], 2)\n', (281, 307), False, 'import torch\n')]
import inspect
import os


def get_datasets_folder():
    return os.path.join(get_data_folder(), "Datasets")


def get_data_folder():
    return os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
[ "inspect.currentframe" ]
[((192, 214), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (212, 214), False, 'import inspect\n')]
from collections import Counter
import difflib


def _checksum(r):
    counter = Counter(r)
    return int(any([x == 2 for x in counter.values()])), int(any([x == 3 for x in counter.values()]))


def _solve_1(rows):
    d = [_checksum(row) for row in rows]
    return sum([x[0] for x in d]) * sum([x[1] for x in d])


def _solve_2(rows):
    for i, r in enumerate(rows):
        for r2 in rows[i:]:
            diffs = len(r) - int(round(difflib.SequenceMatcher(a=r, b=r2).ratio() * len(r)))
            if diffs == 1:
                return "".join([a for a, b in zip(r, r2) if a == b])
    return 0


def solve(data):
    rows = data.splitlines()
    return _solve_1(rows), _solve_2(rows)


if __name__ == '__main__':
    from AOC2018 import run_solver

    run_solver(solve, __file__)
[ "collections.Counter", "AOC2018.run_solver", "difflib.SequenceMatcher" ]
[((81, 91), 'collections.Counter', 'Counter', (['r'], {}), '(r)\n', (88, 91), False, 'from collections import Counter\n'), ((760, 787), 'AOC2018.run_solver', 'run_solver', (['solve', '__file__'], {}), '(solve, __file__)\n', (770, 787), False, 'from AOC2018 import run_solver\n'), ((439, 473), 'difflib.SequenceMatcher', 'difflib.SequenceMatcher', ([], {'a': 'r', 'b': 'r2'}), '(a=r, b=r2)\n', (462, 473), False, 'import difflib\n')]
import asyncio
import functools
import contextlib

import aiohttp

from ..protocol import Protocol
from ..exceptions import InstagramError

__all__ = (
    "AioHTTPInstagramApi",
)


class AioHTTPInstagramApi:
    def __init__(self, username, password, state=None, delay=5,
                 proxy=None, loop=None, lock=None):
        if proxy is None:
            self._conn = None
        else:
            self._conn = aiohttp.ProxyConnector(proxy=proxy)
        self.proto = Protocol(username, password, state)
        self.delay = delay
        self.loop = loop or asyncio.get_event_loop()
        self.lock = lock or asyncio.Lock(loop=self.loop)
        self.last_request_time = 0

    @property
    def state(self):
        return self.proto.state

    def __getattr__(self, name):
        method = getattr(self.proto, name)

        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            return self._run(method(*args, **kwargs))

        return wrapper

    async def _request(self, request):
        kw = request._asdict()
        async with aiohttp.ClientSession(cookies=kw.pop("cookies")) as session:
            async with session.request(**kw) as response:
                if not await response.read():
                    raise InstagramError(response)
                return Protocol.Response(
                    cookies={c.key: c.value for c in session.cookie_jar},
                    json=await response.json(),
                    status_code=response.status,
                )

    async def _run(self, generator):
        with (await self.lock):
            response = None
            with contextlib.suppress(StopIteration):
                while True:
                    request = generator.send(response)
                    now = self.loop.time()
                    timeout = max(0, self.delay - (now - self.last_request_time))
                    await asyncio.sleep(timeout, loop=self.loop)
                    self.last_request_time = self.loop.time()
                    response = await self._request(request)
            return response.json
[ "asyncio.get_event_loop", "asyncio.sleep", "contextlib.suppress", "asyncio.Lock", "functools.wraps", "aiohttp.ProxyConnector" ]
[((826, 849), 'functools.wraps', 'functools.wraps', (['method'], {}), '(method)\n', (841, 849), False, 'import functools\n'), ((406, 441), 'aiohttp.ProxyConnector', 'aiohttp.ProxyConnector', ([], {'proxy': 'proxy'}), '(proxy=proxy)\n', (428, 441), False, 'import aiohttp\n'), ((554, 578), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (576, 578), False, 'import asyncio\n'), ((607, 635), 'asyncio.Lock', 'asyncio.Lock', ([], {'loop': 'self.loop'}), '(loop=self.loop)\n', (619, 635), False, 'import asyncio\n'), ((1618, 1652), 'contextlib.suppress', 'contextlib.suppress', (['StopIteration'], {}), '(StopIteration)\n', (1637, 1652), False, 'import contextlib\n'), ((1888, 1926), 'asyncio.sleep', 'asyncio.sleep', (['timeout'], {'loop': 'self.loop'}), '(timeout, loop=self.loop)\n', (1901, 1926), False, 'import asyncio\n')]
import jinja2


def render(filename, context={}, error=None, path='templates'):
    if error:
        # Error should be a string
        if isinstance(error, str):
            context['error'] = error
        else:
            raise TypeError('Error message must be a string')
    return jinja2.Environment(
        loader=jinja2.FileSystemLoader(path)
    ).get_template(filename).render(context)
[ "jinja2.FileSystemLoader" ]
[((323, 352), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['path'], {}), '(path)\n', (346, 352), False, 'import jinja2\n')]
""" @author: <NAME> @contact: <EMAIL> """ import logging import numpy as np # type: ignore import sys from typing import Callable def ert_type(x, stype, label): if not isinstance(x, stype): raise AssertionError(f"{label} should be {stype}, {type(x)} instead") def ert_multiTypes(x, types, label): cond = any(isinstance(x, t) for t in types) if not cond: raise AssertionError(f"{label} should be one of {types}, {type(x)} instead") def ert_nonNeg(x, label, include_zero=False): if not include_zero: if x <= 0: raise AssertionError(f"{label} should be greater than 0") elif x < 0: raise AssertionError(f"{label} should be greater than or equal to 0") def ert_inInterv(x, vmin, vmax, label, leftClose=False, rightClose=True): if leftClose: if rightClose: if x < vmin or x > vmax: raise AssertionError(f"expected {vmin}<={label}<={vmax}") elif x < vmin or x >= vmax: raise AssertionError(f"expected {vmin}<={label}<{vmax}") elif rightClose: if x <= vmin or x > vmax: raise AssertionError(f"expected {vmin}<{label}<={vmax}") elif x <= vmin or x >= vmax: raise AssertionError(f"expected {vmin}<{label}<{vmax}") def ert_in_dtype(x, dtype): if dtype.startswith("f"): if x > np.finfo(dtype).max: raise AssertionError( " ".join( [ "expected to be lower than {dtype} max:", f"{x} < {np.finfo(dtype).max}", ] ) ) elif dtype.startswith("u") or dtype.startswith("i"): if x > np.iinfo(dtype).max: raise AssertionError( " ".join( [ "expected to be lower than {dtype} max:", f"{x} < {np.iinfo(dtype).max}", ] ) ) else: logging.warning(f"assert not implemented for dtype '{dtype}'") def enable_rich_assert(fun: Callable) -> Callable: def wrapper(*args, **kwargs): try: return fun(*args, **kwargs) except AssertionError as e: logging.exception(e) sys.exit() return wrapper
[ "logging.exception", "logging.warning", "numpy.iinfo", "numpy.finfo", "sys.exit" ]
[((1998, 2060), 'logging.warning', 'logging.warning', (['f"""assert not implemented for dtype \'{dtype}\'"""'], {}), '(f"assert not implemented for dtype \'{dtype}\'")\n', (2013, 2060), False, 'import logging\n'), ((1350, 1365), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (1358, 1365), True, 'import numpy as np\n'), ((2249, 2269), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (2266, 2269), False, 'import logging\n'), ((2282, 2292), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2290, 2292), False, 'import sys\n'), ((1701, 1716), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (1709, 1716), True, 'import numpy as np\n'), ((1552, 1567), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (1560, 1567), True, 'import numpy as np\n'), ((1903, 1918), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (1911, 1918), True, 'import numpy as np\n')]
import ast
import os

import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from keras.applications.densenet import preprocess_input
from keras.metrics import (categorical_accuracy, top_k_categorical_accuracy)
from keras.models import Model, load_model

DP_DIR = './input/shuffle-csvs/'
INPUT_DIR = './input/quickdraw-doodle-recognition/'
BASE_SIZE = 256
NCSVS = 200
NCATS = 340
np.random.seed(seed=2018)
tf.set_random_seed(seed=2018)


def f2cat(filename: str) -> str:
    return filename.split('.')[0]


def list_all_categories():
    files = os.listdir(os.path.join(INPUT_DIR, 'train_simplified'))
    return sorted([f2cat(f) for f in files], key=str.lower)


def apk(actual, predicted, k=3):
    """
    Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
    """
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    if not actual:
        return 0.0
    return score / min(len(actual), k)


def mapk(actual, predicted, k=3):
    """
    Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
    """
    return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])


def preds2catids(predictions):
    return pd.DataFrame(np.argsort(-predictions, axis=1)[:, :3], columns=['a', 'b', 'c'])


def top_3_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)


def draw_cv2(raw_strokes, size=256, lw=6, time_color=True):
    img = np.zeros((BASE_SIZE, BASE_SIZE), np.uint8)
    for t, stroke in enumerate(raw_strokes):
        for i in range(len(stroke[0]) - 1):
            color = 255 - min(t, 10) * 13 if time_color else 255
            _ = cv2.line(img, (stroke[0][i], stroke[1][i]),
                         (stroke[0][i + 1], stroke[1][i + 1]), color, lw)
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    if size != BASE_SIZE:
        return cv2.resize(img, (size, size))
    else:
        return img


def image_generator_xd(size, batchsize, ks, lw=6, time_color=True):
    while True:
        for k in np.random.permutation(ks):
            filename = os.path.join(DP_DIR, 'train_k{}.csv.gz'.format(k))
            for df in pd.read_csv(filename, chunksize=batchsize):
                df['drawing'] = df['drawing'].apply(ast.literal_eval)
                x = np.zeros((len(df), size, size, 3))
                for i, raw_strokes in enumerate(df.drawing.values):
                    x[i, :, :, :] = draw_cv2(raw_strokes, size=size, lw=lw,
                                             time_color=time_color)
                x = preprocess_input(x).astype(np.float32)
                y = keras.utils.to_categorical(df.y, num_classes=NCATS)
                yield x, y


def df_to_image_array_xd(df, size, lw=6, time_color=True):
    df['drawing'] = df['drawing'].apply(ast.literal_eval)
    x = np.zeros((len(df), size, size, 3))
    for i, raw_strokes in enumerate(df.drawing.values):
        x[i, :, :, :] = draw_cv2(
            raw_strokes, size=size, lw=lw, time_color=time_color)
    x = preprocess_input(x).astype(np.float32)
    return x


class TTA_ModelWrapper():
    """A simple TTA wrapper for keras computer vision models.
    Args:
        model (keras model): A fitted keras model with a predict method.
    """

    def __init__(self, model):
        self.model = model

    def predict(self, X):
        """Wraps the predict method of the provided model.
        Augments the testdata with horizontal and vertical flips and averages the results.
        Args:
            X (numpy array of dim 4): The data to get predictions for.
        """
        p0 = self.model.predict(X, batch_size=128, verbose=1)
        p1 = self.model.predict(np.flipud(X), batch_size=128, verbose=1)
        p = (p0 + p1) / 2
        return np.array(p)
[ "cv2.line", "keras.applications.densenet.preprocess_input", "numpy.random.seed", "tensorflow.keras.utils.to_categorical", "cv2.cvtColor", "pandas.read_csv", "numpy.zeros", "numpy.flipud", "tensorflow.set_random_seed", "numpy.argsort", "numpy.array", "numpy.random.permutation", "keras.metrics.top_k_categorical_accuracy", "os.path.join", "cv2.resize" ]
[((429, 454), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(2018)'}), '(seed=2018)\n', (443, 454), True, 'import numpy as np\n'), ((455, 484), 'tensorflow.set_random_seed', 'tf.set_random_seed', ([], {'seed': '(2018)'}), '(seed=2018)\n', (473, 484), True, 'import tensorflow as tf\n'), ((1588, 1635), 'keras.metrics.top_k_categorical_accuracy', 'top_k_categorical_accuracy', (['y_true', 'y_pred'], {'k': '(3)'}), '(y_true, y_pred, k=3)\n', (1614, 1635), False, 'from keras.metrics import categorical_accuracy, top_k_categorical_accuracy\n'), ((1708, 1750), 'numpy.zeros', 'np.zeros', (['(BASE_SIZE, BASE_SIZE)', 'np.uint8'], {}), '((BASE_SIZE, BASE_SIZE), np.uint8)\n', (1716, 1750), True, 'import numpy as np\n'), ((2049, 2086), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2RGB'], {}), '(img, cv2.COLOR_GRAY2RGB)\n', (2061, 2086), False, 'import cv2\n'), ((606, 649), 'os.path.join', 'os.path.join', (['INPUT_DIR', '"""train_simplified"""'], {}), "(INPUT_DIR, 'train_simplified')\n", (618, 649), False, 'import os\n'), ((2128, 2157), 'cv2.resize', 'cv2.resize', (['img', '(size, size)'], {}), '(img, (size, size))\n', (2138, 2157), False, 'import cv2\n'), ((2290, 2315), 'numpy.random.permutation', 'np.random.permutation', (['ks'], {}), '(ks)\n', (2311, 2315), True, 'import numpy as np\n'), ((4029, 4040), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (4037, 4040), True, 'import numpy as np\n'), ((1473, 1505), 'numpy.argsort', 'np.argsort', (['(-predictions)'], {'axis': '(1)'}), '(-predictions, axis=1)\n', (1483, 1505), True, 'import numpy as np\n'), ((1921, 2017), 'cv2.line', 'cv2.line', (['img', '(stroke[0][i], stroke[1][i])', '(stroke[0][i + 1], stroke[1][i + 1])', 'color', 'lw'], {}), '(img, (stroke[0][i], stroke[1][i]), (stroke[0][i + 1], stroke[1][i +\n 1]), color, lw)\n', (1929, 2017), False, 'import cv2\n'), ((2413, 2455), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'chunksize': 'batchsize'}), '(filename, chunksize=batchsize)\n', (2424, 2455), True, 'import pandas as pd\n'), ((3278, 3297), 'keras.applications.densenet.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (3294, 3297), False, 'from keras.applications.densenet import preprocess_input\n'), ((3947, 3959), 'numpy.flipud', 'np.flipud', (['X'], {}), '(X)\n', (3956, 3959), True, 'import numpy as np\n'), ((2873, 2924), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['df.y'], {'num_classes': 'NCATS'}), '(df.y, num_classes=NCATS)\n', (2899, 2924), False, 'from tensorflow import keras\n'), ((2814, 2833), 'keras.applications.densenet.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (2830, 2833), False, 'from keras.applications.densenet import preprocess_input\n')]
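A minimal usage sketch for the apk/mapk helpers defined in the training script above, assuming that script is importable as quickdraw_train (a hypothetical module name) and using invented toy labels:

# Hedged illustration of the MAP@3 metric implemented by apk()/mapk() above.
# "quickdraw_train" is a hypothetical module name for that script; the labels
# and predictions below are invented for the example.
from quickdraw_train import apk, mapk  # hypothetical module name

actual = [["cat"], ["dog"]]
predicted = [
    ["cat", "car", "cow"],   # hit at rank 1 -> precision contribution 1/1
    ["pig", "dog", "deer"],  # hit at rank 2 -> precision contribution 1/2
]

assert apk(actual[0], predicted[0], k=3) == 1.0
assert apk(actual[1], predicted[1], k=3) == 0.5
assert mapk(actual, predicted, k=3) == 0.75  # (1.0 + 0.5) / 2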
# Copyright (c) 2018 <NAME>. # Cura is released under the terms of the LGPLv3 or higher. from PyQt5.QtCore import Qt, pyqtSlot from UM.Qt.ListModel import ListModel from UM.Logger import Logger # # This the QML model for the quality management page. # class QualityManagementModel(ListModel): NameRole = Qt.UserRole + 1 IsReadOnlyRole = Qt.UserRole + 2 QualityGroupRole = Qt.UserRole + 3 QualityChangesGroupRole = Qt.UserRole + 4 def __init__(self, parent = None): super().__init__(parent) self.addRoleName(self.NameRole, "name") self.addRoleName(self.IsReadOnlyRole, "is_read_only") self.addRoleName(self.QualityGroupRole, "quality_group") self.addRoleName(self.QualityChangesGroupRole, "quality_changes_group") from cura.CuraApplication import CuraApplication self._container_registry = CuraApplication.getInstance().getContainerRegistry() self._machine_manager = CuraApplication.getInstance().getMachineManager() self._extruder_manager = CuraApplication.getInstance().getExtruderManager() self._quality_manager = CuraApplication.getInstance().getQualityManager() self._machine_manager.globalContainerChanged.connect(self._update) self._quality_manager.qualitiesUpdated.connect(self._update) self._update() def _update(self): Logger.log("d", "Updating {model_class_name}.".format(model_class_name = self.__class__.__name__)) global_stack = self._machine_manager.activeMachine if not global_stack: self.setItems([]) return quality_group_dict = self._quality_manager.getQualityGroups(global_stack) quality_changes_group_dict = self._quality_manager.getQualityChangesGroups(global_stack) available_quality_types = set(quality_type for quality_type, quality_group in quality_group_dict.items() if quality_group.is_available) if not available_quality_types and not quality_changes_group_dict: # Nothing to show self.setItems([]) return item_list = [] # Create quality group items for quality_group in quality_group_dict.values(): if not quality_group.is_available: continue item = {"name": quality_group.name, "is_read_only": True, "quality_group": quality_group, "quality_changes_group": None} item_list.append(item) # Sort by quality names item_list = sorted(item_list, key = lambda x: x["name"].upper()) # Create quality_changes group items quality_changes_item_list = [] for quality_changes_group in quality_changes_group_dict.values(): if quality_changes_group.quality_type not in available_quality_types: continue quality_group = quality_group_dict[quality_changes_group.quality_type] item = {"name": quality_changes_group.name, "is_read_only": False, "quality_group": quality_group, "quality_changes_group": quality_changes_group} quality_changes_item_list.append(item) # Sort quality_changes items by names and append to the item list quality_changes_item_list = sorted(quality_changes_item_list, key = lambda x: x["name"].upper()) item_list += quality_changes_item_list self.setItems(item_list) # TODO: Duplicated code here from InstanceContainersModel. Refactor and remove this later. # ## Gets a list of the possible file filters that the plugins have # registered they can read or write. The convenience meta-filters # "All Supported Types" and "All Files" are added when listing # readers, but not when listing writers. # # \param io_type \type{str} name of the needed IO type # \return A list of strings indicating file name filters for a file # dialog. 
@pyqtSlot(str, result = "QVariantList") def getFileNameFilters(self, io_type): from UM.i18n import i18nCatalog catalog = i18nCatalog("uranium") #TODO: This function should be in UM.Resources! filters = [] all_types = [] for plugin_id, meta_data in self._getIOPlugins(io_type): for io_plugin in meta_data[io_type]: filters.append(io_plugin["description"] + " (*." + io_plugin["extension"] + ")") all_types.append("*.{0}".format(io_plugin["extension"])) if "_reader" in io_type: # if we're listing readers, add the option to show all supported files as the default option filters.insert(0, catalog.i18nc("@item:inlistbox", "All Supported Types ({0})", " ".join(all_types))) filters.append(catalog.i18nc("@item:inlistbox", "All Files (*)")) # Also allow arbitrary files, if the user so prefers. return filters ## Gets a list of profile reader or writer plugins # \return List of tuples of (plugin_id, meta_data). def _getIOPlugins(self, io_type): from UM.PluginRegistry import PluginRegistry pr = PluginRegistry.getInstance() active_plugin_ids = pr.getActivePlugins() result = [] for plugin_id in active_plugin_ids: meta_data = pr.getMetaData(plugin_id) if io_type in meta_data: result.append( (plugin_id, meta_data) ) return result
[ "UM.PluginRegistry.PluginRegistry.getInstance", "cura.CuraApplication.CuraApplication.getInstance", "UM.i18n.i18nCatalog", "PyQt5.QtCore.pyqtSlot" ]
[((4055, 4091), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['str'], {'result': '"""QVariantList"""'}), "(str, result='QVariantList')\n", (4063, 4091), False, 'from PyQt5.QtCore import Qt, pyqtSlot\n'), ((4195, 4217), 'UM.i18n.i18nCatalog', 'i18nCatalog', (['"""uranium"""'], {}), "('uranium')\n", (4206, 4217), False, 'from UM.i18n import i18nCatalog\n'), ((5230, 5258), 'UM.PluginRegistry.PluginRegistry.getInstance', 'PluginRegistry.getInstance', ([], {}), '()\n', (5256, 5258), False, 'from UM.PluginRegistry import PluginRegistry\n'), ((871, 900), 'cura.CuraApplication.CuraApplication.getInstance', 'CuraApplication.getInstance', ([], {}), '()\n', (898, 900), False, 'from cura.CuraApplication import CuraApplication\n'), ((956, 985), 'cura.CuraApplication.CuraApplication.getInstance', 'CuraApplication.getInstance', ([], {}), '()\n', (983, 985), False, 'from cura.CuraApplication import CuraApplication\n'), ((1039, 1068), 'cura.CuraApplication.CuraApplication.getInstance', 'CuraApplication.getInstance', ([], {}), '()\n', (1066, 1068), False, 'from cura.CuraApplication import CuraApplication\n'), ((1122, 1151), 'cura.CuraApplication.CuraApplication.getInstance', 'CuraApplication.getInstance', ([], {}), '()\n', (1149, 1151), False, 'from cura.CuraApplication import CuraApplication\n')]
from click.testing import CliRunner from git_history.cli import cli from git_history.utils import RESERVED import itertools import json import pytest import subprocess import sqlite_utils import textwrap git_commit = [ "git", "-c", "user.name='Tests'", "-c", "user.email='<EMAIL>'", "commit", ] def make_repo(tmpdir): repo_dir = tmpdir / "repo" repo_dir.mkdir() # This one is used for the README generated by Cog: (repo_dir / "incidents.json").write_text( json.dumps( [ { "IncidentID": "abc123", "Location": "Corner of 4th and Vermont", "Type": "fire", }, { "IncidentID": "cde448", "Location": "555 West Example Drive", "Type": "medical", }, ] ), "utf-8", ) (repo_dir / "items.json").write_text( json.dumps( [ { "product_id": 1, "name": "Gin", }, { "product_id": 2, "name": "Tonic", }, ] ), "utf-8", ) (repo_dir / "items-with-reserved-columns.json").write_text( json.dumps( [ { "_id": 1, "_item": "Gin", "_version": "v1", "_commit": "commit1", "rowid": 5, }, { "_id": 2, "_item": "Tonic", "_version": "v1", "_commit": "commit1", "rowid": 6, }, ] ), "utf-8", ) (repo_dir / "items-with-banned-columns.json").write_text( json.dumps( [ { "_id_": 1, "_version_": "Gin", } ] ), "utf-8", ) (repo_dir / "trees.csv").write_text( "TreeID,name\n1,Sophia\n2,Charlie", "utf-8", ) (repo_dir / "trees.tsv").write_text( "TreeID\tname\n1\tSophia\n2\tCharlie", "utf-8", ) (repo_dir / "increment.txt").write_text("1", "utf-8") subprocess.call(["git", "init"], cwd=str(repo_dir)) subprocess.call( [ "git", "add", "incidents.json", "items.json", "items-with-reserved-columns.json", "items-with-banned-columns.json", "trees.csv", "trees.tsv", "increment.txt", ], cwd=str(repo_dir), ) subprocess.call(git_commit + ["-m", "first"], cwd=str(repo_dir)) subprocess.call(["git", "branch", "-m", "main"], cwd=str(repo_dir)) (repo_dir / "items.json").write_text( json.dumps( [ { "product_id": 1, "name": "Gin", }, { "product_id": 2, "name": "Tonic 2", }, { "product_id": 3, "name": "Rum", }, ] ), "utf-8", ) (repo_dir / "items-with-reserved-columns.json").write_text( json.dumps( [ { "_id": 1, "_item": "Gin", "_version": "v1", "_commit": "commit1", "rowid": 5, }, { "_id": 2, "_item": "Tonic 2", "_version": "v1", "_commit": "commit1", "rowid": 6, }, { "_id": 3, "_item": "Rum", "_version": "v1", "_commit": "commit1", "rowid": 7, }, ] ), "utf-8", ) subprocess.call(git_commit + ["-m", "second", "-a"], cwd=str(repo_dir)) # Three more commits to test --skip for i in range(2, 4): (repo_dir / "increment.txt").write_text(str(i), "utf-8") subprocess.call( git_commit + ["-m", "increment {}".format(i), "-a"], cwd=str(repo_dir) ) return repo_dir @pytest.fixture def repo(tmpdir): return make_repo(tmpdir) def expected_create_view(namespace): return textwrap.dedent( """ CREATE VIEW {namespace}_version_detail AS select commits.commit_at as _commit_at, commits.hash as _commit_hash, {namespace}_version.*, ( select json_group_array(name) from columns where id in ( select column from {namespace}_changed where item_version = {namespace}_version._id ) ) as _changed_columns from {namespace}_version join commits on commits.id = {namespace}_version._commit; """.format( namespace=namespace ) ).strip() @pytest.mark.parametrize("namespace", (None, "custom")) def test_file_without_id(repo, tmpdir, namespace): runner = CliRunner() db_path = str(tmpdir / "db.db") with runner.isolated_filesystem(): options = ["file", db_path, str(repo / "items.json"), "--repo", str(repo)] if namespace: options += ["--namespace", namespace] result = runner.invoke(cli, options) assert result.exit_code == 0 
db = sqlite_utils.Database(db_path) assert db.schema == ( "CREATE TABLE [namespaces] (\n" " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT\n" ");\n" "CREATE UNIQUE INDEX [idx_namespaces_name]\n" " ON [namespaces] ([name]);\n" "CREATE TABLE [commits] (\n" " [id] INTEGER PRIMARY KEY,\n" " [namespace] INTEGER REFERENCES [namespaces]([id]),\n" " [hash] TEXT,\n" " [commit_at] TEXT\n" ");\n" "CREATE UNIQUE INDEX [idx_commits_namespace_hash]\n" " ON [commits] ([namespace], [hash]);\n" "CREATE TABLE [{}] (\n".format(namespace or "item") + " [product_id] INTEGER,\n" " [name] TEXT\n" ");" ) assert db["commits"].count == 2 # Should have some duplicates assert [(r["product_id"], r["name"]) for r in db[namespace or "item"].rows] == [ (1, "Gin"), (2, "Tonic"), (1, "Gin"), (2, "Tonic 2"), (3, "Rum"), ] @pytest.mark.parametrize("namespace", (None, "custom")) def test_file_with_id(repo, tmpdir, namespace): runner = CliRunner() db_path = str(tmpdir / "db.db") with runner.isolated_filesystem(): result = runner.invoke( cli, [ "file", db_path, str(repo / "items.json"), "--repo", str(repo), "--id", "product_id", ] + (["--namespace", namespace] if namespace else []), ) assert result.exit_code == 0 db = sqlite_utils.Database(db_path) item_table = namespace or "item" version_table = "{}_version".format(item_table) assert db.schema == """ CREATE TABLE [namespaces] ( [id] INTEGER PRIMARY KEY, [name] TEXT ); CREATE UNIQUE INDEX [idx_namespaces_name] ON [namespaces] ([name]); CREATE TABLE [commits] ( [id] INTEGER PRIMARY KEY, [namespace] INTEGER REFERENCES [namespaces]([id]), [hash] TEXT, [commit_at] TEXT ); CREATE UNIQUE INDEX [idx_commits_namespace_hash] ON [commits] ([namespace], [hash]); CREATE TABLE [{namespace}] ( [_id] INTEGER PRIMARY KEY, [_item_id] TEXT , [product_id] INTEGER, [name] TEXT, [_commit] INTEGER); CREATE UNIQUE INDEX [idx_{namespace}__item_id] ON [{namespace}] ([_item_id]); CREATE TABLE [{namespace}_version] ( [_id] INTEGER PRIMARY KEY, [_item] INTEGER REFERENCES [{namespace}]([_id]), [_version] INTEGER, [_commit] INTEGER REFERENCES [commits]([id]), [product_id] INTEGER, [name] TEXT, [_item_full_hash] TEXT ); CREATE TABLE [columns] ( [id] INTEGER PRIMARY KEY, [namespace] INTEGER REFERENCES [namespaces]([id]), [name] TEXT ); CREATE UNIQUE INDEX [idx_columns_namespace_name] ON [columns] ([namespace], [name]); CREATE TABLE [{namespace}_changed] ( [item_version] INTEGER REFERENCES [{namespace}_version]([_id]), [column] INTEGER REFERENCES [columns]([id]), PRIMARY KEY ([item_version], [column]) ); {view} CREATE INDEX [idx_{namespace}_version__item] ON [{namespace}_version] ([_item]); """.strip().format( namespace=namespace or "item", view=expected_create_view(namespace or "item"), ) assert db["commits"].count == 2 # Should have no duplicates item_version = [ r for r in db.query( "select product_id, _version, name from {}".format(version_table) ) ] assert item_version == [ {"product_id": 1, "_version": 1, "name": "Gin"}, {"product_id": 2, "_version": 1, "name": "Tonic"}, # product_id is None because it did not change here {"product_id": None, "_version": 2, "name": "Tonic 2"}, {"product_id": 3, "_version": 1, "name": "Rum"}, ] changed = list( db.query( """ select {namespace}.product_id, {namespace}_version._version as version, columns.name as column_name from {namespace}_changed join columns on {namespace}_changed.column = columns.id join {namespace}_version on {namespace}_changed.item_version = {namespace}_version._id join {namespace} on {namespace}._id = {namespace}_version._item order by 
{namespace}.product_id, {namespace}_version._version, columns.name """.format( namespace=namespace or "item" ) ) ) assert changed == [ {"product_id": 1, "version": 1, "column_name": "name"}, {"product_id": 1, "version": 1, "column_name": "product_id"}, {"product_id": 2, "version": 1, "column_name": "name"}, {"product_id": 2, "version": 1, "column_name": "product_id"}, {"product_id": 2, "version": 2, "column_name": "name"}, {"product_id": 3, "version": 1, "column_name": "name"}, {"product_id": 3, "version": 1, "column_name": "product_id"}, ] # Test the view view_rows = list( db.query( "select _version, product_id, name, _changed_columns from {}_version_detail".format( namespace or "item" ) ) ) # Sort order of _changed_columns JSON is undefined, so fix that for row in view_rows: row["_changed_columns"] = list(sorted(json.loads(row["_changed_columns"]))) assert view_rows == [ { "_version": 1, "product_id": 1, "name": "Gin", "_changed_columns": ["name", "product_id"], }, { "_version": 1, "product_id": 2, "name": "Tonic", "_changed_columns": ["name", "product_id"], }, { "_version": 2, "product_id": None, "name": "Tonic 2", "_changed_columns": ["name"], }, { "_version": 1, "product_id": 3, "name": "Rum", "_changed_columns": ["name", "product_id"], }, ] @pytest.mark.parametrize("namespace", (None, "custom")) def test_file_with_id_resume(repo, tmpdir, namespace): runner = CliRunner() db_path = str(tmpdir / "db.db") result = runner.invoke( cli, [ "file", db_path, str(repo / "items.json"), "--repo", str(repo), "--id", "product_id", ] + (["--namespace", namespace] if namespace else []), ) namespace = namespace or "item" assert result.exit_code == 0 db = sqlite_utils.Database(db_path) item_version = [ r for r in db.query( "select product_id, _version, name from {}_version".format(namespace) ) ] assert item_version == [ {"product_id": 1, "_version": 1, "name": "Gin"}, {"product_id": 2, "_version": 1, "name": "Tonic"}, {"product_id": None, "_version": 2, "name": "Tonic 2"}, {"product_id": 3, "_version": 1, "name": "Rum"}, ] # Now we edit, commit and try again (repo / "items.json").write_text( json.dumps( [ {"product_id": 1, "name": "Gin"}, {"product_id": 2, "name": "Tonic 2"}, # This line has changed from "Rum" to "Rum Pony": {"product_id": 3, "name": "Rum Pony"}, ] ), "utf-8", ) subprocess.call(git_commit + ["-a", "-m", "another"], cwd=str(repo)) result2 = runner.invoke( cli, [ "file", db_path, str(repo / "items.json"), "--repo", str(repo), "--id", "product_id", ] + (["--namespace", namespace] if namespace else []), catch_exceptions=False, ) assert result2.exit_code == 0 item_version2 = [ r for r in db.query( "select _item, product_id, _version, name from {}_version order by _item, _version".format( namespace ) ) ] assert item_version2 == [ {"_item": 1, "product_id": 1, "_version": 1, "name": "Gin"}, {"_item": 2, "product_id": 2, "_version": 1, "name": "Tonic"}, {"_item": 2, "product_id": None, "_version": 2, "name": "Tonic 2"}, {"_item": 3, "product_id": 3, "_version": 1, "name": "Rum"}, {"_item": 3, "product_id": None, "_version": 2, "name": "Rum Pony"}, ] def test_file_with_id_resume_two_namespaces(repo, tmpdir): # https://github.com/simonw/git-history/issues/43 runner = CliRunner() db_path = str(tmpdir / "db.db") def run(namespace): result = runner.invoke( cli, [ "file", db_path, str(repo / "items.json"), "--repo", str(repo), "--id", "product_id", "--namespace", namespace, ], ) assert result.exit_code == 0 for namespace in ("one", "two"): run(namespace) # Now modify items.json, commit and run again 
(repo / "items.json").write_text( json.dumps( [ {"product_id": 1, "name": "Gin"}, {"product_id": 2, "name": "Tonic 2"}, {"product_id": 3, "name": "Rum Pony"}, ] ), "utf-8", ) subprocess.call(git_commit + ["-a", "-m", "another"], cwd=str(repo)) for namespace in ("one", "two"): run(namespace) db = sqlite_utils.Database(db_path) assert set(db.table_names()) == { "namespaces", "commits", "one", "one_version", "columns", "one_changed", "two", "two_version", "two_changed", } # Should be five versions: Gin, Tonic -> Tonic 2, Rum -> Rum Pony assert db["one_version"].count == 5 assert db["two_version"].count == 5 @pytest.mark.parametrize("namespace", (None, "custom")) def test_file_with_id_full_versions(repo, tmpdir, namespace): runner = CliRunner() db_path = str(tmpdir / "db.db") with runner.isolated_filesystem(): result = runner.invoke( cli, [ "file", db_path, str(repo / "items.json"), "--repo", str(repo), "--id", "product_id", "--full-versions", ] + (["--namespace", namespace] if namespace else []), ) assert result.exit_code == 0 db = sqlite_utils.Database(db_path) item_table = namespace or "item" version_table = "{}_version".format(item_table) assert db.schema == ( "CREATE TABLE [namespaces] (\n" " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT\n" ");\n" "CREATE UNIQUE INDEX [idx_namespaces_name]\n" " ON [namespaces] ([name]);\n" "CREATE TABLE [commits] (\n" " [id] INTEGER PRIMARY KEY,\n" " [namespace] INTEGER REFERENCES [namespaces]([id]),\n" " [hash] TEXT,\n" " [commit_at] TEXT\n" ");\n" "CREATE UNIQUE INDEX [idx_commits_namespace_hash]\n" " ON [commits] ([namespace], [hash]);\n" "CREATE TABLE [{}] (\n".format(item_table) + " [_id] INTEGER PRIMARY KEY,\n" " [_item_id] TEXT\n" ", [product_id] INTEGER, [name] TEXT, [_commit] INTEGER);\n" "CREATE UNIQUE INDEX [idx_{}__item_id]\n".format(item_table) + " ON [{}] ([_item_id]);\n".format(item_table) + "CREATE TABLE [{}] (\n".format(version_table) + " [_id] INTEGER PRIMARY KEY,\n" " [_item] INTEGER REFERENCES [{}]([_id]),\n".format(item_table) + " [_version] INTEGER,\n" " [_commit] INTEGER REFERENCES [commits]([id]),\n" " [product_id] INTEGER,\n" " [name] TEXT\n" ");\n" + expected_create_view(namespace or "item") + "\nCREATE INDEX [idx_{}__item]\n".format(version_table) + " ON [{}] ([_item]);".format(version_table) ) assert db["commits"].count == 2 # Should have no duplicates item_version = [ r for r in db.query( "select product_id, _version, name from {}".format(version_table) ) ] assert item_version == [ {"product_id": 1, "_version": 1, "name": "Gin"}, {"product_id": 2, "_version": 1, "name": "Tonic"}, {"product_id": 2, "_version": 2, "name": "Tonic 2"}, {"product_id": 3, "_version": 1, "name": "Rum"}, ] def test_file_with_reserved_columns(repo, tmpdir): runner = CliRunner() db_path = str(tmpdir / "reserved.db") with runner.isolated_filesystem(): result = runner.invoke( cli, [ "file", db_path, str(repo / "items-with-reserved-columns.json"), "--repo", str(repo), "--id", "_id", "--full-versions", ], catch_exceptions=False, ) assert result.exit_code == 0 db = sqlite_utils.Database(db_path) expected_schema = ( textwrap.dedent( """ CREATE TABLE [namespaces] ( [id] INTEGER PRIMARY KEY, [name] TEXT ); CREATE UNIQUE INDEX [idx_namespaces_name] ON [namespaces] ([name]); CREATE TABLE [commits] ( [id] INTEGER PRIMARY KEY, [namespace] INTEGER REFERENCES [namespaces]([id]), [hash] TEXT, [commit_at] TEXT ); CREATE UNIQUE INDEX [idx_commits_namespace_hash] ON [commits] ([namespace], [hash]); CREATE TABLE [item] ( [_id] INTEGER PRIMARY KEY, [_item_id] TEXT , [_id_] 
INTEGER, [_item_] TEXT, [_version_] TEXT, [_commit_] TEXT, [rowid_] INTEGER, [_commit] INTEGER); CREATE UNIQUE INDEX [idx_item__item_id] ON [item] ([_item_id]); CREATE TABLE [item_version] ( [_id] INTEGER PRIMARY KEY, [_item] INTEGER REFERENCES [item]([_id]), [_version] INTEGER, [_commit] INTEGER REFERENCES [commits]([id]), [_id_] INTEGER, [_item_] TEXT, [_version_] TEXT, [_commit_] TEXT, [rowid_] INTEGER );""" ) + "\n" + expected_create_view("item") + "\nCREATE INDEX [idx_item_version__item]\n" " ON [item_version] ([_item]);" ).strip() assert db.schema == expected_schema item_version = [ r for r in db.query( "select _id_, _item_, _version_, _commit_, rowid_ from item_version" ) ] assert item_version == [ { "_id_": 1, "_item_": "Gin", "_version_": "v1", "_commit_": "commit1", "rowid_": 5, }, { "_id_": 2, "_item_": "Tonic", "_version_": "v1", "_commit_": "commit1", "rowid_": 6, }, { "_id_": 2, "_item_": "Tonic 2", "_version_": "v1", "_commit_": "commit1", "rowid_": 6, }, { "_id_": 3, "_item_": "Rum", "_version_": "v1", "_commit_": "commit1", "rowid_": 7, }, ] @pytest.mark.parametrize("file", ("trees.csv", "trees.tsv")) def test_csv_tsv(repo, tmpdir, file): runner = CliRunner() db_path = str(tmpdir / "db.db") with runner.isolated_filesystem(): result = runner.invoke( cli, [ "file", db_path, str(repo / file), "--repo", str(repo), "--id", "TreeID", "--csv", "--full-versions", ], catch_exceptions=False, ) assert result.exit_code == 0 db = sqlite_utils.Database(db_path) assert ( db.schema == textwrap.dedent( """ CREATE TABLE [namespaces] ( [id] INTEGER PRIMARY KEY, [name] TEXT ); CREATE UNIQUE INDEX [idx_namespaces_name] ON [namespaces] ([name]); CREATE TABLE [commits] ( [id] INTEGER PRIMARY KEY, [namespace] INTEGER REFERENCES [namespaces]([id]), [hash] TEXT, [commit_at] TEXT ); CREATE UNIQUE INDEX [idx_commits_namespace_hash] ON [commits] ([namespace], [hash]); CREATE TABLE [item] ( [_id] INTEGER PRIMARY KEY, [_item_id] TEXT , [TreeID] TEXT, [name] TEXT, [_commit] INTEGER); CREATE UNIQUE INDEX [idx_item__item_id] ON [item] ([_item_id]); CREATE TABLE [item_version] ( [_id] INTEGER PRIMARY KEY, [_item] INTEGER REFERENCES [item]([_id]), [_version] INTEGER, [_commit] INTEGER REFERENCES [commits]([id]), [TreeID] TEXT, [name] TEXT );""" ).strip() + "\n" + expected_create_view("item") + "\nCREATE INDEX [idx_item_version__item]\n" " ON [item_version] ([_item]);" ) @pytest.mark.parametrize( "dialect,expected_schema", ( ("excel", "CREATE TABLE [item] (\n [TreeID] TEXT,\n [name] TEXT\n)"), ("excel-tab", "CREATE TABLE [item] (\n [TreeID,name] TEXT\n)"), ), ) def test_csv_dialect(repo, tmpdir, dialect, expected_schema): runner = CliRunner() db_path = str(tmpdir / "db.db") with runner.isolated_filesystem(): result = runner.invoke( cli, [ "file", db_path, str(repo / "trees.csv"), "--repo", str(repo), "--dialect", dialect, ], catch_exceptions=False, ) assert result.exit_code == 0 db = sqlite_utils.Database(db_path) assert db["item"].schema == expected_schema @pytest.mark.parametrize( "convert,expected_rows", ( ( "json.loads(content.upper())", [ {"PRODUCT_ID": 1, "NAME": "GIN"}, {"PRODUCT_ID": 2, "NAME": "TONIC"}, {"PRODUCT_ID": 1, "NAME": "GIN"}, {"PRODUCT_ID": 2, "NAME": "TONIC 2"}, {"PRODUCT_ID": 3, "NAME": "RUM"}, ], ), # Generator ( ( "data = json.loads(content)\n" "for item in data:\n" ' yield {"just_name": item["name"]}' ), [ {"just_name": "Gin"}, {"just_name": "Tonic"}, {"just_name": "Gin"}, {"just_name": "<NAME>"}, {"just_name": "Rum"}, ], ), ), ) def test_convert(repo, tmpdir, convert, expected_rows): 
runner = CliRunner() db_path = str(tmpdir / "db.db") with runner.isolated_filesystem(): result = runner.invoke( cli, [ "file", db_path, str(repo / "items.json"), "--repo", str(repo), "--convert", convert, ], catch_exceptions=False, ) assert result.exit_code == 0 db = sqlite_utils.Database(db_path) rows = [{k: v for k, v in r.items() if k != "_commit"} for r in db["item"].rows] assert rows == expected_rows def test_convert_xml(repo, tmpdir): runner = CliRunner() (repo / "items.xml").write_text( """ <items> <item name="one" value="1" /> <item name="two" value="2" /> </items> """, "utf-8", ) subprocess.call(["git", "add", "items.xml"], cwd=str(repo)) subprocess.call(git_commit + ["-m", "items.xml"], cwd=str(repo)) db_path = str(tmpdir / "db.db") result = runner.invoke( cli, [ "file", db_path, str(repo / "items.xml"), "--repo", str(repo), "--convert", textwrap.dedent( """ tree = xml.etree.ElementTree.fromstring(content) return [el.attrib for el in tree.iter("item")] """ ), "--import", "xml.etree.ElementTree", ], catch_exceptions=False, ) assert result.exit_code == 0 db = sqlite_utils.Database(db_path) assert list(db["item"].rows) == [ {"name": "one", "value": "1"}, {"name": "two", "value": "2"}, ] @pytest.mark.parametrize( "options,expected_texts", ( ([], ["1", "2", "3"]), (["--skip", 0], ["2", "3"]), (["--skip", 0, "--skip", 2], ["3"]), (["--start-at", 2], ["2", "3"]), (["--start-after", 2], ["3"]), (["--start-at", 3], ["3"]), (["--start-after", 3], []), ), ) def test_skip_options(repo, tmpdir, options, expected_texts): runner = CliRunner() commits = list( reversed( subprocess.check_output(["git", "log", "--pretty=format:%H"], cwd=str(repo)) .decode("utf-8") .split("\n") ) ) assert len(commits) == 4 # Rewrite options to replace integers with the corresponding commit hash options = [commits[item] if isinstance(item, int) else item for item in options] db_path = str(tmpdir / "db.db") result = runner.invoke( cli, [ "file", db_path, str(repo / "increment.txt"), "--repo", str(repo), "--convert", '[{"id": 1, "text": content.decode("utf-8")}]', "--id", "id", ] + options, catch_exceptions=False, ) assert result.exit_code == 0 db = sqlite_utils.Database(db_path) actual_text = [r["text"] for r in db["item_version"].rows] assert actual_text == expected_texts def test_reserved_columns_are_reserved(tmpdir, repo): runner = CliRunner() db_path = str(tmpdir / "db.db") runner.invoke( cli, [ "file", db_path, str(repo / "items.json"), "--repo", str(repo), "--id", "product_id", ], ) # Find all columns with _ prefixes and no suffix db = sqlite_utils.Database(db_path) with_prefix = {"rowid"} for table in itertools.chain(db.tables, db.views): for column in table.columns_dict: if column.startswith("_") and not column.endswith("_"): with_prefix.add(column) assert with_prefix == set(RESERVED) @pytest.mark.parametrize("use_wal", (True, False)) def test_wal(repo, tmpdir, use_wal): runner = CliRunner() db_path = str(tmpdir / "db.db") options = [ "file", db_path, str(repo / "items.json"), "--repo", str(repo), ] if use_wal: options.append("--wal") result = runner.invoke( cli, options, catch_exceptions=False, ) assert result.exit_code == 0 db = sqlite_utils.Database(db_path) expected_journal_mode = "wal" if use_wal else "delete" assert db.journal_mode == expected_journal_mode
[ "textwrap.dedent", "json.loads", "sqlite_utils.Database", "json.dumps", "pytest.mark.parametrize", "click.testing.CliRunner", "itertools.chain" ]
[((5263, 5317), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""namespace"""', "(None, 'custom')"], {}), "('namespace', (None, 'custom'))\n", (5286, 5317), False, 'import pytest\n'), ((6734, 6788), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""namespace"""', "(None, 'custom')"], {}), "('namespace', (None, 'custom'))\n", (6757, 6788), False, 'import pytest\n'), ((11720, 11774), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""namespace"""', "(None, 'custom')"], {}), "('namespace', (None, 'custom'))\n", (11743, 11774), False, 'import pytest\n'), ((15692, 15746), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""namespace"""', "(None, 'custom')"], {}), "('namespace', (None, 'custom'))\n", (15715, 15746), False, 'import pytest\n'), ((21266, 21325), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""file"""', "('trees.csv', 'trees.tsv')"], {}), "('file', ('trees.csv', 'trees.tsv'))\n", (21289, 21325), False, 'import pytest\n'), ((23172, 23376), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dialect,expected_schema"""', '((\'excel\', """CREATE TABLE [item] (\n [TreeID] TEXT,\n [name] TEXT\n)"""),\n (\'excel-tab\', """CREATE TABLE [item] (\n [TreeID,name] TEXT\n)"""))'], {}), '(\'dialect,expected_schema\', ((\'excel\',\n """CREATE TABLE [item] (\n [TreeID] TEXT,\n [name] TEXT\n)"""), (\n \'excel-tab\', """CREATE TABLE [item] (\n [TreeID,name] TEXT\n)""")))\n', (23195, 23376), False, 'import pytest\n'), ((24006, 24509), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""convert,expected_rows"""', '((\'json.loads(content.upper())\', [{\'PRODUCT_ID\': 1, \'NAME\': \'GIN\'}, {\n \'PRODUCT_ID\': 2, \'NAME\': \'TONIC\'}, {\'PRODUCT_ID\': 1, \'NAME\': \'GIN\'}, {\n \'PRODUCT_ID\': 2, \'NAME\': \'TONIC 2\'}, {\'PRODUCT_ID\': 3, \'NAME\': \'RUM\'}]),\n (\n """data = json.loads(content)\nfor item in data:\n yield {"just_name": item["name"]}"""\n , [{\'just_name\': \'Gin\'}, {\'just_name\': \'Tonic\'}, {\'just_name\': \'Gin\'},\n {\'just_name\': \'<NAME>\'}, {\'just_name\': \'Rum\'}]))'], {}), '(\'convert,expected_rows\', ((\n \'json.loads(content.upper())\', [{\'PRODUCT_ID\': 1, \'NAME\': \'GIN\'}, {\n \'PRODUCT_ID\': 2, \'NAME\': \'TONIC\'}, {\'PRODUCT_ID\': 1, \'NAME\': \'GIN\'}, {\n \'PRODUCT_ID\': 2, \'NAME\': \'TONIC 2\'}, {\'PRODUCT_ID\': 3, \'NAME\': \'RUM\'}]),\n (\n """data = json.loads(content)\nfor item in data:\n yield {"just_name": item["name"]}"""\n , [{\'just_name\': \'Gin\'}, {\'just_name\': \'Tonic\'}, {\'just_name\': \'Gin\'},\n {\'just_name\': \'<NAME>\'}, {\'just_name\': \'Rum\'}])))\n', (24029, 24509), False, 'import pytest\n'), ((26673, 26948), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""options,expected_texts"""', "(([], ['1', '2', '3']), (['--skip', 0], ['2', '3']), (['--skip', 0,\n '--skip', 2], ['3']), (['--start-at', 2], ['2', '3']), ([\n '--start-after', 2], ['3']), (['--start-at', 3], ['3']), ([\n '--start-after', 3], []))"], {}), "('options,expected_texts', (([], ['1', '2', '3']), (\n ['--skip', 0], ['2', '3']), (['--skip', 0, '--skip', 2], ['3']), ([\n '--start-at', 2], ['2', '3']), (['--start-after', 2], ['3']), ([\n '--start-at', 3], ['3']), (['--start-after', 3], [])))\n", (26696, 26948), False, 'import pytest\n'), ((28779, 28828), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_wal"""', '(True, False)'], {}), "('use_wal', (True, False))\n", (28802, 28828), False, 'import pytest\n'), ((5382, 5393), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5391, 
5393), False, 'from click.testing import CliRunner\n'), ((5711, 5741), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (5732, 5741), False, 'import sqlite_utils\n'), ((6850, 6861), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (6859, 6861), False, 'from click.testing import CliRunner\n'), ((7329, 7359), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (7350, 7359), False, 'import sqlite_utils\n'), ((11843, 11854), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (11852, 11854), False, 'from click.testing import CliRunner\n'), ((12267, 12297), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (12288, 12297), False, 'import sqlite_utils\n'), ((14294, 14305), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (14303, 14305), False, 'from click.testing import CliRunner\n'), ((15282, 15312), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (15303, 15312), False, 'import sqlite_utils\n'), ((15822, 15833), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (15831, 15833), False, 'from click.testing import CliRunner\n'), ((16336, 16366), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (16357, 16366), False, 'import sqlite_utils\n'), ((18435, 18446), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (18444, 18446), False, 'from click.testing import CliRunner\n'), ((18942, 18972), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (18963, 18972), False, 'import sqlite_utils\n'), ((21377, 21388), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (21386, 21388), False, 'from click.testing import CliRunner\n'), ((21876, 21906), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (21897, 21906), False, 'import sqlite_utils\n'), ((23474, 23485), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (23483, 23485), False, 'from click.testing import CliRunner\n'), ((23924, 23954), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (23945, 23954), False, 'import sqlite_utils\n'), ((24928, 24939), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (24937, 24939), False, 'from click.testing import CliRunner\n'), ((25379, 25409), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (25400, 25409), False, 'import sqlite_utils\n'), ((25579, 25590), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (25588, 25590), False, 'from click.testing import CliRunner\n'), ((26517, 26547), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (26538, 26547), False, 'import sqlite_utils\n'), ((27083, 27094), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (27092, 27094), False, 'from click.testing import CliRunner\n'), ((27929, 27959), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (27950, 27959), False, 'import sqlite_utils\n'), ((28133, 28144), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (28142, 28144), False, 'from click.testing import CliRunner\n'), ((28472, 28502), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (28493, 28502), False, 'import sqlite_utils\n'), ((28548, 28584), 'itertools.chain', 'itertools.chain', (['db.tables', 'db.views'], {}), '(db.tables, db.views)\n', (28563, 28584), False, 'import itertools\n'), ((28879, 
28890), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (28888, 28890), False, 'from click.testing import CliRunner\n'), ((29239, 29269), 'sqlite_utils.Database', 'sqlite_utils.Database', (['db_path'], {}), '(db_path)\n', (29260, 29269), False, 'import sqlite_utils\n'), ((507, 693), 'json.dumps', 'json.dumps', (["[{'IncidentID': 'abc123', 'Location': 'Corner of 4th and Vermont', 'Type':\n 'fire'}, {'IncidentID': 'cde448', 'Location': '555 West Example Drive',\n 'Type': 'medical'}]"], {}), "([{'IncidentID': 'abc123', 'Location':\n 'Corner of 4th and Vermont', 'Type': 'fire'}, {'IncidentID': 'cde448',\n 'Location': '555 West Example Drive', 'Type': 'medical'}])\n", (517, 693), False, 'import json\n'), ((987, 1073), 'json.dumps', 'json.dumps', (["[{'product_id': 1, 'name': 'Gin'}, {'product_id': 2, 'name': 'Tonic'}]"], {}), "([{'product_id': 1, 'name': 'Gin'}, {'product_id': 2, 'name':\n 'Tonic'}])\n", (997, 1073), False, 'import json\n'), ((1353, 1535), 'json.dumps', 'json.dumps', (["[{'_id': 1, '_item': 'Gin', '_version': 'v1', '_commit': 'commit1', 'rowid':\n 5}, {'_id': 2, '_item': 'Tonic', '_version': 'v1', '_commit': 'commit1',\n 'rowid': 6}]"], {}), "([{'_id': 1, '_item': 'Gin', '_version': 'v1', '_commit':\n 'commit1', 'rowid': 5}, {'_id': 2, '_item': 'Tonic', '_version': 'v1',\n '_commit': 'commit1', 'rowid': 6}])\n", (1363, 1535), False, 'import json\n'), ((1929, 1974), 'json.dumps', 'json.dumps', (["[{'_id_': 1, '_version_': 'Gin'}]"], {}), "([{'_id_': 1, '_version_': 'Gin'}])\n", (1939, 1974), False, 'import json\n'), ((2976, 3098), 'json.dumps', 'json.dumps', (["[{'product_id': 1, 'name': 'Gin'}, {'product_id': 2, 'name': 'Tonic 2'}, {\n 'product_id': 3, 'name': 'Rum'}]"], {}), "([{'product_id': 1, 'name': 'Gin'}, {'product_id': 2, 'name':\n 'Tonic 2'}, {'product_id': 3, 'name': 'Rum'}])\n", (2986, 3098), False, 'import json\n'), ((3453, 3721), 'json.dumps', 'json.dumps', (["[{'_id': 1, '_item': 'Gin', '_version': 'v1', '_commit': 'commit1', 'rowid':\n 5}, {'_id': 2, '_item': 'Tonic 2', '_version': 'v1', '_commit':\n 'commit1', 'rowid': 6}, {'_id': 3, '_item': 'Rum', '_version': 'v1',\n '_commit': 'commit1', 'rowid': 7}]"], {}), "([{'_id': 1, '_item': 'Gin', '_version': 'v1', '_commit':\n 'commit1', 'rowid': 5}, {'_id': 2, '_item': 'Tonic 2', '_version': 'v1',\n '_commit': 'commit1', 'rowid': 6}, {'_id': 3, '_item': 'Rum',\n '_version': 'v1', '_commit': 'commit1', 'rowid': 7}])\n", (3463, 3721), False, 'import json\n'), ((12812, 12939), 'json.dumps', 'json.dumps', (["[{'product_id': 1, 'name': 'Gin'}, {'product_id': 2, 'name': 'Tonic 2'}, {\n 'product_id': 3, 'name': 'Rum Pony'}]"], {}), "([{'product_id': 1, 'name': 'Gin'}, {'product_id': 2, 'name':\n 'Tonic 2'}, {'product_id': 3, 'name': 'Rum Pony'}])\n", (12822, 12939), False, 'import json\n'), ((14906, 15033), 'json.dumps', 'json.dumps', (["[{'product_id': 1, 'name': 'Gin'}, {'product_id': 2, 'name': 'Tonic 2'}, {\n 'product_id': 3, 'name': 'Rum Pony'}]"], {}), "([{'product_id': 1, 'name': 'Gin'}, {'product_id': 2, 'name':\n 'Tonic 2'}, {'product_id': 3, 'name': 'Rum Pony'}])\n", (14916, 15033), False, 'import json\n'), ((26169, 26343), 'textwrap.dedent', 'textwrap.dedent', (['"""\n tree = xml.etree.ElementTree.fromstring(content)\n return [el.attrib for el in tree.iter("item")]\n """'], {}), '(\n """\n tree = xml.etree.ElementTree.fromstring(content)\n return [el.attrib for el in tree.iter("item")]\n """\n )\n', (26184, 26343), False, 'import textwrap\n'), ((11012, 11047), 'json.loads', 'json.loads', 
(["row['_changed_columns']"], {}), "(row['_changed_columns'])\n", (11022, 11047), False, 'import json\n'), ((19005, 20192), 'textwrap.dedent', 'textwrap.dedent', (['"""\n CREATE TABLE [namespaces] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n );\n CREATE UNIQUE INDEX [idx_namespaces_name]\n ON [namespaces] ([name]);\n CREATE TABLE [commits] (\n [id] INTEGER PRIMARY KEY,\n [namespace] INTEGER REFERENCES [namespaces]([id]),\n [hash] TEXT,\n [commit_at] TEXT\n );\n CREATE UNIQUE INDEX [idx_commits_namespace_hash]\n ON [commits] ([namespace], [hash]);\n CREATE TABLE [item] (\n [_id] INTEGER PRIMARY KEY,\n [_item_id] TEXT\n , [_id_] INTEGER, [_item_] TEXT, [_version_] TEXT, [_commit_] TEXT, [rowid_] INTEGER, [_commit] INTEGER);\n CREATE UNIQUE INDEX [idx_item__item_id]\n ON [item] ([_item_id]);\n CREATE TABLE [item_version] (\n [_id] INTEGER PRIMARY KEY,\n [_item] INTEGER REFERENCES [item]([_id]),\n [_version] INTEGER,\n [_commit] INTEGER REFERENCES [commits]([id]),\n [_id_] INTEGER,\n [_item_] TEXT,\n [_version_] TEXT,\n [_commit_] TEXT,\n [rowid_] INTEGER\n );"""'], {}), '(\n """\n CREATE TABLE [namespaces] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n );\n CREATE UNIQUE INDEX [idx_namespaces_name]\n ON [namespaces] ([name]);\n CREATE TABLE [commits] (\n [id] INTEGER PRIMARY KEY,\n [namespace] INTEGER REFERENCES [namespaces]([id]),\n [hash] TEXT,\n [commit_at] TEXT\n );\n CREATE UNIQUE INDEX [idx_commits_namespace_hash]\n ON [commits] ([namespace], [hash]);\n CREATE TABLE [item] (\n [_id] INTEGER PRIMARY KEY,\n [_item_id] TEXT\n , [_id_] INTEGER, [_item_] TEXT, [_version_] TEXT, [_commit_] TEXT, [rowid_] INTEGER, [_commit] INTEGER);\n CREATE UNIQUE INDEX [idx_item__item_id]\n ON [item] ([_item_id]);\n CREATE TABLE [item_version] (\n [_id] INTEGER PRIMARY KEY,\n [_item] INTEGER REFERENCES [item]([_id]),\n [_version] INTEGER,\n [_commit] INTEGER REFERENCES [commits]([id]),\n [_id_] INTEGER,\n [_item_] TEXT,\n [_version_] TEXT,\n [_commit_] TEXT,\n [rowid_] INTEGER\n );"""\n )\n', (19020, 20192), False, 'import textwrap\n'), ((21949, 22991), 'textwrap.dedent', 'textwrap.dedent', (['"""\n CREATE TABLE [namespaces] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n );\n CREATE UNIQUE INDEX [idx_namespaces_name]\n ON [namespaces] ([name]);\n CREATE TABLE [commits] (\n [id] INTEGER PRIMARY KEY,\n [namespace] INTEGER REFERENCES [namespaces]([id]),\n [hash] TEXT,\n [commit_at] TEXT\n );\n CREATE UNIQUE INDEX [idx_commits_namespace_hash]\n ON [commits] ([namespace], [hash]);\n CREATE TABLE [item] (\n [_id] INTEGER PRIMARY KEY,\n [_item_id] TEXT\n , [TreeID] TEXT, [name] TEXT, [_commit] INTEGER);\n CREATE UNIQUE INDEX [idx_item__item_id]\n ON [item] ([_item_id]);\n CREATE TABLE [item_version] (\n [_id] INTEGER PRIMARY KEY,\n [_item] INTEGER REFERENCES [item]([_id]),\n [_version] INTEGER,\n [_commit] INTEGER REFERENCES [commits]([id]),\n [TreeID] TEXT,\n [name] TEXT\n );"""'], {}), '(\n """\n CREATE TABLE [namespaces] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n );\n CREATE UNIQUE INDEX [idx_namespaces_name]\n ON [namespaces] ([name]);\n CREATE TABLE [commits] (\n [id] INTEGER PRIMARY KEY,\n [namespace] INTEGER REFERENCES [namespaces]([id]),\n [hash] TEXT,\n [commit_at] TEXT\n );\n CREATE UNIQUE INDEX [idx_commits_namespace_hash]\n ON [commits] ([namespace], [hash]);\n CREATE TABLE [item] (\n [_id] INTEGER PRIMARY KEY,\n [_item_id] TEXT\n , [TreeID] TEXT, [name] TEXT, [_commit] INTEGER);\n CREATE UNIQUE INDEX [idx_item__item_id]\n ON [item] ([_item_id]);\n CREATE TABLE [item_version] (\n [_id] INTEGER 
PRIMARY KEY,\n [_item] INTEGER REFERENCES [item]([_id]),\n [_version] INTEGER,\n [_commit] INTEGER REFERENCES [commits]([id]),\n [TreeID] TEXT,\n [name] TEXT\n );"""\n )\n', (21964, 22991), False, 'import textwrap\n')]
import mysql.connector import progressbar import argparse import yaml import re import collections def main(): parser = argparse.ArgumentParser() parser.add_argument('--out', type=str, default='data/racist/racist.txt', help='text file where the data is written to') args = parser.parse_args() with open('config.yml', 'r') as c: config = yaml.load(c) generate_dataset(args, config) def iter_row(cursor, size=1000): while True: rows = cursor.fetchmany(size) if not rows: break for row in rows: yield row def generate_dataset(args, config): cnx = mysql.connector.connect(user=config["database"]["user"], password=config["database"]["password"], host=config["database"]["host"], database=config["database"]["db"]) cursor = cnx.cursor() cursor.execute('SELECT count(*) FROM comment') count = cursor.fetchone()[0] bar = progressbar.ProgressBar(max_value=count) # This query groups comments by posts and places subcomments after their parent comments to # have as much context between the comments as possible. Everything is sorted ASC by date. print('Executing SQL query...') cursor.execute(''' # Parent comments SELECT p.message, user.name, post.created_time as post_created_time, p.created_time as comment_created_time, Null as subcomment_created_time FROM comment p JOIN user ON user.id = p.user JOIN post ON post.id = p.post WHERE p.parent_comment IS NULL UNION # Child comments SELECT c.message, user.name, post.created_time as post_created_time, p.created_time as comment_created_time, c.created_time as subcomment_created_time FROM comment c JOIN user ON user.id = c.user JOIN comment p on p.id = c.parent_comment JOIN post ON post.id = p.post ORDER BY post_created_time ASC, comment_created_time ASC, subcomment_created_time ASC LIMIT 300000 ''') print('Done') ds = Dataset() # As people tend to reference other people in subcomments, we collect the names of # all subcomment authors to remove them from the result in the end. authors = set() comments = [] for (message, author, post_date, comment_date, subcomment_date) in bar(iter_row(cursor)): if subcomment_date is None: # is parent ds.push(comments, authors) authors = {author} comments = [message] else: # is child authors.add(author) comments.append(message) ds.write(args.out) class Dataset: def __init__(self): self.batches = [] self.vocab_counter = collections.Counter() def write(self, outfile): """Writes the dataset to a text file""" output = self.create_output() ending = outfile.split('.')[-1] if ending == 'txt': with open(outfile, "wb") as f: f.write(output) # TODO add bzip else: raise ValueError('outfile has to be a .txt file') @profile def push(self, comments, authors): """Adds a new bathch of comments to the dataset. The set of authors ist used to further clean the comments""" lines = [] for comment in comments: lines.extend(comment.replace('\r', '\n').split('\n')) txt = '' authors = [re.escape(author) for author in authors] for line in lines: line = self.remove_usernames(line, authors) if 4 < len(line) < 500: txt += '> {}\n'.format(line) self.batches.append(txt) self.vocab_counter.update(txt) def remove_usernames(self, text, authors): """Removing user names that the crawler was not able to filter out because they were not returned in Graph API's message_tags""" # First remove the old fashined @ tags if len(text) == 0 or ('@' in text and len(text.split(' ')) <= 3): return '' if text[0] == '@': text = re.sub('@ ?.*?((:|,|\.| {2})| .*?[:,. 
])', '', text) else: text = re.sub('@', '', text) # Then the names of all the authors from the comment and it's subcomments because they mainly reference each other text = re.sub('({})'.format('|'.join(authors)), '', text) return text.strip() @profile def create_output(self): """Generates one big cp1252 string""" output = ''.join(self.batches) #Remove all characters that appear in less than 0.002% of the cases threshold = len(output) * 0.00002 chars_to_remove = [] for char, count in reversed(self.vocab_counter.most_common()): if count < threshold: chars_to_remove.append(char) else: break output = re.sub('[' + re.escape(''.join(chars_to_remove)) + ']', '', output) return output.encode("cp1252", errors="ignore") def merge_lines(self, lines): """Cleans and selects qualifying lines and merges them to a string""" txt = '' for line in lines: line = self.clean_tags(line) if 4 < len(line) < 500: txt += '> {}\n'.format(line) return txt if __name__ == '__main__': main()
[ "yaml.load", "argparse.ArgumentParser", "re.escape", "collections.Counter", "progressbar.ProgressBar", "re.sub" ]
[((126, 151), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (149, 151), False, 'import argparse\n'), ((1048, 1088), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'max_value': 'count'}), '(max_value=count)\n', (1071, 1088), False, 'import progressbar\n'), ((388, 400), 'yaml.load', 'yaml.load', (['c'], {}), '(c)\n', (397, 400), False, 'import yaml\n'), ((2939, 2960), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (2958, 2960), False, 'import collections\n'), ((3646, 3663), 're.escape', 're.escape', (['author'], {}), '(author)\n', (3655, 3663), False, 'import re\n'), ((4297, 4350), 're.sub', 're.sub', (['"""@ ?.*?((:|,|\\\\.| {2})| .*?[:,. ])"""', '""""""', 'text'], {}), "('@ ?.*?((:|,|\\\\.| {2})| .*?[:,. ])', '', text)\n", (4303, 4350), False, 'import re\n'), ((4383, 4404), 're.sub', 're.sub', (['"""@"""', '""""""', 'text'], {}), "('@', '', text)\n", (4389, 4404), False, 'import re\n')]
import os import glob import csv import argparse from xlsxwriter.workbook import Workbook def arguments(): parser = argparse.ArgumentParser() parser.add_argument('path', default = os.getcwd(), help = "Path to CSV files") parser.add_argument('--outname', default = None, help = "Name of output XLSX file") return parser.parse_args() def wrap_csvs(csvpath, outname): directory_path = os.path.abspath(csvpath) if outname is None: filename = os.path.basename(directory_path + ".xlsx") else: filename = outname workbook_name = os.path.join(directory_path, filename) workbook = Workbook(workbook_name) for c in glob.glob(os.path.join(csvpath, "*.csv")): sheetname = os.path.basename(c[:c.rfind(".")]) print("Adding {} to {}".format(c, workbook_name)) worksheet = workbook.add_worksheet(sheetname) with open(c, 'r') as f: reader = csv.reader(f) for rindex, row in enumerate(reader): for cindex, col in enumerate(row): try: worksheet.write(rindex,cindex, float(col)) except ValueError: worksheet.write(rindex, cindex, col) workbook.close() def main(): args = arguments() wrap_csvs(args.path, args.outname) if __name__ == "__main__": main()
[ "os.path.abspath", "csv.reader", "argparse.ArgumentParser", "os.path.basename", "os.getcwd", "xlsxwriter.workbook.Workbook", "os.path.join" ]
[((122, 147), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (145, 147), False, 'import argparse\n'), ((410, 434), 'os.path.abspath', 'os.path.abspath', (['csvpath'], {}), '(csvpath)\n', (425, 434), False, 'import os\n'), ((584, 622), 'os.path.join', 'os.path.join', (['directory_path', 'filename'], {}), '(directory_path, filename)\n', (596, 622), False, 'import os\n'), ((638, 661), 'xlsxwriter.workbook.Workbook', 'Workbook', (['workbook_name'], {}), '(workbook_name)\n', (646, 661), False, 'from xlsxwriter.workbook import Workbook\n'), ((483, 525), 'os.path.basename', 'os.path.basename', (["(directory_path + '.xlsx')"], {}), "(directory_path + '.xlsx')\n", (499, 525), False, 'import os\n'), ((686, 716), 'os.path.join', 'os.path.join', (['csvpath', '"""*.csv"""'], {}), "(csvpath, '*.csv')\n", (698, 716), False, 'import os\n'), ((190, 201), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (199, 201), False, 'import os\n'), ((945, 958), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (955, 958), False, 'import csv\n')]
from django.utils.translation import ugettext_lazy as _ import horizon from openstack_dashboard.local.local_settings import SIGNUP_ROLES, OPENSTACK_API_VERSIONS from openstack_dashboard.dashboards.identity.signups.common import get_admin_ksclient class Signups(horizon.Panel): name = _("Signups") slug = 'signups' def allowed(self, context): # check if user has special signup role user = context['request'].user.id tenant = context['request'].user.tenant_id keystone = get_admin_ksclient() if OPENSTACK_API_VERSIONS['identity'] == 3: roles = keystone.roles.list(user=user, project=tenant) else: roles = keystone.roles.roles_for_user(user, tenant) for role in roles: if role.name in SIGNUP_ROLES or role.id in SIGNUP_ROLES: return True return False
[ "django.utils.translation.ugettext_lazy", "openstack_dashboard.dashboards.identity.signups.common.get_admin_ksclient" ]
[((290, 302), 'django.utils.translation.ugettext_lazy', '_', (['"""Signups"""'], {}), "('Signups')\n", (291, 302), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((518, 538), 'openstack_dashboard.dashboards.identity.signups.common.get_admin_ksclient', 'get_admin_ksclient', ([], {}), '()\n', (536, 538), False, 'from openstack_dashboard.dashboards.identity.signups.common import get_admin_ksclient\n')]
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. # Adapted from https://github.com/microsoft/CodeXGLUE/blob/main/Text-Code/NL-code-search-Adv/evaluator/evaluator.py import logging import sys, json import numpy as np def read_answers(filename): answers = {} with open(filename) as f: for idx, line in enumerate(f): line = line.strip() js = json.loads(line) answers[str(idx)] = str(idx) return answers def read_predictions(filename): predictions = {} with open(filename) as f: for idx, line in enumerate(f): line = line.strip() js = json.loads(line) predictions[str(idx)] = js['answers'] return predictions def calculate_scores(answers, predictions): scores = [] for key in answers: # import ipdb # ipdb.set_trace() if key not in predictions: logging.error("Missing prediction for url {}.".format(key)) sys.exit() flag = False for rank, idx in enumerate(predictions[key]): if idx == answers[key]: scores.append(1 / (rank + 1)) flag = True break if flag is False: scores.append(0) result = {} result['MRR'] = round(np.mean(scores), 4) return result def main(): import argparse parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for POJ-104 dataset.') parser.add_argument('--answers', '-a', help="filename of the labels, in txt format.") parser.add_argument('--predictions', '-p', help="filename of the leaderboard predictions, in txt format.") args = parser.parse_args() print("reading gold answers") answers = read_answers(args.answers) print("reading predcited answers") predictions = read_predictions(args.predictions) print("computing scores") scores = calculate_scores(answers, predictions) print(scores) if __name__ == '__main__': main() # python mrr.py -a /home/wasiahmad/workspace/projects/NeuralKpGen/data/scikp/kp20k_separated/KP20k.test.jsonl -p /home/rizwan/DPR/predictions_KP20k.jsonl
[ "numpy.mean", "argparse.ArgumentParser", "sys.exit", "json.loads" ]
[((1399, 1496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate leaderboard predictions for POJ-104 dataset."""'}), "(description=\n 'Evaluate leaderboard predictions for POJ-104 dataset.')\n", (1422, 1496), False, 'import argparse\n'), ((1314, 1329), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1321, 1329), True, 'import numpy as np\n'), ((405, 421), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (415, 421), False, 'import sys, json\n'), ((655, 671), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (665, 671), False, 'import sys, json\n'), ((999, 1009), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1007, 1009), False, 'import sys, json\n')]
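A minimal sketch of how the calculate_scores helper above turns ranked predictions into MRR, assuming the evaluator is importable as mrr (a hypothetical module name) and using invented ids:

# Hedged illustration of calculate_scores(); "mrr" is a hypothetical module
# name for the evaluator script, and the ids below are invented.
from mrr import calculate_scores  # hypothetical module name

answers = {"0": "0", "1": "1"}
predictions = {
    "0": ["0", "7", "3"],  # correct id at rank 1 -> reciprocal rank 1.0
    "1": ["4", "9", "1"],  # correct id at rank 3 -> reciprocal rank 1/3
}

print(calculate_scores(answers, predictions))  # {'MRR': 0.6667}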
import threading


#
# @author andy
#
class LongAdder:
    """A simple thread-safe counter guarded by a lock."""

    def __init__(self):
        self._lock = threading.Lock()
        self._value = 0

    def get_value(self):
        return self._value

    def increment(self):
        with self._lock:
            self._value += 1

    def decrement(self):
        with self._lock:
            self._value -= 1

    def set(self, amount: int):
        # Overwrite the current value with the given amount.
        with self._lock:
            self._value = amount

    def reset(self):
        with self._lock:
            self._value = 0
[ "threading.Lock" ]
[((99, 115), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (113, 115), False, 'import threading\n')]
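A brief usage sketch for the LongAdder record above (an illustration, not part of the dataset; the module name long_adder is a placeholder for wherever the class is saved). Several threads increment one shared counter, and because increment() takes the lock, no updates are lost.

import threading
from long_adder import LongAdder   # hypothetical module holding the class above

counter = LongAdder()

def worker():
    for _ in range(10000):
        counter.increment()

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()

print(counter.get_value())   # expected: 40000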
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import actionlib
import math
from trajectory_msgs.msg import JointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from sensor_msgs.msg import LaserScan

i = 0

def callback(msg):
    rospy.loginfo('min %f -(%f)-> max %f'%(msg.angle_min, msg.angle_increment, msg.angle_max))

    # Define the ActionLib client from the topic name and the message type
    client = actionlib.SimpleActionClient('/fullbody_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
    client.wait_for_server() # Make sure the connection to the ActionLib server is established

    # gen msg
    traj_msg = FollowJointTrajectoryGoal()
    traj_msg.trajectory = JointTrajectory()
    traj_msg.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(0.2)
    traj_msg.trajectory.joint_names = ['JOINT0', 'JOINT1', 'JOINT2', 'JOINT3', 'JOINT4', 'JOINT5', 'JOINT6', 'JOINT7', 'JOINT8', 'JOINT9', 'JOINT10', 'JOINT11', 'JOINT12', 'JOINT13', 'JOINT14', 'JOINT15', 'JOINT16', 'JOINT17', 'JOINT18', 'JOINT19', 'JOINT20', 'JOINT21', 'JOINT22', 'JOINT23', 'JOINT24', 'JOINT25', 'JOINT26', 'JOINT27', 'JOINT28', 'JOINT29']

    global i
    val = int(-msg.angle_min / msg.angle_increment)
    if (msg.ranges[val] < 100):
        i += 1

    traj_msg.trajectory.points.append(JointTrajectoryPoint(positions=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, i, 0, i, 0, 0, 0, 0, i, 0, i], time_from_start = rospy.Duration(1.0 + i)))

    # Send the target posture as the goal
    client.send_goal(traj_msg)
    rospy.loginfo("wait for goal ...")
    client.wait_for_result() # Wait until the robot finishes moving
    rospy.loginfo("done")

if __name__ == '__main__':
    rospy.init_node('range_listener', anonymous=True)
    rospy.Subscriber("/range_sensor", LaserScan, callback)
    rospy.spin()
[ "rospy.Subscriber", "rospy.Time.now", "actionlib.SimpleActionClient", "rospy.loginfo", "control_msgs.msg.JointTrajectory", "rospy.init_node", "rospy.spin", "rospy.Duration" ]
[((301, 397), 'rospy.loginfo', 'rospy.loginfo', (["('min %f -(%f)-> max %f' % (msg.angle_min, msg.angle_increment, msg.angle_max))"], {}), "('min %f -(%f)-> max %f' % (msg.angle_min, msg.angle_increment,\n msg.angle_max))\n", (314, 397), False, 'import rospy\n'), ((452, 561), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['"""/fullbody_controller/follow_joint_trajectory"""', 'FollowJointTrajectoryAction'], {}), "('/fullbody_controller/follow_joint_trajectory',\n FollowJointTrajectoryAction)\n", (480, 561), False, 'import actionlib\n'), ((701, 718), 'control_msgs.msg.JointTrajectory', 'JointTrajectory', ([], {}), '()\n', (716, 718), False, 'from control_msgs.msg import JointTrajectory\n'), ((1690, 1739), 'rospy.init_node', 'rospy.init_node', (['"""range_listener"""'], {'anonymous': '(True)'}), "('range_listener', anonymous=True)\n", (1705, 1739), False, 'import rospy\n'), ((1744, 1798), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/range_sensor"""', 'LaserScan', 'callback'], {}), "('/range_sensor', LaserScan, callback)\n", (1760, 1798), False, 'import rospy\n'), ((1803, 1815), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1813, 1815), False, 'import rospy\n'), ((758, 774), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (772, 774), False, 'import rospy\n'), ((777, 796), 'rospy.Duration', 'rospy.Duration', (['(0.2)'], {}), '(0.2)\n', (791, 796), False, 'import rospy\n'), ((1542, 1576), 'rospy.loginfo', 'rospy.loginfo', (['"""wait for goal ..."""'], {}), "('wait for goal ...')\n", (1555, 1576), False, 'import rospy\n'), ((1635, 1656), 'rospy.loginfo', 'rospy.loginfo', (['"""done"""'], {}), "('done')\n", (1648, 1656), False, 'import rospy\n'), ((1450, 1473), 'rospy.Duration', 'rospy.Duration', (['(1.0 + i)'], {}), '(1.0 + i)\n', (1464, 1473), False, 'import rospy\n')]
import json from datetime import datetime from django.db.models import Model from jsonpath_ng import parse from rdflib import Graph, URIRef from safetydance import step_data from safetydance_django.steps import ( # noqa: F401 http_client, http_response, json_values_match, ) from safetydance_django.test import Http # noqa: F401 from safetydance_test.step_extension import step_extension from scrud_django.models import Resource, ResourceType __all__ = [ 'an_instance_named', 'is_valid_resource', 'is_valid_resource_type', 'an_registration_data', 'check_registration_results', 'json_path_expressions', 'json_path_match', 'json_path_matches', 'ld_context_states_sub_class_of', 'named_instances', 'resource_json_is', 'response_json_matches', ] named_instances = step_data(dict, initializer=dict) json_path_expressions = step_data(dict, initializer=dict) json_path_match = step_data(object) sub_class_of = URIRef("http://www.w3.org/2000/01/rdf-schema#subClassOf") @step_extension def an_instance_named(name: str, instance: Model): named_instances[name] = instance @step_extension def is_valid_resource(name: str): instance = named_instances[name] assert isinstance(instance, Resource) assert isinstance(instance.content, dict) assert isinstance(instance.modified_at, datetime) assert isinstance(instance.etag, str) assert len(instance.etag) == 32 @step_extension def is_valid_resource_type(name: str): instance = named_instances[name] assert isinstance(instance, ResourceType) assert isinstance(instance.type_uri, str) assert len(instance.type_uri) > 0 @step_extension def ld_context_states_sub_class_of(subclass_uri, superclass_uri): data = http_response.json() g = Graph().parse(data=json.dumps(data), format='json-ld') superclasses = list(g.transitive_objects(subclass_uri, sub_class_of)) assert len(superclasses) > 0, superclasses assert superclass_uri in superclasses, superclasses # register named_registration_data = step_data(dict, initializer=dict) @step_extension def an_registration_data(name: str, registration_data: dict): named_registration_data[name] = registration_data @step_extension def check_registration_results(name: str, result): expected = named_registration_data[name] # result = register_resource_type(**expected) resource_type_name = list(expected.keys())[0] expected = expected[resource_type_name] assert result.type_uri == expected['rdf_type_uri'] assert result.slug == resource_type_name assert result.context_uri == expected['json_ld_context_url'] assert result.schema_uri == expected['json_schema_url'] def get_compiled_expression(expr, compiled_expressions): compiled_expr = compiled_expressions.get(expr, None) if compiled_expr is None: compiled_expr = parse(expr) compiled_expressions[expr] = compiled_expr return compiled_expr @step_extension def json_path_matches(expr, condition): data = http_response.json() compiled_expr = get_compiled_expression(expr, json_path_expressions) json_path_match = compiled_expr.find(data) assert len(json_path_match) > 0, data match = json_path_match[0].value assert condition( json_path_match ), f"expr: {expr} \ncondition: {condition}, \n match: {match}" def resource_envelope_json_is(resource_data: dict): assert 'href' in resource_data assert isinstance(resource_data['href'], str) assert 'last_modified' in resource_data assert isinstance(resource_data['last_modified'], str) assert 'etag' in resource_data assert isinstance(resource_data['etag'], str) assert 'content' in resource_data assert isinstance(resource_data['content'], dict) response = http_response.json() assert 
response["content"] == resource_data["content"] def resource_json_is(resource_data): response = http_response.json() assert response == resource_data, response def response_json_matches(expected): observed = http_response.json() assert json_values_match(expected, observed), http_response resource_envelope_json_is = step_extension( f=resource_envelope_json_is, target_type=Http ) resource_json_is = step_extension(f=resource_json_is, target_type=Http) response_json_matches = step_extension(f=response_json_matches, target_type=Http)
[ "rdflib.Graph", "safetydance_test.step_extension.step_extension", "json.dumps", "rdflib.URIRef", "jsonpath_ng.parse", "safetydance.step_data", "safetydance_django.steps.http_response.json", "safetydance_django.steps.json_values_match" ]
[((827, 860), 'safetydance.step_data', 'step_data', (['dict'], {'initializer': 'dict'}), '(dict, initializer=dict)\n', (836, 860), False, 'from safetydance import step_data\n'), ((885, 918), 'safetydance.step_data', 'step_data', (['dict'], {'initializer': 'dict'}), '(dict, initializer=dict)\n', (894, 918), False, 'from safetydance import step_data\n'), ((937, 954), 'safetydance.step_data', 'step_data', (['object'], {}), '(object)\n', (946, 954), False, 'from safetydance import step_data\n'), ((972, 1029), 'rdflib.URIRef', 'URIRef', (['"""http://www.w3.org/2000/01/rdf-schema#subClassOf"""'], {}), "('http://www.w3.org/2000/01/rdf-schema#subClassOf')\n", (978, 1029), False, 'from rdflib import Graph, URIRef\n'), ((2065, 2098), 'safetydance.step_data', 'step_data', (['dict'], {'initializer': 'dict'}), '(dict, initializer=dict)\n', (2074, 2098), False, 'from safetydance import step_data\n'), ((4186, 4247), 'safetydance_test.step_extension.step_extension', 'step_extension', ([], {'f': 'resource_envelope_json_is', 'target_type': 'Http'}), '(f=resource_envelope_json_is, target_type=Http)\n', (4200, 4247), False, 'from safetydance_test.step_extension import step_extension\n'), ((4273, 4325), 'safetydance_test.step_extension.step_extension', 'step_extension', ([], {'f': 'resource_json_is', 'target_type': 'Http'}), '(f=resource_json_is, target_type=Http)\n', (4287, 4325), False, 'from safetydance_test.step_extension import step_extension\n'), ((4350, 4407), 'safetydance_test.step_extension.step_extension', 'step_extension', ([], {'f': 'response_json_matches', 'target_type': 'Http'}), '(f=response_json_matches, target_type=Http)\n', (4364, 4407), False, 'from safetydance_test.step_extension import step_extension\n'), ((1764, 1784), 'safetydance_django.steps.http_response.json', 'http_response.json', ([], {}), '()\n', (1782, 1784), False, 'from safetydance_django.steps import http_client, http_response, json_values_match\n'), ((3044, 3064), 'safetydance_django.steps.http_response.json', 'http_response.json', ([], {}), '()\n', (3062, 3064), False, 'from safetydance_django.steps import http_client, http_response, json_values_match\n'), ((3815, 3835), 'safetydance_django.steps.http_response.json', 'http_response.json', ([], {}), '()\n', (3833, 3835), False, 'from safetydance_django.steps import http_client, http_response, json_values_match\n'), ((3949, 3969), 'safetydance_django.steps.http_response.json', 'http_response.json', ([], {}), '()\n', (3967, 3969), False, 'from safetydance_django.steps import http_client, http_response, json_values_match\n'), ((4071, 4091), 'safetydance_django.steps.http_response.json', 'http_response.json', ([], {}), '()\n', (4089, 4091), False, 'from safetydance_django.steps import http_client, http_response, json_values_match\n'), ((4103, 4140), 'safetydance_django.steps.json_values_match', 'json_values_match', (['expected', 'observed'], {}), '(expected, observed)\n', (4120, 4140), False, 'from safetydance_django.steps import http_client, http_response, json_values_match\n'), ((2887, 2898), 'jsonpath_ng.parse', 'parse', (['expr'], {}), '(expr)\n', (2892, 2898), False, 'from jsonpath_ng import parse\n'), ((1793, 1800), 'rdflib.Graph', 'Graph', ([], {}), '()\n', (1798, 1800), False, 'from rdflib import Graph, URIRef\n'), ((1812, 1828), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1822, 1828), False, 'import json\n')]
__author__ = "<NAME>" __copyright__ = "Copyright 2015, <NAME>" __email__ = "<EMAIL>" __license__ = "MIT" import os import sys import mimetypes import base64 import textwrap import datetime import io import uuid import json import time import shutil import subprocess as sp import itertools from collections import namedtuple, defaultdict import requests from docutils.parsers.rst.directives.images import Image, Figure from docutils.parsers.rst import directives from docutils.core import publish_file, publish_parts from snakemake.utils import format from snakemake.logging import logger from snakemake.io import is_flagged, get_flag_value from snakemake.exceptions import WorkflowError from snakemake.script import Snakemake from snakemake import __version__ class EmbeddedMixin(object): """ Replaces the URI of a directive with a base64-encoded version. Useful for embedding images/figures in reports. """ def run(self): """ Image.run() handles most of the """ result = Image.run(self) reference = directives.uri(self.arguments[0]) self.options['uri'] = data_uri_from_file(reference)[0] return result # Create (and register) new image:: and figure:: directives that use a base64 # data URI instead of pointing to a filename. class EmbeddedImage(Image, EmbeddedMixin): pass directives.register_directive('embeddedimage', EmbeddedImage) class EmbeddedFigure(Figure, EmbeddedMixin): pass directives.register_directive('embeddedfigure', EmbeddedFigure) def data_uri(data, filename, encoding="utf8", mime="text/plain"): """Craft a base64 data URI from file with proper encoding and mimetype.""" data = base64.b64encode(data) uri = ("data:{mime};charset={charset};filename={filename};base64,{data}" "".format(filename=filename, mime=mime, charset=encoding, data=data.decode("utf-8"))) return uri def data_uri_from_file(file, defaultenc="utf8"): """Craft a base64 data URI from file with proper encoding and mimetype.""" mime, encoding = mimetypes.guess_type(file) if mime is None: mime = "text/plain" logger.info("Could not detect mimetype for {}, assuming " "text/plain.".format(file)) if encoding is None: encoding = defaultenc with open(file, "rb") as f: return data_uri(f.read(), os.path.basename(file), encoding, mime), mime def report(text, path, stylesheet=os.path.join(os.path.dirname(__file__), "report.css"), defaultenc="utf8", template=None, metadata=None, **files): outmime, _ = mimetypes.guess_type(path) if outmime != "text/html": raise ValueError("Path to report output has to be an HTML file.") definitions = textwrap.dedent(""" .. role:: raw-html(raw) :format: html """) metadata = textwrap.dedent(""" .. container:: :name: metadata {metadata}{date} """).format(metadata=metadata + " | " if metadata else "", date=datetime.date.today().isoformat()) text = format(textwrap.dedent(text), stepout=3) attachments = [] if files: attachments = [textwrap.dedent(""" .. container:: :name: attachments """)] for name, _files in sorted(files.items()): if not isinstance(_files, list): _files = [_files] links = [] for file in sorted(_files): data, _ = data_uri_from_file(file) links.append(':raw-html:`<a href="{data}" download="{filename}" draggable="true">{filename}</a>`'.format( data=data, filename=os.path.basename(file))) links = "\n\n ".join(links) attachments.append(''' .. 
container:: :name: {name} {name}: {links} '''.format(name=name, links=links)) text = definitions + text + "\n\n" + "\n\n".join(attachments) + metadata overrides = dict() if template is not None: overrides["template"] = template if stylesheet is not None: overrides["stylesheet_path"] = stylesheet html = open(path, "w") publish_file(source=io.StringIO(text), destination=html, writer_name="html", settings_overrides=overrides) class RuleRecord: def __init__(self, job, job_rec): import yaml self.name = job_rec.rule self.singularity_img_url = job_rec.singularity_img_url self.conda_env = None self._conda_env_raw = None if job_rec.conda_env: self._conda_env_raw = base64.b64decode(job_rec.conda_env).decode() self.conda_env = yaml.load(self._conda_env_raw) self.n_jobs = 1 self.output = list(job_rec.output) self.id = uuid.uuid4() def add(self, job_rec): self.n_jobs += 1 self.output.extend(job_rec.output) def __eq__(self, other): return (self.name == other.name and self.conda_env == other.conda_env and self.singularity_img_url == other.singularity_img_url) class JobRecord: def __init__(self): self.rule = None self.starttime = sys.maxsize self.endtime = 0 self.output = [] self.conda_env_file = None self.singularity_img_url = None class FileRecord: def __init__(self, path, job, caption): self.path = path logger.info("Adding {}.".format(self.name)) self.raw_caption = caption self.data_uri, self.mime = data_uri_from_file(path) self.id = uuid.uuid4() self.job = job self.png_uri = None if self.is_img: convert = shutil.which("convert") if convert is not None: try: png = sp.check_output(["convert", "-density", "100", self.path, "png:-"], stderr=sp.PIPE) uri = data_uri(png, os.path.basename(self.path) + ".png", mime="image/png") self.png_uri = uri except sp.CalledProcessError as e: logger.warning("Failed to convert image to png with " "imagemagick convert: {}".format(e.stderr)) else: logger.warning("Command convert not in $PATH. 
Install " "imagemagick in order to have embedded " "images and pdfs in the report.") def render(self, env, rst_links, results): if self.raw_caption is not None: try: from jinja2 import Template except ImportError as e: raise WorkflowError("Python package jinja2 must be installed to create reports.") job = self.job snakemake = Snakemake(job.input, job.output, job.params, job.wildcards, job.threads, job.resources, job.log, job.dag.workflow.config, job.rule.name, None) try: caption = open(self.raw_caption).read() + rst_links caption = env.from_string(caption).render(snakemake=snakemake, results=results) self.caption = publish_parts(caption, writer_name="html")["body"] except Exception as e: raise WorkflowError("Error loading caption file of output " "marked for report.", e) @property def is_img(self): web_safe = {"image/gif", "image/jpeg", "image/png", "image/svg+xml", "application/pdf"} return self.mime in web_safe @property def is_text(self): text = {"text/csv", "text/plain", "text/tab-separated-values"} return self.mime in text @property def icon(self): if self.is_img: return "image" elif self.is_text: return "file-text" else: return "file" @property def name(self): return os.path.basename(self.path) @property def size(self): """Return size in Bytes.""" return os.path.getsize(self.path) def rulegraph_d3_spec(dag): try: import networkx as nx from networkx.drawing.nx_agraph import graphviz_layout from networkx.readwrite import json_graph except ImportError as e: raise WorkflowError("Python packages networkx and pygraphviz must be " "installed to create reports.", e) g = nx.DiGraph() g.add_nodes_from(sorted(job.rule.name for job in dag.jobs)) for job in dag.jobs: target = job.rule.name for dep in dag.dependencies[job]: source = dep.rule.name g.add_edge(source, target) pos = graphviz_layout(g, "dot", args="-Grankdir=BT") xmax = max(x for x, y in pos.values()) + 100 # add offset to account for labels ymax = max(y for x, y in pos.values()) def encode_node(node): x, y = pos[node] return {"rule": node, "fx": x, "fy": y} nodes = list(map(encode_node, g.nodes)) idx = {node: i for i, node in enumerate(g.nodes)} links = [{"target": idx[u], "source": idx[v], "value": 1} for u, v in g.edges] return {"nodes": nodes, "links": links}, xmax, ymax def get_resource_as_string(url): r = requests.get(url) if r.status_code == requests.codes.ok: return r.text raise WorkflowError("Failed to download resource needed for " "report: {}".format(url)) def auto_report(dag, path): try: from jinja2 import Template, Environment, PackageLoader except ImportError as e: raise WorkflowError("Python package jinja2 must be installed to create reports.") if not path.endswith(".html"): raise WorkflowError("Report file does not end with .html") logger.info("Creating report...") persistence = dag.workflow.persistence results = defaultdict(list) records = defaultdict(JobRecord) recorded_files = set() for job in dag.jobs: for f in itertools.chain(job.expanded_output, job.input): if is_flagged(f, "report") and f not in recorded_files: if not f.exists: raise WorkflowError("File {} marked for report but does " "not exist.".format(f)) if os.path.isfile(f): report_obj = get_flag_value(f, "report") category = report_obj.category or " " results[category].append( FileRecord(f, job, report_obj.caption)) recorded_files.add(f) for f in job.expanded_output: meta = persistence.metadata(f) if not meta: logger.warning("Missing metadata for file {}".format(f)) continue try: job_hash = meta["job_hash"] rule = meta["rule"] rec = records[(job_hash, 
rule)] rec.rule = rule rec.starttime = min(rec.starttime, meta["starttime"]) rec.endtime = max(rec.endtime, meta["endtime"]) rec.conda_env_file = None rec.conda_env = meta["conda_env"] rec.singularity_img_url = meta["singularity_img_url"] rec.output.append(f) except KeyError as e: print(e) logger.warning("Metadata for file {} was created with a too " "old Snakemake version.".format(f)) for catresults in results.values(): catresults.sort(key=lambda res: res.name) # prepare runtimes runtimes = [{"rule": rec.rule, "runtime": rec.endtime - rec.starttime} for rec in sorted(records.values(), key=lambda rec: rec.rule)] # prepare end times timeline = [{"rule": rec.rule, "starttime": datetime.datetime.fromtimestamp(rec.starttime).isoformat(), "endtime": datetime.datetime.fromtimestamp(rec.endtime).isoformat()} for rec in sorted(records.values(), key=lambda rec: rec.rule)] # prepare per-rule information rules = defaultdict(list) for rec in records.values(): rule = RuleRecord(job, rec) if rec.rule not in rules: rules[rec.rule].append(rule) else: merged = False for other in rules[rec.rule]: if rule == other: other.add(rec) merged = True break if not merged: rules[rec.rule].append(rule) # rulegraph rulegraph, xmax, ymax = rulegraph_d3_spec(dag) env = Environment(loader=PackageLoader("snakemake", "report"), trim_blocks=True, lstrip_blocks=True) env.filters["get_resource_as_string"] = get_resource_as_string rst_links = textwrap.dedent(""" .. _Results: #results .. _Rules: #rules .. _Statistics: #stats {% for cat, catresults in results|dictsort %} .. _{{ cat }}: #results-{{ cat|replace(" ", "_") }} {% for res in catresults %} .. _{{ res.name }}: #{{ res.id }} {% endfor %} {% endfor %} .. _ """) for cat, catresults in results.items(): for res in catresults: res.render(env, rst_links, results) # global description text = "" if dag.workflow.report_text: with open(dag.workflow.report_text) as f: class Snakemake: config = dag.workflow.config text = f.read() + rst_links text = publish_parts(env.from_string(text).render( snakemake=Snakemake, results=results), writer_name="html")["body"] # record time now = "{} {}".format(datetime.datetime.now().ctime(), time.tzname[0]) results_size = sum(res.size for cat in results.values() for res in cat) # render HTML template = env.get_template("report.html") with open(path, "w", encoding="utf-8") as out: out.write(template.render(results=results, results_size=results_size, text=text, rulegraph_nodes=rulegraph["nodes"], rulegraph_links=rulegraph["links"], rulegraph_width=xmax + 20, rulegraph_height=ymax + 20, runtimes=runtimes, timeline=timeline, rules=[rec for recs in rules.values() for rec in recs], version=__version__, now=now)) logger.info("Report created.")
[ "yaml.load", "snakemake.logging.logger.warning", "base64.b64decode", "collections.defaultdict", "os.path.isfile", "snakemake.exceptions.WorkflowError", "docutils.parsers.rst.directives.images.Image.run", "mimetypes.guess_type", "os.path.dirname", "snakemake.logging.logger.info", "requests.get", "networkx.drawing.nx_agraph.graphviz_layout", "itertools.chain", "datetime.datetime.now", "io.StringIO", "os.path.basename", "os.path.getsize", "subprocess.check_output", "shutil.which", "datetime.date.today", "jinja2.PackageLoader", "snakemake.io.is_flagged", "datetime.datetime.fromtimestamp", "snakemake.io.get_flag_value", "networkx.DiGraph", "snakemake.script.Snakemake", "textwrap.dedent", "docutils.core.publish_parts", "uuid.uuid4", "docutils.parsers.rst.directives.uri", "base64.b64encode", "docutils.parsers.rst.directives.register_directive" ]
[((1369, 1430), 'docutils.parsers.rst.directives.register_directive', 'directives.register_directive', (['"""embeddedimage"""', 'EmbeddedImage'], {}), "('embeddedimage', EmbeddedImage)\n", (1398, 1430), False, 'from docutils.parsers.rst import directives\n'), ((1489, 1552), 'docutils.parsers.rst.directives.register_directive', 'directives.register_directive', (['"""embeddedfigure"""', 'EmbeddedFigure'], {}), "('embeddedfigure', EmbeddedFigure)\n", (1518, 1552), False, 'from docutils.parsers.rst import directives\n'), ((1711, 1733), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (1727, 1733), False, 'import base64\n'), ((2137, 2163), 'mimetypes.guess_type', 'mimetypes.guess_type', (['file'], {}), '(file)\n', (2157, 2163), False, 'import mimetypes\n'), ((2705, 2731), 'mimetypes.guess_type', 'mimetypes.guess_type', (['path'], {}), '(path)\n', (2725, 2731), False, 'import mimetypes\n'), ((2855, 2933), 'textwrap.dedent', 'textwrap.dedent', (['"""\n .. role:: raw-html(raw)\n :format: html\n\n """'], {}), '("""\n .. role:: raw-html(raw)\n :format: html\n\n """)\n', (2870, 2933), False, 'import textwrap\n'), ((8818, 8830), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (8828, 8830), True, 'import networkx as nx\n'), ((9079, 9125), 'networkx.drawing.nx_agraph.graphviz_layout', 'graphviz_layout', (['g', '"""dot"""'], {'args': '"""-Grankdir=BT"""'}), "(g, 'dot', args='-Grankdir=BT')\n", (9094, 9125), False, 'from networkx.drawing.nx_agraph import graphviz_layout\n'), ((9648, 9665), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (9660, 9665), False, 'import requests\n'), ((10177, 10210), 'snakemake.logging.logger.info', 'logger.info', (['"""Creating report..."""'], {}), "('Creating report...')\n", (10188, 10210), False, 'from snakemake.logging import logger\n'), ((10269, 10286), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10280, 10286), False, 'from collections import namedtuple, defaultdict\n'), ((10301, 10323), 'collections.defaultdict', 'defaultdict', (['JobRecord'], {}), '(JobRecord)\n', (10312, 10323), False, 'from collections import namedtuple, defaultdict\n'), ((12545, 12562), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12556, 12562), False, 'from collections import namedtuple, defaultdict\n'), ((13293, 13626), 'textwrap.dedent', 'textwrap.dedent', (['"""\n\n .. _Results: #results\n .. _Rules: #rules\n .. _Statistics: #stats\n {% for cat, catresults in results|dictsort %}\n .. _{{ cat }}: #results-{{ cat|replace(" ", "_") }}\n {% for res in catresults %}\n .. _{{ res.name }}: #{{ res.id }}\n {% endfor %}\n {% endfor %}\n .. _\n """'], {}), '(\n """\n\n .. _Results: #results\n .. _Rules: #rules\n .. _Statistics: #stats\n {% for cat, catresults in results|dictsort %}\n .. _{{ cat }}: #results-{{ cat|replace(" ", "_") }}\n {% for res in catresults %}\n .. _{{ res.name }}: #{{ res.id }}\n {% endfor %}\n {% endfor %}\n .. 
_\n """\n )\n', (13308, 13626), False, 'import textwrap\n'), ((15182, 15212), 'snakemake.logging.logger.info', 'logger.info', (['"""Report created."""'], {}), "('Report created.')\n", (15193, 15212), False, 'from snakemake.logging import logger\n'), ((1033, 1048), 'docutils.parsers.rst.directives.images.Image.run', 'Image.run', (['self'], {}), '(self)\n', (1042, 1048), False, 'from docutils.parsers.rst.directives.images import Image, Figure\n'), ((1069, 1102), 'docutils.parsers.rst.directives.uri', 'directives.uri', (['self.arguments[0]'], {}), '(self.arguments[0])\n', (1083, 1102), False, 'from docutils.parsers.rst import directives\n'), ((2554, 2579), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2569, 2579), False, 'import os\n'), ((3177, 3198), 'textwrap.dedent', 'textwrap.dedent', (['text'], {}), '(text)\n', (3192, 3198), False, 'import textwrap\n'), ((4990, 5002), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5000, 5002), False, 'import uuid\n'), ((5784, 5796), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5794, 5796), False, 'import uuid\n'), ((8315, 8342), 'os.path.basename', 'os.path.basename', (['self.path'], {}), '(self.path)\n', (8331, 8342), False, 'import os\n'), ((8429, 8455), 'os.path.getsize', 'os.path.getsize', (['self.path'], {}), '(self.path)\n', (8444, 8455), False, 'import os\n'), ((10119, 10171), 'snakemake.exceptions.WorkflowError', 'WorkflowError', (['"""Report file does not end with .html"""'], {}), "('Report file does not end with .html')\n", (10132, 10171), False, 'from snakemake.exceptions import WorkflowError\n'), ((10393, 10440), 'itertools.chain', 'itertools.chain', (['job.expanded_output', 'job.input'], {}), '(job.expanded_output, job.input)\n', (10408, 10440), False, 'import itertools\n'), ((2950, 3057), 'textwrap.dedent', 'textwrap.dedent', (['"""\n\n .. container::\n :name: metadata\n\n {metadata}{date}\n\n """'], {}), '(\n """\n\n .. container::\n :name: metadata\n\n {metadata}{date}\n\n """\n )\n', (2965, 3057), False, 'import textwrap\n'), ((3270, 3378), 'textwrap.dedent', 'textwrap.dedent', (['"""\n .. container::\n :name: attachments\n\n """'], {}), '(\n """\n .. 
container::\n :name: attachments\n\n """\n )\n', (3285, 3378), False, 'import textwrap\n'), ((4359, 4376), 'io.StringIO', 'io.StringIO', (['text'], {}), '(text)\n', (4370, 4376), False, 'import io\n'), ((4874, 4904), 'yaml.load', 'yaml.load', (['self._conda_env_raw'], {}), '(self._conda_env_raw)\n', (4883, 4904), False, 'import yaml\n'), ((5894, 5917), 'shutil.which', 'shutil.which', (['"""convert"""'], {}), "('convert')\n", (5906, 5917), False, 'import shutil\n'), ((7065, 7211), 'snakemake.script.Snakemake', 'Snakemake', (['job.input', 'job.output', 'job.params', 'job.wildcards', 'job.threads', 'job.resources', 'job.log', 'job.dag.workflow.config', 'job.rule.name', 'None'], {}), '(job.input, job.output, job.params, job.wildcards, job.threads,\n job.resources, job.log, job.dag.workflow.config, job.rule.name, None)\n', (7074, 7211), False, 'from snakemake.script import Snakemake\n'), ((8681, 8787), 'snakemake.exceptions.WorkflowError', 'WorkflowError', (['"""Python packages networkx and pygraphviz must be installed to create reports."""', 'e'], {}), "(\n 'Python packages networkx and pygraphviz must be installed to create reports.'\n , e)\n", (8694, 8787), False, 'from snakemake.exceptions import WorkflowError\n'), ((9993, 10068), 'snakemake.exceptions.WorkflowError', 'WorkflowError', (['"""Python package jinja2 must be installed to create reports."""'], {}), "('Python package jinja2 must be installed to create reports.')\n", (10006, 10068), False, 'from snakemake.exceptions import WorkflowError\n'), ((13089, 13125), 'jinja2.PackageLoader', 'PackageLoader', (['"""snakemake"""', '"""report"""'], {}), "('snakemake', 'report')\n", (13102, 13125), False, 'from jinja2 import Template, Environment, PackageLoader\n'), ((2448, 2470), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (2464, 2470), False, 'import os\n'), ((6535, 6669), 'snakemake.logging.logger.warning', 'logger.warning', (['"""Command convert not in $PATH. Install imagemagick in order to have embedded images and pdfs in the report."""'], {}), "(\n 'Command convert not in $PATH. 
Install imagemagick in order to have embedded images and pdfs in the report.'\n )\n", (6549, 6669), False, 'from snakemake.logging import logger\n'), ((10457, 10480), 'snakemake.io.is_flagged', 'is_flagged', (['f', '"""report"""'], {}), "(f, 'report')\n", (10467, 10480), False, 'from snakemake.io import is_flagged, get_flag_value\n'), ((10704, 10721), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (10718, 10721), False, 'import os\n'), ((14221, 14244), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14242, 14244), False, 'import datetime\n'), ((3123, 3144), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3142, 3144), False, 'import datetime\n'), ((4800, 4835), 'base64.b64decode', 'base64.b64decode', (['job_rec.conda_env'], {}), '(job_rec.conda_env)\n', (4816, 4835), False, 'import base64\n'), ((6001, 6089), 'subprocess.check_output', 'sp.check_output', (["['convert', '-density', '100', self.path, 'png:-']"], {'stderr': 'sp.PIPE'}), "(['convert', '-density', '100', self.path, 'png:-'], stderr=\n sp.PIPE)\n", (6016, 6089), True, 'import subprocess as sp\n'), ((6937, 7012), 'snakemake.exceptions.WorkflowError', 'WorkflowError', (['"""Python package jinja2 must be installed to create reports."""'], {}), "('Python package jinja2 must be installed to create reports.')\n", (6950, 7012), False, 'from snakemake.exceptions import WorkflowError\n'), ((7546, 7588), 'docutils.core.publish_parts', 'publish_parts', (['caption'], {'writer_name': '"""html"""'}), "(caption, writer_name='html')\n", (7559, 7588), False, 'from docutils.core import publish_file, publish_parts\n'), ((7654, 7729), 'snakemake.exceptions.WorkflowError', 'WorkflowError', (['"""Error loading caption file of output marked for report."""', 'e'], {}), "('Error loading caption file of output marked for report.', e)\n", (7667, 7729), False, 'from snakemake.exceptions import WorkflowError\n'), ((10756, 10783), 'snakemake.io.get_flag_value', 'get_flag_value', (['f', '"""report"""'], {}), "(f, 'report')\n", (10770, 10783), False, 'from snakemake.io import is_flagged, get_flag_value\n'), ((12272, 12318), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['rec.starttime'], {}), '(rec.starttime)\n', (12303, 12318), False, 'import datetime\n'), ((12360, 12404), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['rec.endtime'], {}), '(rec.endtime)\n', (12391, 12404), False, 'import datetime\n'), ((3776, 3798), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (3792, 3798), False, 'import os\n'), ((6167, 6194), 'os.path.basename', 'os.path.basename', (['self.path'], {}), '(self.path)\n', (6183, 6194), False, 'import os\n')]
#!/usr/bin/env python import os import re from flask import Flask, redirect from tumblpy import Tumblpy import app_config app = Flask(app_config.PROJECT_NAME) app.config['PROPAGATE_EXCEPTIONS'] = True @app.route('/dear-mr-president/', methods=['POST']) def _post_to_tumblr(): """ Handles the POST to Tumblr. """ def clean(string): """ Formats a string all pretty. """ return string.replace('-', ' ').replace("id ", "I'd ").replace("didnt", "didn't").replace('i ', 'I ') # Request is a global. Import it down here where we need it. from flask import request def strip_html(value): """ Strips HTML from a string. """ return re.compile(r'</?\S([^=]*=(\s*"[^"]*"|\s*\'[^\']*\'|\S*)|[^>])*?>', re.IGNORECASE).sub('', value) def strip_breaks(value): """ Converts newlines, returns and other breaks to <br/>. """ value = re.sub(r'\r\n|\r|\n', '\n', value) return value.replace('\n', '<br />') caption = u"<p class='intro'>Dear Mr. President,</p><p class='voted' data-vote-type='%s'>%s.</p><p class='message'>%s</p><p class='signature-name'>Signed,<br/>%s from %s</p><p class='footnote'>What do <em>you</em> want President Obama to remember in his second term? Share your message at <a href='http://inauguration2013.tumblr.com/'>NPR's Dear Mr. President</a>.</p>" % ( request.form['voted'], clean(request.form['voted']), strip_breaks(strip_html(request.form['message'])), strip_html(request.form['signed_name']), strip_html(request.form['location']) ) t = Tumblpy( app_key=app_config.TUMBLR_KEY, app_secret=os.environ['TUMBLR_APP_SECRET'], oauth_token=os.environ['TUMBLR_OAUTH_TOKEN'], oauth_token_secret=os.environ['TUMBLR_OAUTH_TOKEN_SECRET']) try: tumblr_post = t.post('post', blog_url=app_config.TUMBLR_URL, params={ 'type': 'photo', 'caption': caption, 'tags': u"%s" % request.form['voted'].replace('-', ''), 'data': request.files['image'] }) except: return 'Sorry, we\'re probably over capacity. Please try again later.' return redirect(u"http://%s/%s#posts" % (app_config.TUMBLR_URL, tumblr_post['id']), code=301) if __name__ == '__main__': app.run(host='0.0.0.0', port=8000, debug=app_config.DEBUG)
[ "flask.redirect", "flask.Flask", "re.sub", "tumblpy.Tumblpy", "re.compile" ]
[((132, 162), 'flask.Flask', 'Flask', (['app_config.PROJECT_NAME'], {}), '(app_config.PROJECT_NAME)\n', (137, 162), False, 'from flask import Flask, redirect\n'), ((1647, 1844), 'tumblpy.Tumblpy', 'Tumblpy', ([], {'app_key': 'app_config.TUMBLR_KEY', 'app_secret': "os.environ['TUMBLR_APP_SECRET']", 'oauth_token': "os.environ['TUMBLR_OAUTH_TOKEN']", 'oauth_token_secret': "os.environ['TUMBLR_OAUTH_TOKEN_SECRET']"}), "(app_key=app_config.TUMBLR_KEY, app_secret=os.environ[\n 'TUMBLR_APP_SECRET'], oauth_token=os.environ['TUMBLR_OAUTH_TOKEN'],\n oauth_token_secret=os.environ['TUMBLR_OAUTH_TOKEN_SECRET'])\n", (1654, 1844), False, 'from tumblpy import Tumblpy\n'), ((2243, 2333), 'flask.redirect', 'redirect', (["(u'http://%s/%s#posts' % (app_config.TUMBLR_URL, tumblr_post['id']))"], {'code': '(301)'}), "(u'http://%s/%s#posts' % (app_config.TUMBLR_URL, tumblr_post['id']),\n code=301)\n", (2251, 2333), False, 'from flask import Flask, redirect\n'), ((951, 988), 're.sub', 're.sub', (['"""\\\\r\\\\n|\\\\r|\\\\n"""', '"""\n"""', 'value'], {}), "('\\\\r\\\\n|\\\\r|\\\\n', '\\n', value)\n", (957, 988), False, 'import re\n'), ((722, 816), 're.compile', 're.compile', (['"""</?\\\\S([^=]*=(\\\\s*"[^"]*"|\\\\s*\\\\\'[^\\\\\']*\\\\\'|\\\\S*)|[^>])*?>"""', 're.IGNORECASE'], {}), '(\'</?\\\\S([^=]*=(\\\\s*"[^"]*"|\\\\s*\\\\\\\'[^\\\\\\\']*\\\\\\\'|\\\\S*)|[^>])*?>\',\n re.IGNORECASE)\n', (732, 816), False, 'import re\n')]
from math import sqrt #Courtesy of Aran-Fey https://chat.stackoverflow.com/transcript/message/47258396#47258396 def circle_octant(r): r2 = r ** 2 y = 0 while y <= r: x = sqrt(r2 - y**2) if x-int(x) >= 0.5: x += 1 else: # If we moved left, find out which adjacent pixel # the circle moved through - either the one to the # left or the one above (or neither?). # To do this, we calculate the x coordinate in the # center between the current pixel and the one below. x2 = sqrt(r2 - (y-0.5)**2) if x2 < int(x)+0.5: yield int(x), y-1 else: yield int(x)+1, y x = int(x) yield x, y if x <= r//2: break y += 1 #Courtesy of Aran-Fey https://chat.stackoverflow.com/transcript/message/47258396#47258396 def circle_coords(x, y, r): for h, v in circle_octant(r): yield x+h, y+v yield x-h, y+v yield x+h, y-v yield x-h, y-v yield x+v, y+h yield x-v, y+h yield x+v, y-h yield x-v, y-h def brush(g, r): """ draws over an iterable of coordinates using a square "brush" of side length 1+2*r """ for x,y in g: for dx in range(-r, r+1): for dy in range(-r, r+1): yield dx+x, dy+y from PIL import Image, ImageDraw SIZE = 500 def render_echo_chamber(r): def screen_circle(center, r, **style): x,y = center draw.chord((x-r,y-r, x+r,y+r), 0, 359, **style) def logical_circle(center, r, **style): logical_x, logical_y = center screen_x = (logical_x + 0.5) * SIZE screen_y = (logical_y + 0.5) * SIZE screen_r = r * SIZE screen_circle((screen_x, screen_y), screen_r, **style) img = Image.new("RGB", (SIZE, SIZE), "white") draw = ImageDraw.Draw(img) if r <= 0.5: logical_circle((0, 0), r, outline="black") else: tiles = set(brush(circle_coords(0,0,r), 2)) for x,y in tiles: logical_circle((x,y), r, outline="black") return img def frange(start, stop, steps): for i in range(steps+1): f = i / float(steps) yield start+(f * (stop-start)) import animation max_radius = 8 frames_per_radius = 64 frames = [] for f in frange(0, max_radius, frames_per_radius*max_radius): print(f) frames.append(render_echo_chamber(f)) animation.make_gif(frames, delay=8, delete_temp_files=True)
[ "PIL.ImageDraw.Draw", "animation.make_gif", "PIL.Image.new", "math.sqrt" ]
[((2554, 2613), 'animation.make_gif', 'animation.make_gif', (['frames'], {'delay': '(8)', 'delete_temp_files': '(True)'}), '(frames, delay=8, delete_temp_files=True)\n', (2572, 2613), False, 'import animation\n'), ((1943, 1982), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(SIZE, SIZE)', '"""white"""'], {}), "('RGB', (SIZE, SIZE), 'white')\n", (1952, 1982), False, 'from PIL import Image, ImageDraw\n'), ((1994, 2013), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (2008, 2013), False, 'from PIL import Image, ImageDraw\n'), ((196, 213), 'math.sqrt', 'sqrt', (['(r2 - y ** 2)'], {}), '(r2 - y ** 2)\n', (200, 213), False, 'from math import sqrt\n'), ((604, 629), 'math.sqrt', 'sqrt', (['(r2 - (y - 0.5) ** 2)'], {}), '(r2 - (y - 0.5) ** 2)\n', (608, 629), False, 'from math import sqrt\n')]
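The record above builds each frame with render_echo_chamber() and hands the list to a third-party animation.make_gif helper. As an alternative, the sketch below (an illustration under stated assumptions, not part of the record: it presumes render_echo_chamber and frange from the listing are already defined in the current session) writes the GIF with Pillow alone via Image.save(save_all=True, ...).

frames = [render_echo_chamber(r) for r in frange(0, 2, 32)]   # smaller radius range than the original 0..8
frames[0].save(
    "echo_chamber.gif",
    save_all=True,             # write every frame, not just the first
    append_images=frames[1:],  # remaining frames follow the first
    duration=80,               # milliseconds per frame
    loop=0,                    # loop forever
)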
import pyOcean_cpu as ocean s = ocean.cdouble(3+4j) print(s) print(s.asPython()) print(s.imag.asPython()) print(s.real.asPython()) print(int(s.real)) print(float(s.real))
[ "pyOcean_cpu.cdouble" ]
[((33, 56), 'pyOcean_cpu.cdouble', 'ocean.cdouble', (['(3 + 4.0j)'], {}), '(3 + 4.0j)\n', (46, 56), True, 'import pyOcean_cpu as ocean\n')]
# Generated by Django 2.2.4 on 2019-09-11 03:40 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('deck', '0001_initial'), ] operations = [ migrations.CreateModel( name='Board', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('player', models.CharField(max_length=50, verbose_name='jugador')), ], options={ 'verbose_name': 'Tablero', 'verbose_name_plural': 'Tableros', }, ), migrations.CreateModel( name='Game', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ], options={ 'verbose_name': 'Juego', 'verbose_name_plural': 'Juegos', }, ), migrations.CreateModel( name='GameCard', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('position', models.IntegerField(verbose_name='posicion')), ('board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cards', to='game.Board')), ('card', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='deck.Card')), ], options={ 'verbose_name': 'Carta de Juego', 'verbose_name_plural': 'Carta de Juegos', }, ), migrations.AddField( model_name='board', name='game', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Game'), ), ]
[ "django.db.models.ForeignKey", "django.db.models.IntegerField", "django.db.models.CharField", "django.db.models.AutoField" ]
[((1832, 1910), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""game.Game"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='game.Game')\n", (1849, 1910), False, 'from django.db import migrations, models\n'), ((368, 461), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (384, 461), False, 'from django.db import migrations, models\n'), ((487, 542), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""jugador"""'}), "(max_length=50, verbose_name='jugador')\n", (503, 542), False, 'from django.db import migrations, models\n'), ((803, 896), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (819, 896), False, 'from django.db import migrations, models\n'), ((1153, 1246), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1169, 1246), False, 'from django.db import migrations, models\n'), ((1274, 1318), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""posicion"""'}), "(verbose_name='posicion')\n", (1293, 1318), False, 'from django.db import migrations, models\n'), ((1347, 1453), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""cards"""', 'to': '"""game.Board"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='cards', to='game.Board')\n", (1364, 1453), False, 'from django.db import migrations, models\n'), ((1476, 1554), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""deck.Card"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='deck.Card')\n", (1493, 1554), False, 'from django.db import migrations, models\n')]
#%% Load Dependencies from math import pi from IPython.display import display from pyvlm import LatticeResult, LatticeOptimum from pyvlm import latticesystem_from_json from pyvlm.tools import elliptical_lift_force_distribution #%% Create Lattice System jsonfilepath = '../files/Straight_Wing_Cosine_100.json' lsys = latticesystem_from_json(jsonfilepath) display(lsys) #%% Original Case alpha = 3.0 # degrees beta = 0.0 # degrees lres_org = LatticeResult('Baseline', lsys) lres_org.set_state(alpha=alpha, beta=beta) display(lres_org) #%% Equivalent Elliptical Lift L = lres_org.nfres.CL*lres_org.qfs*lsys.sref lell = elliptical_lift_force_distribution(lsys.srfcs[0].strpy, lsys.bref, L) lopt_ell = LatticeOptimum('Equivalent Elliptical', lsys) lopt_ell.set_state(alpha=alpha) lopt_ell.set_target_lift_force_distribution(lell, rho=1.0, speed=1.0) display(lopt_ell) #%% Calculate Unconstrained Optimum # Constrain Only Lift and the resulting lift distribution is Elliptical. # Note: You have to consider root moment for both sides of the lifting surface. lopt1 = LatticeOptimum('Unconstrained Optimised', lsys)#, sym=False) lopt1.set_state() lopt1.add_constraint('L', L) lopt1.add_record('l', strplst=lsys.lstrpi) lopt1.add_record('l', strplst=lsys.mstrpi) phi1, lam1 = lopt1.optimum_lift_force_distribution() display(lopt1) #%% Calculate Constrained Optimum # Constrain Root Bending Moment (Rolling Moment) to 75% of the Elliptical Optimimum. # Note: You have to consider both sides of the lifting surface. lspec = lopt1.record[0].value*0.75 mspec = lopt1.record[1].value*0.75 lopt2 = LatticeOptimum('Constrained Optimised', lsys)#, sym=False) lopt2.set_state() lopt2.add_constraint('L', L) lopt2.add_constraint('l', lspec, strplst=lsys.lstrpi) lopt2.add_constraint('l', mspec, strplst=lsys.mstrpi) phi2, lam2 = lopt2.optimum_lift_force_distribution() display(lopt2) #%% Plot Distributions axl = None axl = lres_org.plot_trefftz_lift_force_distribution(ax=axl) axl = lopt_ell.plot_trefftz_lift_force_distribution(ax=axl) axl = lopt1.plot_trefftz_lift_force_distribution(ax=axl) axl = lopt2.plot_trefftz_lift_force_distribution(ax=axl) _ = axl.set_ylabel('Lift Distribution') _ = axl.set_xlabel('Span Position') #%% Print Results CDi_org_theory = lres_org.trres.CL**2/pi/lsys.ar/lres_org.trres.e CDi_ell_theory = lres_org.trres.CL**2/pi/lsys.ar print(f'CL_org = {lres_org.trres.CL:.3f}') print(f'CL_ell = {lopt_ell.trres.CL:.3f}') print(f'CL_opt1 = {lopt1.trres.CL:.3f}') print(f'CL_opt2 = {lopt2.trres.CL:.3f}') print('') print(f'CDi_org_theory = {CDi_org_theory:.7f}') print(f'CDi_org = {lres_org.trres.CDi:.7f}') print(f'CDi_ell_theory = {CDi_ell_theory:.7f}') print(f'CDi_ell = {lopt_ell.trres.CDi:.7f}') print(f'CDi_opt1 = {lopt1.trres.CDi:.7f}') print(f'CDi_opt2 = {lopt2.trres.CDi:.7f}') print('') print(f'Efficiency Improvement = {100.0*(1.0-lopt1.trres.CDi/lres_org.trres.CDi):.2f}%')
[ "pyvlm.latticesystem_from_json", "pyvlm.LatticeOptimum", "pyvlm.tools.elliptical_lift_force_distribution", "IPython.display.display", "pyvlm.LatticeResult" ]
[((317, 354), 'pyvlm.latticesystem_from_json', 'latticesystem_from_json', (['jsonfilepath'], {}), '(jsonfilepath)\n', (340, 354), False, 'from pyvlm import latticesystem_from_json\n'), ((355, 368), 'IPython.display.display', 'display', (['lsys'], {}), '(lsys)\n', (362, 368), False, 'from IPython.display import display\n'), ((443, 474), 'pyvlm.LatticeResult', 'LatticeResult', (['"""Baseline"""', 'lsys'], {}), "('Baseline', lsys)\n", (456, 474), False, 'from pyvlm import LatticeResult, LatticeOptimum\n'), ((518, 535), 'IPython.display.display', 'display', (['lres_org'], {}), '(lres_org)\n', (525, 535), False, 'from IPython.display import display\n'), ((620, 689), 'pyvlm.tools.elliptical_lift_force_distribution', 'elliptical_lift_force_distribution', (['lsys.srfcs[0].strpy', 'lsys.bref', 'L'], {}), '(lsys.srfcs[0].strpy, lsys.bref, L)\n', (654, 689), False, 'from pyvlm.tools import elliptical_lift_force_distribution\n'), ((702, 747), 'pyvlm.LatticeOptimum', 'LatticeOptimum', (['"""Equivalent Elliptical"""', 'lsys'], {}), "('Equivalent Elliptical', lsys)\n", (716, 747), False, 'from pyvlm import LatticeResult, LatticeOptimum\n'), ((850, 867), 'IPython.display.display', 'display', (['lopt_ell'], {}), '(lopt_ell)\n', (857, 867), False, 'from IPython.display import display\n'), ((1067, 1114), 'pyvlm.LatticeOptimum', 'LatticeOptimum', (['"""Unconstrained Optimised"""', 'lsys'], {}), "('Unconstrained Optimised', lsys)\n", (1081, 1114), False, 'from pyvlm import LatticeResult, LatticeOptimum\n'), ((1314, 1328), 'IPython.display.display', 'display', (['lopt1'], {}), '(lopt1)\n', (1321, 1328), False, 'from IPython.display import display\n'), ((1593, 1638), 'pyvlm.LatticeOptimum', 'LatticeOptimum', (['"""Constrained Optimised"""', 'lsys'], {}), "('Constrained Optimised', lsys)\n", (1607, 1638), False, 'from pyvlm import LatticeResult, LatticeOptimum\n'), ((1860, 1874), 'IPython.display.display', 'display', (['lopt2'], {}), '(lopt2)\n', (1867, 1874), False, 'from IPython.display import display\n')]
from django.contrib import admin from django.urls import path, include from avaloq_app import views urlpatterns = [ path('', views.review, name='review'), path('avaloq/', include('avaloq_app.urls')), path('admin/', admin.site.urls), path('accounts/', include('registration.backends.default.urls')), ] handler404 = 'avaloq_app.views.page_not_found' handler500='avaloq_app.views.server_error' handler400='avaloq_app.views.bad_request' handler403='avaloq_app.views.permission_denied'
[ "django.urls.path", "django.urls.include" ]
[((121, 158), 'django.urls.path', 'path', (['""""""', 'views.review'], {'name': '"""review"""'}), "('', views.review, name='review')\n", (125, 158), False, 'from django.urls import path, include\n'), ((213, 244), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (217, 244), False, 'from django.urls import path, include\n'), ((180, 206), 'django.urls.include', 'include', (['"""avaloq_app.urls"""'], {}), "('avaloq_app.urls')\n", (187, 206), False, 'from django.urls import path, include\n'), ((268, 313), 'django.urls.include', 'include', (['"""registration.backends.default.urls"""'], {}), "('registration.backends.default.urls')\n", (275, 313), False, 'from django.urls import path, include\n')]
import os import re import sys import webbrowser import yaml import pyautogui import tkinter file = __file__[:-7] meetings = {"meetings": []} def more_option(opt): os.system("cls || clear") # Add if opt == 1: print(" Give alias for your meeting") alias = input(" [User] >>> ") print(" Now paste your meeting invite link") link = input(" [User] >>> ") meetings["meetings"].append(dict({alias: link})) if validate(link): with open(file + "meeting.yml", "w") as f: yaml.dump(meetings, f) print(" Data is added") input("\n [ENTER] >>> ") print() else: more_option(1) # Remove elif opt == 2: i = 1 print("\n Id Alias") for meeting in meetings["meetings"]: x = meeting.keys() print(" " + str(i) + "." + ((5 - len(str(i))) * " "), list(x)[0]) i += 1 print(" Type the id number to delete") alias = int(input(" [User] >>> ")) meetings["meetings"].pop(alias - 1) with open(file + "meeting.yml", "w") as f: yaml.dump(meetings, f) print(" removed") input("\n [ENTER] >>> ") print() # Show elif opt == 3: i = 1 print("\n Id Alias") for meeting in meetings["meetings"]: x = meeting.keys() print(" " + str(i) + "." + ((5 - len(str(i))) * " "), list(x)[0]) i += 1 input("\n [ENTER] >>> ") print() # Attend elif opt == 4: i = 1 print("\n Id Alias") for meeting in meetings["meetings"]: x = meeting.keys() print(" " + str(i) + "." + ((5 - len(str(i))) * " "), list(x)[0]) i += 1 print(" Select the meeting number to start") alias = int(input(" [User] >>> ")) print() open_key_link(meetings["meetings"][alias - 1]) # Help elif opt == 5: display_help() input("\n [ENTER] >>> ") print() # Exit elif opt == 6: exit() else: print(" Incorrect option") start() # Displaying Help def display_help(): print("\n =====================================================") print(" To open your meeting from command line") print("\n\tCheck this following syntax") print("\tpython3 main.py [meeting_number]") print(" Example: python3 main.py 1") print(" =====================================================") print(" To open your meeting from gui") print("\n\tuse this command") print("\tpython3 main.py --gui") print(" =====================================================") print(" To add shortcut in windows") print("\n\t- Right click the opengui.bat") print("\t- Select Properties") print("\t- Click the Shortcut tab") print( """\t- Click in the Shortcut key box and press a letter. For example, if you press the P key,the key combination to run this shortcut is `CTRL+ALT+P`""" ) print(" =====================================================\n") # menu def start(): os.system("cls || clear") print(""" __ __ __ __ __ __ _ _ ___ _ _ ____ ____ ( \\/ )___( ) /__\\ ( )( )( \\( )/ __)( )_( )( ___)( _ \\ ) ((___))(__ /(__)\\ )(__)( ) (( (__ ) _ ( )__) ) / (_/\\/\\_) (____)(__)(__)(______)(_)\\_)\\___)(_) (_)(____)(_)\\_) """) print("==================") print("1. Add meeting") print("2. Remove meeting") print("3. Show meeting") print("4. Attend meeting") print("5. Shortcut") print("6. 
Exit") print("==================") try: opt = int(input("[User] >>> ")) except ValueError: print("Incorrect option") start more_option(opt) # GUI def gui(): window_main = tkinter.Tk(className=" Meeting Launcher") frame = tkinter.Frame(window_main) frame.pack(expand=True, padx=10, pady=20) tkinter.Label(frame, text="Select your meeting to open", font=("Arial", 12)).pack( pady=10 ) for meeting in meetings["meetings"]: x = meeting.keys() tkinter.Button( frame, text=list(x)[0], height="2", width="40", bg="white", command=lambda link=meeting[list(x)[0]]: find_whom_to_call(link), ).pack(padx=10) window_main.mainloop() # link opener def open_link(link): webbrowser.open(link) f.close() print(" Waiting for browser to open", end="") # banana peeler :) (gives link from yaml) def open_key_link(_alias): key = list(_alias)[0] print() link = _alias[key] find_whom_to_call(link) # LAUNCHER def launch_zoom(loading_symbol): try: x, y = pyautogui.locateCenterOnScreen( file + "Zoom-Launch-Meeting-Button.png", confidence=0.8 ) pyautogui.click(x, y) except TypeError: if loading_symbol == "\\": print("\r Waiting for browser to open [" + loading_symbol + "]", end="") launch_zoom("/") else: print("\r Waiting for browser to open [" + loading_symbol + "]", end="") launch_zoom("\\") def launch_gmeet(loading_symbol): try: x, y = pyautogui.locateCenterOnScreen( file + "google_meet_join.png", confidence=0.8 ) print(x, y) pyautogui.click(x, y) except TypeError: if loading_symbol == "\\": print("\r Waiting for browser to open [" + loading_symbol + "]", end="") launch_gmeet("/") else: print("\r Waiting for browser to open [" + loading_symbol + "]", end="") launch_gmeet("\\") # def launch_teams(loading_symbol): # try: # x, y = pyautogui.locateCenterOnScreen( # file+"Zoom-Launch-Meeting-Button.png", confidence=0.8 # ) # pyautogui.click(x, y) # # except TypeError: # if loading_symbol == "\\": # print("\r Waiting for browser to open [" + loading_symbol + "]", end="") # launch_teams("/") # else: # print("\r Waiting for browser to open [" + loading_symbol + "]", end="") # launch_teams("\\") def find_whom_to_call(link): if re.search("meet.google.com", link): open_link(link) launch_gmeet("\\") elif re.search(".zoom.us", link): open_link(link) launch_zoom("\\") elif re.search("teams.live", link): print(" Chori....") print(" I'm can't launch microsoft teams but I can open it for you") input(" [ENTER] >>> ") open_link(link) # launch_teams("\\") else: print("Wrong link") start() def validate(link): if re.search("meet.google.com", link): return True elif re.search("zoom", link): return True elif re.search("teams.live", link): print(" Chori....") print(" I'm can't launch microsoft teams but I can open it for you") input(" [ENTER] >>> ") return True # launch_teams("\\") else: print(" Wrong link") print(" Try again") input(" [ENTER] >>> ") return False try: if __name__ == "__main__": try: with open(file + "meeting.yml", "r") as f: meetings = yaml.load(f, Loader=yaml.FullLoader) except FileNotFoundError: print("Give alias for your meeting") alias = input("[User] >>> ") print("Now paste your meeting invite link") link = input("[User] >>> ") meetings["meetings"].append(dict({alias: link})) with open(file + "meeting.yml", "w") as f: yaml.dump(meetings, f) # open_link(link) find_whom_to_call(link) if len(sys.argv) == 1: start() else: arg1 = sys.argv[1] if re.match(r"[0-9]*", arg1).group(): i = int(arg1) print() try: open_key_link(meetings["meetings"][i - 1]) except IndexError: print("There is no data on that number") print("Press ENTER to get 
the menu. To exit press CTRL+C") start() elif arg1.lower() == "--gui": print("Opening gui") gui() elif arg1.lower() == "--version" or arg1.lower() == "-v": print("Version: 1.0.1") else: display_help() except KeyboardInterrupt: os.system("cls || clear") print(""" __ __ __ __ __ __ _ _ ___ _ _ ____ ____ ( \\/ )___( ) /__\\ ( )( )( \\( )/ __)( )_( )( ___)( _ \\ ) ((___))(__ /(__)\\ )(__)( ) (( (__ ) _ ( )__) ) / (_/\\/\\_) (____)(__)(__)(______)(_)\\_)\\___)(_) (_)(____)(_)\\_) """) print("\n\rExited Successfully")
[ "tkinter.Label", "webbrowser.open", "pyautogui.click", "yaml.load", "pyautogui.locateCenterOnScreen", "yaml.dump", "os.system", "re.match", "tkinter.Frame", "re.search", "tkinter.Tk" ]
[((173, 198), 'os.system', 'os.system', (['"""cls || clear"""'], {}), "('cls || clear')\n", (182, 198), False, 'import os\n'), ((3211, 3236), 'os.system', 'os.system', (['"""cls || clear"""'], {}), "('cls || clear')\n", (3220, 3236), False, 'import os\n'), ((3927, 3968), 'tkinter.Tk', 'tkinter.Tk', ([], {'className': '""" Meeting Launcher"""'}), "(className=' Meeting Launcher')\n", (3937, 3968), False, 'import tkinter\n'), ((3981, 4007), 'tkinter.Frame', 'tkinter.Frame', (['window_main'], {}), '(window_main)\n', (3994, 4007), False, 'import tkinter\n'), ((4545, 4566), 'webbrowser.open', 'webbrowser.open', (['link'], {}), '(link)\n', (4560, 4566), False, 'import webbrowser\n'), ((6382, 6416), 're.search', 're.search', (['"""meet.google.com"""', 'link'], {}), "('meet.google.com', link)\n", (6391, 6416), False, 'import re\n'), ((6869, 6903), 're.search', 're.search', (['"""meet.google.com"""', 'link'], {}), "('meet.google.com', link)\n", (6878, 6903), False, 'import re\n'), ((4862, 4953), 'pyautogui.locateCenterOnScreen', 'pyautogui.locateCenterOnScreen', (["(file + 'Zoom-Launch-Meeting-Button.png')"], {'confidence': '(0.8)'}), "(file + 'Zoom-Launch-Meeting-Button.png',\n confidence=0.8)\n", (4892, 4953), False, 'import pyautogui\n'), ((4980, 5001), 'pyautogui.click', 'pyautogui.click', (['x', 'y'], {}), '(x, y)\n', (4995, 5001), False, 'import pyautogui\n'), ((5367, 5444), 'pyautogui.locateCenterOnScreen', 'pyautogui.locateCenterOnScreen', (["(file + 'google_meet_join.png')"], {'confidence': '(0.8)'}), "(file + 'google_meet_join.png', confidence=0.8)\n", (5397, 5444), False, 'import pyautogui\n'), ((5495, 5516), 'pyautogui.click', 'pyautogui.click', (['x', 'y'], {}), '(x, y)\n', (5510, 5516), False, 'import pyautogui\n'), ((6478, 6505), 're.search', 're.search', (['""".zoom.us"""', 'link'], {}), "('.zoom.us', link)\n", (6487, 6505), False, 'import re\n'), ((6934, 6957), 're.search', 're.search', (['"""zoom"""', 'link'], {}), "('zoom', link)\n", (6943, 6957), False, 'import re\n'), ((8717, 8742), 'os.system', 'os.system', (['"""cls || clear"""'], {}), "('cls || clear')\n", (8726, 8742), False, 'import os\n'), ((4058, 4134), 'tkinter.Label', 'tkinter.Label', (['frame'], {'text': '"""Select your meeting to open"""', 'font': "('Arial', 12)"}), "(frame, text='Select your meeting to open', font=('Arial', 12))\n", (4071, 4134), False, 'import tkinter\n'), ((6566, 6595), 're.search', 're.search', (['"""teams.live"""', 'link'], {}), "('teams.live', link)\n", (6575, 6595), False, 'import re\n'), ((6988, 7017), 're.search', 're.search', (['"""teams.live"""', 'link'], {}), "('teams.live', link)\n", (6997, 7017), False, 'import re\n'), ((562, 584), 'yaml.dump', 'yaml.dump', (['meetings', 'f'], {}), '(meetings, f)\n', (571, 584), False, 'import yaml\n'), ((1169, 1191), 'yaml.dump', 'yaml.dump', (['meetings', 'f'], {}), '(meetings, f)\n', (1178, 1191), False, 'import yaml\n'), ((7457, 7493), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (7466, 7493), False, 'import yaml\n'), ((7849, 7871), 'yaml.dump', 'yaml.dump', (['meetings', 'f'], {}), '(meetings, f)\n', (7858, 7871), False, 'import yaml\n'), ((8050, 8074), 're.match', 're.match', (['"""[0-9]*"""', 'arg1'], {}), "('[0-9]*', arg1)\n", (8058, 8074), False, 'import re\n')]
import matplotlib.pyplot as plt def display_metric_vs_epochs_plot(scores, metric, nth_iter, nth_fold): """Display a metric vs. epochs plot. Both the training and validation scores will be plotted for the chosen metric. Parameters ---------- scores : pandas.DataFrame Scores containing the `epoch` column and metric columns such as `acc`, `loss`, `val_acc`, and `val_loss`. metric : string The metric to display, such as 'loss' or 'acc' (note that `val_` is also appended automatically and should not be provided). nth_iter : int The current random search iteration. nth_fold : int The current k-fold fold. Returns ------- None """ metric_fold_scores = scores[metric].values.tolist() epochs = range(1, len(metric_fold_scores) + 1) plt.figure(figsize=(6,4)) plt.plot(epochs, metric_fold_scores, 'bo', label=f'Training {metric}') plt.plot(epochs, scores[f'val_{metric}'].values.tolist(), 'b', label=f'Validation {metric}') plt.title(f'Training and Validation {metric.title()} for ' f'Iteration {nth_iter} Fold {nth_fold}') plt.legend() plt.ylabel(metric.title()) plt.xlabel('Number of Epochs') plt.show()
[ "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.xlabel" ]
[((847, 873), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (857, 873), True, 'import matplotlib.pyplot as plt\n'), ((877, 947), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'metric_fold_scores', '"""bo"""'], {'label': 'f"""Training {metric}"""'}), "(epochs, metric_fold_scores, 'bo', label=f'Training {metric}')\n", (885, 947), True, 'import matplotlib.pyplot as plt\n'), ((1206, 1218), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1216, 1218), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1284), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Epochs"""'], {}), "('Number of Epochs')\n", (1264, 1284), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1299), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1297, 1299), True, 'import matplotlib.pyplot as plt\n')]
import os # NOQA import sys # NOQA import re # NOQA import math # NOQA import fileinput from collections import Counter, deque, namedtuple # NOQA from itertools import count, product, permutations, combinations, combinations_with_replacement # NOQA from utils import parse_line, mul, factors, memoize, primes, new_table, Point # NOQA # Itertools Functions: # product('ABCD', repeat=2) AA AB AC AD BA BB BC BD CA CB CC CD DA DB DC DD # permutations('ABCD', 2) AB AC AD BA BC BD CA CB CD DA DB DC # combinations('ABCD', 2) AB AC AD BC BD CD # combinations_with_replacement('ABCD', 2) AA AB AC AD BB BC BD CC CD DD total = 0 result = [] table = new_table(None, width=2, height=4) for i, line in enumerate(fileinput.input()): line = line.strip() # data = [x for x in line.split(', ')] # data = [x for x in line] # data = [int(x) for x in line.split()] # data = re.findall(r'(\w+)', line) data = parse_line(r'', line) if i == 0: print(data)
[ "utils.new_table", "fileinput.input", "utils.parse_line" ]
[((713, 747), 'utils.new_table', 'new_table', (['None'], {'width': '(2)', 'height': '(4)'}), '(None, width=2, height=4)\n', (722, 747), False, 'from utils import parse_line, mul, factors, memoize, primes, new_table, Point\n'), ((774, 791), 'fileinput.input', 'fileinput.input', ([], {}), '()\n', (789, 791), False, 'import fileinput\n'), ((988, 1008), 'utils.parse_line', 'parse_line', (['""""""', 'line'], {}), "('', line)\n", (998, 1008), False, 'from utils import parse_line, mul, factors, memoize, primes, new_table, Point\n')]
from __future__ import print_function import argparse import numpy as np import os, csv from dataset import CIFAR10IndexPseudoLabelEnsemble import pickle import torch import torch.nn as nn import torch.nn.functional as F import torchvision.transforms as transforms import torch.utils.data as Data import torch.backends.cudnn as cudnn from utils import progress_bar, TwoCropTransformAdv from losses import SupConLoss import tensorboard_logger as tb_logger from models.resnet_cifar_multibn_ensembleFC import resnet18 as ResNet18 import random from fr_util import generate_high from utils import adjust_learning_rate, warmup_learning_rate import apex # ================================================================== # # Inputs and Pre-definition # # ================================================================== # # Arguments parser = argparse.ArgumentParser() parser.add_argument('--name', type=str, default='advcl_cifar10', help='name of the run') parser.add_argument('--cname', type=str, default='imagenet_clPretrain', help='') parser.add_argument('--batch-size', type=int, default=512, help='batch size') parser.add_argument('--epoch', type=int, default=1000, help='total epochs') parser.add_argument('--save-epoch', type=int, default=100, help='save epochs') parser.add_argument('--epsilon', type=float, default=8, help='The upper bound change of L-inf norm on input pixels') parser.add_argument('--iter', type=int, default=5, help='The number of iterations for iterative attacks') parser.add_argument('--radius', type=int, default=8, help='radius of low freq images') parser.add_argument('--ce_weight', type=float, default=0.2, help='cross entp weight') # contrastive related parser.add_argument('-t', '--nce_t', default=0.5, type=float, help='temperature') parser.add_argument('--seed', default=0, type=float, help='random seed') parser.add_argument('--dataset', type=str, default='cifar10', help='dataset') parser.add_argument('--cosine', action='store_true', help='using cosine annealing') parser.add_argument('--warm', action='store_true', help='warm-up for large batch training') parser.add_argument('--learning_rate', type=float, default=0.5, help='learning rate') parser.add_argument('--lr_decay_epochs', type=str, default='700,800,900', help='where to decay lr, can be a list') parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='decay rate for learning rate') parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay') parser.add_argument('--momentum', type=float, default=0.9, help='momentum') args = parser.parse_args() args.epochs = args.epoch args.decay = args.weight_decay args.cosine = True import math if args.batch_size > 256: args.warm = True if args.warm: args.warmup_from = 0.01 args.warm_epochs = 10 if args.cosine: eta_min = args.learning_rate * (args.lr_decay_rate ** 3) args.warmup_to = eta_min + (args.learning_rate - eta_min) * ( 1 + math.cos(math.pi * args.warm_epochs / args.epochs)) / 2 else: args.warmup_to = args.learning_rate start_epoch = 0 # start from epoch 0 or last checkpoint epoch # Device configuration device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') args.name = 'AdvCL_Cifar10' config = { 'epsilon': args.epsilon / 255., 'num_steps': args.iter, 'step_size': 2.0 / 255, 'random_start': True, 'loss_func': 'xent', } # ================================================================== # # Data and Pre-processing # # ================================================================== # print('=====> Preparing data...') # Multi-cuda if 
torch.cuda.is_available(): n_gpu = torch.cuda.device_count() batch_size = args.batch_size transform_train = transforms.Compose([ transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)), transforms.RandomHorizontalFlip(), transforms.RandomApply([ transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) ], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.ToTensor(), ]) train_transform_org = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), ]) transform_train = TwoCropTransformAdv(transform_train, train_transform_org) transform_test = transforms.Compose([ transforms.ToTensor(), ]) label_pseudo_train_list = [] num_classes_list = [2, 10, 50, 100, 500] dict_name = 'data/{}_pseudo_labels.pkl'.format(args.cname) f = open(dict_name, 'rb') # Pickle file is newly created where foo1.py is feat_label_dict = pickle.load(f) # dump data to f f.close() for i in range(5): class_num = num_classes_list[i] key_train = 'pseudo_train_{}'.format(class_num) label_pseudo_train = feat_label_dict[key_train] label_pseudo_train_list.append(label_pseudo_train) train_dataset = CIFAR10IndexPseudoLabelEnsemble(root='data', transform=transform_train, pseudoLabel_002=label_pseudo_train_list[0], pseudoLabel_010=label_pseudo_train_list[1], pseudoLabel_050=label_pseudo_train_list[2], pseudoLabel_100=label_pseudo_train_list[3], pseudoLabel_500=label_pseudo_train_list[4], download=True) # Data Loader train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=n_gpu*4) # ================================================================== # # Model, Loss and Optimizer # # ================================================================== # # PGD attack model class AttackPGD(nn.Module): def __init__(self, model, config): super(AttackPGD, self).__init__() self.model = model self.rand = config['random_start'] self.step_size = config['step_size'] self.epsilon = config['epsilon'] self.num_steps = config['num_steps'] assert config['loss_func'] == 'xent', 'Plz use xent for loss function.' def forward(self, images_t1, images_t2, images_org, targets, criterion): x1 = images_t1.clone().detach() x2 = images_t2.clone().detach() x_cl = images_org.clone().detach() x_ce = images_org.clone().detach() images_org_high = generate_high(x_cl.clone(), r=args.radius) x_HFC = images_org_high.clone().detach() if self.rand: x_cl = x_cl + torch.zeros_like(x1).uniform_(-self.epsilon, self.epsilon) x_ce = x_ce + torch.zeros_like(x1).uniform_(-self.epsilon, self.epsilon) for i in range(self.num_steps): x_cl.requires_grad_() x_ce.requires_grad_() with torch.enable_grad(): f_proj, f_pred = self.model(x_cl, bn_name='pgd', contrast=True) fce_proj, fce_pred, logits_ce = self.model(x_ce, bn_name='pgd_ce', contrast=True, CF=True, return_logits=True, nonlinear=False) f1_proj, f1_pred = self.model(x1, bn_name='normal', contrast=True) f2_proj, f2_pred = self.model(x2, bn_name='normal', contrast=True) f_high_proj, f_high_pred = self.model(x_HFC, bn_name='normal', contrast=True) features = torch.cat([f_proj.unsqueeze(1), f1_proj.unsqueeze(1), f2_proj.unsqueeze(1), f_high_proj.unsqueeze(1)], dim=1) loss_contrast = criterion(features) loss_ce = 0 for label_idx in range(5): tgt = targets[label_idx].long() lgt = logits_ce[label_idx] loss_ce += F.cross_entropy(lgt, tgt, size_average=False, ignore_index=-1) / 5. 
loss = loss_contrast + loss_ce * args.ce_weight grad_x_cl, grad_x_ce = torch.autograd.grad(loss, [x_cl, x_ce]) x_cl = x_cl.detach() + self.step_size * torch.sign(grad_x_cl.detach()) x_cl = torch.min(torch.max(x_cl, images_org - self.epsilon), images_org + self.epsilon) x_cl = torch.clamp(x_cl, 0, 1) x_ce = x_ce.detach() + self.step_size * torch.sign(grad_x_ce.detach()) x_ce = torch.min(torch.max(x_ce, images_org - self.epsilon), images_org + self.epsilon) x_ce = torch.clamp(x_ce, 0, 1) return x1, x2, x_cl, x_ce, x_HFC print('=====> Building model...') bn_names = ['normal', 'pgd', 'pgd_ce'] model = ResNet18(bn_names=bn_names) model = model.cuda() # tb_logger if not os.path.exists('logger'): os.makedirs('logger') logname = ('logger/pretrain_{}'.format(args.name)) logger = tb_logger.Logger(logdir=logname, flush_secs=2) if torch.cuda.device_count() > 1: print("=====> Let's use", torch.cuda.device_count(), "GPUs!") model = apex.parallel.convert_syncbn_model(model) model = nn.DataParallel(model) model = model.cuda() cudnn.benchmark = True else: print('single gpu version is not supported, please use multiple GPUs!') raise NotImplementedError net = AttackPGD(model, config) # Loss and optimizer ce_criterion = nn.CrossEntropyLoss(ignore_index=-1) contrast_criterion = SupConLoss(temperature=args.nce_t) optimizer = torch.optim.SGD(net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.decay) # optimizer = torch.optim.Adam(net.parameters(), lr=args.lr) # ================================================================== # # Train and Test # # ================================================================== # def train(epoch): print('\nEpoch: %d' % epoch) net.train() train_loss = 0 correct = 0 total = 0 for batch_idx, (inputs, _, targets, ind) in enumerate(train_loader): tt = [] for tt_ in targets: tt.append(tt_.to(device).long()) targets = tt image_t1, image_t2, image_org = inputs image_t1 = image_t1.cuda(non_blocking=True) image_t2 = image_t2.cuda(non_blocking=True) image_org = image_org.cuda(non_blocking=True) warmup_learning_rate(args, epoch+1, batch_idx, len(train_loader), optimizer) # attack contrast optimizer.zero_grad() x1, x2, x_cl, x_ce, x_HFC = net(image_t1, image_t2, image_org, targets, contrast_criterion) f_proj, f_pred = model(x_cl, bn_name='pgd', contrast=True) fce_proj, fce_pred, logits_ce = model(x_ce, bn_name='pgd_ce', contrast=True, CF=True, return_logits=True, nonlinear=False) f1_proj, f1_pred = model(x1, bn_name='normal', contrast=True) f2_proj, f2_pred = model(x2, bn_name='normal', contrast=True) f_high_proj, f_high_pred = model(x_HFC, bn_name='normal', contrast=True) features = torch.cat( [f_proj.unsqueeze(1), f1_proj.unsqueeze(1), f2_proj.unsqueeze(1), f_high_proj.unsqueeze(1)], dim=1) contrast_loss = contrast_criterion(features) ce_loss = 0 for label_idx in range(5): tgt = targets[label_idx].long() lgt = logits_ce[label_idx] ce_loss += ce_criterion(lgt, tgt) / 5. loss = contrast_loss + ce_loss * args.ce_weight loss.backward() optimizer.step() train_loss += loss.item() total += targets[0].size(0) progress_bar(batch_idx, len(train_loader), 'Loss: %.3f (%d/%d)' % (train_loss/(batch_idx+1), correct, total)) return train_loss/batch_idx, 0. 
# ================================================================== # # Checkpoint # # ================================================================== # # Save checkpoint def checkpoint(epoch): print('=====> Saving checkpoint...') state = { 'model': model.state_dict(), 'epoch': epoch, 'rng_state': torch.get_rng_state() } save_dir = './checkpoint/{}'.format(args.name) if not os.path.isdir(save_dir): os.makedirs(save_dir) torch.save(state, '{}/epoch_{}.ckpt'.format(save_dir, epoch)) # ================================================================== # # Run the model # # ================================================================== # np.random.seed(args.seed) random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) for epoch in range(start_epoch, args.epoch+2): adjust_learning_rate(args, optimizer, epoch+1) train_loss, train_acc = train(epoch) logger.log_value('train_loss', train_loss, epoch) logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch) if epoch % args.save_epoch == 0: checkpoint(epoch)
[ "dataset.CIFAR10IndexPseudoLabelEnsemble", "numpy.random.seed", "argparse.ArgumentParser", "torch.autograd.grad", "utils.adjust_learning_rate", "losses.SupConLoss", "torch.get_rng_state", "torch.cuda.device_count", "models.resnet_cifar_multibn_ensembleFC.resnet18", "pickle.load", "tensorboard_logger.Logger", "torch.utils.data.DataLoader", "os.path.exists", "random.seed", "math.cos", "utils.TwoCropTransformAdv", "torchvision.transforms.RandomHorizontalFlip", "torch.zeros_like", "torch.manual_seed", "torch.nn.functional.cross_entropy", "torch.clamp", "torch.cuda.is_available", "torch.max", "torch.enable_grad", "torchvision.transforms.RandomCrop", "torchvision.transforms.ColorJitter", "os.makedirs", "os.path.isdir", "torch.nn.CrossEntropyLoss", "torchvision.transforms.RandomResizedCrop", "torchvision.transforms.RandomGrayscale", "torch.cuda.manual_seed_all", "apex.parallel.convert_syncbn_model", "torch.nn.DataParallel", "torchvision.transforms.ToTensor" ]
[((886, 911), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (909, 911), False, 'import argparse\n'), ((4093, 4118), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4116, 4118), False, 'import torch\n'), ((4706, 4763), 'utils.TwoCropTransformAdv', 'TwoCropTransformAdv', (['transform_train', 'train_transform_org'], {}), '(transform_train, train_transform_org)\n', (4725, 4763), False, 'from utils import progress_bar, TwoCropTransformAdv\n'), ((5057, 5071), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5068, 5071), False, 'import pickle\n'), ((5331, 5655), 'dataset.CIFAR10IndexPseudoLabelEnsemble', 'CIFAR10IndexPseudoLabelEnsemble', ([], {'root': '"""data"""', 'transform': 'transform_train', 'pseudoLabel_002': 'label_pseudo_train_list[0]', 'pseudoLabel_010': 'label_pseudo_train_list[1]', 'pseudoLabel_050': 'label_pseudo_train_list[2]', 'pseudoLabel_100': 'label_pseudo_train_list[3]', 'pseudoLabel_500': 'label_pseudo_train_list[4]', 'download': '(True)'}), "(root='data', transform=transform_train,\n pseudoLabel_002=label_pseudo_train_list[0], pseudoLabel_010=\n label_pseudo_train_list[1], pseudoLabel_050=label_pseudo_train_list[2],\n pseudoLabel_100=label_pseudo_train_list[3], pseudoLabel_500=\n label_pseudo_train_list[4], download=True)\n", (5362, 5655), False, 'from dataset import CIFAR10IndexPseudoLabelEnsemble\n'), ((6003, 6117), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(n_gpu * 4)'}), '(dataset=train_dataset, batch_size=batch_size,\n shuffle=True, num_workers=n_gpu * 4)\n', (6030, 6117), False, 'import torch\n'), ((9225, 9252), 'models.resnet_cifar_multibn_ensembleFC.resnet18', 'ResNet18', ([], {'bn_names': 'bn_names'}), '(bn_names=bn_names)\n', (9233, 9252), True, 'from models.resnet_cifar_multibn_ensembleFC import resnet18 as ResNet18\n'), ((9405, 9451), 'tensorboard_logger.Logger', 'tb_logger.Logger', ([], {'logdir': 'logname', 'flush_secs': '(2)'}), '(logdir=logname, flush_secs=2)\n', (9421, 9451), True, 'import tensorboard_logger as tb_logger\n'), ((9872, 9908), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (9891, 9908), True, 'import torch.nn as nn\n'), ((9930, 9964), 'losses.SupConLoss', 'SupConLoss', ([], {'temperature': 'args.nce_t'}), '(temperature=args.nce_t)\n', (9940, 9964), False, 'from losses import SupConLoss\n'), ((13097, 13122), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (13111, 13122), True, 'import numpy as np\n'), ((13123, 13145), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (13134, 13145), False, 'import random\n'), ((13146, 13174), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (13163, 13174), False, 'import torch\n'), ((13175, 13212), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (13201, 13212), False, 'import torch\n'), ((4132, 4157), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4155, 4157), False, 'import torch\n'), ((9293, 9317), 'os.path.exists', 'os.path.exists', (['"""logger"""'], {}), "('logger')\n", (9307, 9317), False, 'import os, csv\n'), ((9323, 9344), 'os.makedirs', 'os.makedirs', (['"""logger"""'], {}), "('logger')\n", (9334, 9344), False, 'import os, csv\n'), ((9455, 9480), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (9478, 
9480), False, 'import torch\n'), ((9564, 9605), 'apex.parallel.convert_syncbn_model', 'apex.parallel.convert_syncbn_model', (['model'], {}), '(model)\n', (9598, 9605), False, 'import apex\n'), ((9618, 9640), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (9633, 9640), True, 'import torch.nn as nn\n'), ((13265, 13313), 'utils.adjust_learning_rate', 'adjust_learning_rate', (['args', 'optimizer', '(epoch + 1)'], {}), '(args, optimizer, epoch + 1)\n', (13285, 13313), False, 'from utils import adjust_learning_rate, warmup_learning_rate\n'), ((3608, 3633), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3631, 3633), False, 'import torch\n'), ((4235, 4290), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', ([], {'size': '(32)', 'scale': '(0.2, 1.0)'}), '(size=32, scale=(0.2, 1.0))\n', (4263, 4290), True, 'import torchvision.transforms as transforms\n'), ((4299, 4332), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4330, 4332), True, 'import torchvision.transforms as transforms\n'), ((4449, 4482), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (4475, 4482), True, 'import torchvision.transforms as transforms\n'), ((4492, 4513), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4511, 4513), True, 'import torchvision.transforms as transforms\n'), ((4569, 4605), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (4590, 4605), True, 'import torchvision.transforms as transforms\n'), ((4615, 4648), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4646, 4648), True, 'import torchvision.transforms as transforms\n'), ((4658, 4679), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4677, 4679), True, 'import torchvision.transforms as transforms\n'), ((4807, 4828), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4826, 4828), True, 'import torchvision.transforms as transforms\n'), ((9516, 9541), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (9539, 9541), False, 'import torch\n'), ((12671, 12692), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (12690, 12692), False, 'import torch\n'), ((12761, 12784), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (12774, 12784), False, 'import os, csv\n'), ((12794, 12815), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (12805, 12815), False, 'import os, csv\n'), ((8610, 8649), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', '[x_cl, x_ce]'], {}), '(loss, [x_cl, x_ce])\n', (8629, 8649), False, 'import torch\n'), ((8852, 8875), 'torch.clamp', 'torch.clamp', (['x_cl', '(0)', '(1)'], {}), '(x_cl, 0, 1)\n', (8863, 8875), False, 'import torch\n'), ((9078, 9101), 'torch.clamp', 'torch.clamp', (['x_ce', '(0)', '(1)'], {}), '(x_ce, 0, 1)\n', (9089, 9101), False, 'import torch\n'), ((4379, 4421), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.4)', '(0.4)', '(0.4)', '(0.1)'], {}), '(0.4, 0.4, 0.4, 0.1)\n', (4401, 4421), True, 'import torchvision.transforms as transforms\n'), ((7548, 7567), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (7565, 7567), False, 'import torch\n'), ((8762, 8804), 'torch.max', 'torch.max', (['x_cl', '(images_org - self.epsilon)'], {}), '(x_cl, 
images_org - self.epsilon)\n', (8771, 8804), False, 'import torch\n'), ((8988, 9030), 'torch.max', 'torch.max', (['x_ce', '(images_org - self.epsilon)'], {}), '(x_ce, images_org - self.epsilon)\n', (8997, 9030), False, 'import torch\n'), ((3380, 3430), 'math.cos', 'math.cos', (['(math.pi * args.warm_epochs / args.epochs)'], {}), '(math.pi * args.warm_epochs / args.epochs)\n', (3388, 3430), False, 'import math\n'), ((7278, 7298), 'torch.zeros_like', 'torch.zeros_like', (['x1'], {}), '(x1)\n', (7294, 7298), False, 'import torch\n'), ((7363, 7383), 'torch.zeros_like', 'torch.zeros_like', (['x1'], {}), '(x1)\n', (7379, 7383), False, 'import torch\n'), ((8443, 8505), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['lgt', 'tgt'], {'size_average': '(False)', 'ignore_index': '(-1)'}), '(lgt, tgt, size_average=False, ignore_index=-1)\n', (8458, 8505), True, 'import torch.nn.functional as F\n')]
from django.conf import settings from django.conf.urls.static import static from django.urls import path from . import views from django.contrib import messages from django.shortcuts import redirect app_name = 'free' def protected_file(request, path, document_root=None): messages.error(request, "접근 불가") return redirect('/') urlpatterns = [ path('', views.AllListView.as_view(), name='all_list'), path('free/', views.FreeListView.as_view(), name='free_list'), path('question/', views.QuestionListView.as_view(), name='question_list'), path('information/', views.InformationListView.as_view(), name='information_list'), path('write/', views.free_write_view, name='free_write'), path('<int:pk>/', views.free_detail_view, name='free_detail'), path('<int:pk>/edit/', views.free_edit_view, name='free_edit'), path('<int:pk>/delete/', views.free_delete_view, name='free_delete'), path('download/<int:pk>', views.free_download_view, name="free_download"), path('<int:pk>/comment/write/', views.comment_write_view, name='comment_write'), path('<int:pk>/comment/delete/', views.comment_delete_view, name='comment_delete'), ] urlpatterns += static(settings.MEDIA_URL, protected_file, document_root=settings.MEDIA_ROOT)
[ "django.shortcuts.redirect", "django.contrib.messages.error", "django.conf.urls.static.static", "django.urls.path" ]
[((1191, 1268), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL', 'protected_file'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, protected_file, document_root=settings.MEDIA_ROOT)\n', (1197, 1268), False, 'from django.conf.urls.static import static\n'), ((279, 311), 'django.contrib.messages.error', 'messages.error', (['request', '"""접근 불가"""'], {}), "(request, '접근 불가')\n", (293, 311), False, 'from django.contrib import messages\n'), ((323, 336), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (331, 336), False, 'from django.shortcuts import redirect\n'), ((653, 709), 'django.urls.path', 'path', (['"""write/"""', 'views.free_write_view'], {'name': '"""free_write"""'}), "('write/', views.free_write_view, name='free_write')\n", (657, 709), False, 'from django.urls import path\n'), ((715, 776), 'django.urls.path', 'path', (['"""<int:pk>/"""', 'views.free_detail_view'], {'name': '"""free_detail"""'}), "('<int:pk>/', views.free_detail_view, name='free_detail')\n", (719, 776), False, 'from django.urls import path\n'), ((782, 844), 'django.urls.path', 'path', (['"""<int:pk>/edit/"""', 'views.free_edit_view'], {'name': '"""free_edit"""'}), "('<int:pk>/edit/', views.free_edit_view, name='free_edit')\n", (786, 844), False, 'from django.urls import path\n'), ((850, 918), 'django.urls.path', 'path', (['"""<int:pk>/delete/"""', 'views.free_delete_view'], {'name': '"""free_delete"""'}), "('<int:pk>/delete/', views.free_delete_view, name='free_delete')\n", (854, 918), False, 'from django.urls import path\n'), ((924, 997), 'django.urls.path', 'path', (['"""download/<int:pk>"""', 'views.free_download_view'], {'name': '"""free_download"""'}), "('download/<int:pk>', views.free_download_view, name='free_download')\n", (928, 997), False, 'from django.urls import path\n'), ((1004, 1083), 'django.urls.path', 'path', (['"""<int:pk>/comment/write/"""', 'views.comment_write_view'], {'name': '"""comment_write"""'}), "('<int:pk>/comment/write/', views.comment_write_view, name='comment_write')\n", (1008, 1083), False, 'from django.urls import path\n'), ((1089, 1176), 'django.urls.path', 'path', (['"""<int:pk>/comment/delete/"""', 'views.comment_delete_view'], {'name': '"""comment_delete"""'}), "('<int:pk>/comment/delete/', views.comment_delete_view, name=\n 'comment_delete')\n", (1093, 1176), False, 'from django.urls import path\n')]
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Sun Feb 19 21:04:18 2017 @author: pd """ #from IPython import get_ipython #get_ipython().magic('reset -sf') import numpy as np from sklearn import datasets from sklearn.tree import DecisionTreeClassifier import matplotlib.pyplot as plt from sklearn.cross_validation import train_test_split n_features=200 X, y = datasets.make_classification(750, n_features=n_features, n_informative=5, random_state=29) X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.25, random_state=0) accuracies = [] for x in np.arange(1, n_features+1,5): dt = DecisionTreeClassifier(max_depth=x) dt.fit(X_train, y_train) preds = dt.predict(X_test) accuracies.append((preds == y_test).mean()) f, ax = plt.subplots(figsize=(7, 5)) ax.plot(range(1, n_features+1,5), accuracies, 'ko') #ax.plot(range(1, n_features+1)[:12], accuracies[:12], color='k') ax.set_title("Decision Tree Accuracy") ax.set_ylabel("% Correct") ax.set_xlabel("Max Depth") plt.show()
[ "sklearn.cross_validation.train_test_split", "matplotlib.pyplot.show", "sklearn.datasets.make_classification", "sklearn.tree.DecisionTreeClassifier", "numpy.arange", "matplotlib.pyplot.subplots" ]
[((378, 472), 'sklearn.datasets.make_classification', 'datasets.make_classification', (['(750)'], {'n_features': 'n_features', 'n_informative': '(5)', 'random_state': '(29)'}), '(750, n_features=n_features, n_informative=5,\n random_state=29)\n', (406, 472), False, 'from sklearn import datasets\n'), ((507, 561), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(0)'}), '(X, y, test_size=0.25, random_state=0)\n', (523, 561), False, 'from sklearn.cross_validation import train_test_split\n'), ((588, 619), 'numpy.arange', 'np.arange', (['(1)', '(n_features + 1)', '(5)'], {}), '(1, n_features + 1, 5)\n', (597, 619), True, 'import numpy as np\n'), ((800, 828), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 5)'}), '(figsize=(7, 5))\n', (812, 828), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1052), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1050, 1052), True, 'import matplotlib.pyplot as plt\n'), ((627, 662), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': 'x'}), '(max_depth=x)\n', (649, 662), False, 'from sklearn.tree import DecisionTreeClassifier\n')]
#!/usr/bin/python # coding: utf-8 import sys import Levenshtein import numpy as np assert len(sys.argv) > 1 with open(sys.argv[1], 'r', encoding='utf-8') as file: lines = file.readlines() n_lines = len(lines) distances = np.zeros((n_lines, n_lines), dtype=int) messages = [] for x in range(n_lines): for y in range(x + 1, n_lines): if x != y: value = Levenshtein.distance(lines[x], lines[y]) distances[x,y] = value if value < 5: message = "lines {} and {} look similar\n{}{}\n".format(x, y, lines[x], lines[y]) messages.append(message) for message in messages: print(message)
[ "Levenshtein.distance", "numpy.zeros" ]
[((229, 268), 'numpy.zeros', 'np.zeros', (['(n_lines, n_lines)'], {'dtype': 'int'}), '((n_lines, n_lines), dtype=int)\n', (237, 268), True, 'import numpy as np\n'), ((384, 424), 'Levenshtein.distance', 'Levenshtein.distance', (['lines[x]', 'lines[y]'], {}), '(lines[x], lines[y])\n', (404, 424), False, 'import Levenshtein\n')]
import json from copy import deepcopy from random import randrange from typing import List import uvicorn from fastapi import FastAPI, HTTPException, status from pydantic import BaseModel from starlette.responses import FileResponse app = FastAPI() class Repos(BaseModel): repositories: List[str] with open("mock_data.json") as file: test_data = json.load(file) @app.get("/orgs/{org}/repos") def repos(): return test_data["repos"] @app.get("/orgs/{org}/migrations") def list_migrations(): return test_data["migrations"] @app.post("/orgs/{org}/migrations") def start_migration(repos: Repos): if repos.repositories[0] == "Repo4": raise HTTPException(status_code=status.HTTP_502_BAD_GATEWAY) print(repos.repositories[0]) return test_data["startedMigration"][repos.repositories[0]] @app.get("/orgs/{org}/migrations/{id}") def migration_status(id: str): status = deepcopy(test_data["status"][id]) # Add some delay to the export (not the failed ones) if status["state"] != "failed" and randrange(10) > 6: status["state"] = "exported" print(status) return status @app.get("/orgs/{org}/migrations/{id}/archive") async def download_archive(id: str): # Can be created with `head -c 5M </dev/urandom > archive1` return FileResponse("archive1") if __name__ == "__main__": uvicorn.run("mock_gh_api:app", host="0.0.0.0", port=5000, reload=True)
[ "copy.deepcopy", "json.load", "starlette.responses.FileResponse", "fastapi.HTTPException", "uvicorn.run", "random.randrange", "fastapi.FastAPI" ]
[((241, 250), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (248, 250), False, 'from fastapi import FastAPI, HTTPException, status\n'), ((357, 372), 'json.load', 'json.load', (['file'], {}), '(file)\n', (366, 372), False, 'import json\n'), ((912, 945), 'copy.deepcopy', 'deepcopy', (["test_data['status'][id]"], {}), "(test_data['status'][id])\n", (920, 945), False, 'from copy import deepcopy\n'), ((1296, 1320), 'starlette.responses.FileResponse', 'FileResponse', (['"""archive1"""'], {}), "('archive1')\n", (1308, 1320), False, 'from starlette.responses import FileResponse\n'), ((1354, 1424), 'uvicorn.run', 'uvicorn.run', (['"""mock_gh_api:app"""'], {'host': '"""0.0.0.0"""', 'port': '(5000)', 'reload': '(True)'}), "('mock_gh_api:app', host='0.0.0.0', port=5000, reload=True)\n", (1365, 1424), False, 'import uvicorn\n'), ((674, 728), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_502_BAD_GATEWAY'}), '(status_code=status.HTTP_502_BAD_GATEWAY)\n', (687, 728), False, 'from fastapi import FastAPI, HTTPException, status\n'), ((1042, 1055), 'random.randrange', 'randrange', (['(10)'], {}), '(10)\n', (1051, 1055), False, 'from random import randrange\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © 2018 <NAME> """ Support creation of an iPython console, with rayoptics environment .. Created on Wed Nov 21 21:48:02 2018 .. codeauthor: <NAME> """ from PyQt5.QtGui import QColor from qtconsole.rich_jupyter_widget import RichJupyterWidget from qtconsole.inprocess import QtInProcessKernelManager from qtconsole import styles from IPython.lib import guisupport import qdarkstyle from rayoptics.gui.appmanager import ModelInfo from rayoptics.util import colors default_template = '''\ QPlainTextEdit, QTextEdit { background-color: %(bgcolor)s; background-clip: padding; color: %(fgcolor)s; selection-background-color: %(select)s; } .inverted { background-color: %(fgcolor)s; color: %(bgcolor)s; } .error { color: red; } .in-prompt { color: %(i_color)s; } .in-prompt-number { font-weight: bold; } .out-prompt { color: %(o_color)s; } .out-prompt-number { font-weight: bold; } ''' def create_ipython_console(gui_parent, opt_model, title, view_width, view_ht): """ create a iPython console with a rayoptics environment """ def create_light_or_dark_callback(ipy_console): # if not hasattr(ipy_console, 'background'): # ipy_console.background = ipy_console.background() def l_or_d(is_dark): accent = colors.accent_colors(is_dark) prompt_style = { 'i_color': accent['cyan'], 'o_color': accent['orange'], } if is_dark: ipy_console.setStyleSheet( qdarkstyle.load_stylesheet(qt_api='pyqt5')) # color_defs = {**styles.get_colors('solarized-dark'), # **prompt_style } else: ipy_console.setStyleSheet('') # color_defs = {**styles.get_colors('solarized-light'), # **prompt_style } # ipy_console.style_sheet = default_template%color_defs # ipy_console._style_sheet_changed() return l_or_d if opt_model: ro_env = { 'gui_parent': gui_parent, 'app': gui_parent.app_manager, 'opm': opt_model, 'sm': opt_model.seq_model, 'osp': opt_model.optical_spec, 'pm': opt_model.parax_model, 'em': opt_model.ele_model, 'pt': opt_model.part_tree, } else: ro_env = { 'gui_parent': gui_parent, 'app': gui_parent.app_manager, } ro_setup = 'from rayoptics.environment import *' # construct the top level widget ipy_console = ConsoleWidget() # load the environment ipy_console.execute_command(ro_setup) ipy_console.push_vars(ro_env) mi = ModelInfo(opt_model) sub_window = gui_parent.add_subwindow(ipy_console, mi) sub_window.setWindowTitle(title) sub_window.sync_light_or_dark = create_light_or_dark_callback(ipy_console) orig_x, orig_y = gui_parent.initial_window_offset() sub_window.setGeometry(orig_x, orig_y, view_width, view_ht) sub_window.show() class ConsoleWidget(RichJupyterWidget): def __init__(self, customBanner=None, *args, **kwargs): super().__init__(*args, **kwargs) if customBanner is not None: self.banner = customBanner self.font_size = 6 self.kernel_manager = kernel_manager = QtInProcessKernelManager() kernel_manager.start_kernel(show_banner=False) kernel_manager.kernel.gui = 'qt' self.kernel_client = kernel_client = self.kernel_manager.client() kernel_client.start_channels() def stop(): kernel_client.stop_channels() kernel_manager.shutdown_kernel() guisupport.get_app_qt().exit() self.exit_requested.connect(stop) def push_vars(self, variableDict): """ Given a dictionary containing name / value pairs, push those variables to the Jupyter console widget """ self.kernel_manager.kernel.shell.push(variableDict) def clear(self): """ Clears the terminal """ self._control.clear() # self.kernel_manager def print_text(self, text): """ Prints some plain text to the console """ self._append_plain_text(text) def 
execute_command(self, command): """ Execute a command in the frame of the console widget """ self._execute(command, False)
[ "qdarkstyle.load_stylesheet", "rayoptics.util.colors.accent_colors", "rayoptics.gui.appmanager.ModelInfo", "qtconsole.inprocess.QtInProcessKernelManager", "IPython.lib.guisupport.get_app_qt" ]
[((2895, 2915), 'rayoptics.gui.appmanager.ModelInfo', 'ModelInfo', (['opt_model'], {}), '(opt_model)\n', (2904, 2915), False, 'from rayoptics.gui.appmanager import ModelInfo\n'), ((3531, 3557), 'qtconsole.inprocess.QtInProcessKernelManager', 'QtInProcessKernelManager', ([], {}), '()\n', (3555, 3557), False, 'from qtconsole.inprocess import QtInProcessKernelManager\n'), ((1395, 1424), 'rayoptics.util.colors.accent_colors', 'colors.accent_colors', (['is_dark'], {}), '(is_dark)\n', (1415, 1424), False, 'from rayoptics.util import colors\n'), ((1643, 1685), 'qdarkstyle.load_stylesheet', 'qdarkstyle.load_stylesheet', ([], {'qt_api': '"""pyqt5"""'}), "(qt_api='pyqt5')\n", (1669, 1685), False, 'import qdarkstyle\n'), ((3887, 3910), 'IPython.lib.guisupport.get_app_qt', 'guisupport.get_app_qt', ([], {}), '()\n', (3908, 3910), False, 'from IPython.lib import guisupport\n')]
import sys, logging, time, random import web import json from intellect.Intellect import Intellect from MyIntellect import MyIntellect from Question import Question from Arrow_Model import Arrow_Model from Model import Model class Application(object): def __init__(self): # Load the rules self._myIntellect = MyIntellect() self._myIntellect.learn( self._myIntellect.local_file_uri('./rulesets/secondRuleSet.policy')) def GET(self): # Habilitate the cross-domain communication web.header('Access-Control-Allow-Origin', '*') web.header('Access-Control-Allow-Credentials', 'true') # Receive the data from the browser and create # the objects used by the policies user_data = web.input() self._model = Model() self._myIntellect.initialize() self.set_length( user_data ) self.set_height( user_data ) self.set_poundage( user_data ) self.set_temperature( user_data ) self.set_target_distance( user_data ) self._question = Question() self._arrow_model = Arrow_Model() self._myIntellect.learn( self._model ) self._myIntellect.reason() self._question.number = self._model.question self._arrow_model.value = self._model.arrow_model # Send the results to the browser on a json json_map = { 'question' : self._question.number, 'model' : self._arrow_model.get_model_name() } return json.dumps( json_map ) def set_length(self, user_data): try: self._model.arm_length = int(user_data.longitud) except AttributeError: logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Arm_Length does not exists' ) def set_height(self, user_data): try: self._model.height = int(user_data.altura) except AttributeError: logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Height does not exists' ) def set_poundage(self, user_data): try: self._model.poundage = int(user_data.libraje) except AttributeError: logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Poundage does not exists' ) def set_temperature(self, user_data): try: self._model.temperature = int(user_data.temperatura) except AttributeError: logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Temperature does not exists' ) def set_target_distance(self, user_data): try: self._model.target_distance = int(user_data.distancia) except AttributeError: logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Target Distance does not exists' )
[ "Model.Model", "web.header", "json.dumps", "web.input", "Arrow_Model.Arrow_Model", "Question.Question", "MyIntellect.MyIntellect", "logging.getLogger" ]
[((317, 330), 'MyIntellect.MyIntellect', 'MyIntellect', ([], {}), '()\n', (328, 330), False, 'from MyIntellect import MyIntellect\n'), ((496, 542), 'web.header', 'web.header', (['"""Access-Control-Allow-Origin"""', '"""*"""'], {}), "('Access-Control-Allow-Origin', '*')\n", (506, 542), False, 'import web\n'), ((545, 599), 'web.header', 'web.header', (['"""Access-Control-Allow-Credentials"""', '"""true"""'], {}), "('Access-Control-Allow-Credentials', 'true')\n", (555, 599), False, 'import web\n'), ((702, 713), 'web.input', 'web.input', ([], {}), '()\n', (711, 713), False, 'import web\n'), ((730, 737), 'Model.Model', 'Model', ([], {}), '()\n', (735, 737), False, 'from Model import Model\n'), ((964, 974), 'Question.Question', 'Question', ([], {}), '()\n', (972, 974), False, 'from Question import Question\n'), ((997, 1010), 'Arrow_Model.Arrow_Model', 'Arrow_Model', ([], {}), '()\n', (1008, 1010), False, 'from Arrow_Model import Arrow_Model\n'), ((1343, 1363), 'json.dumps', 'json.dumps', (['json_map'], {}), '(json_map)\n', (1353, 1363), False, 'import json\n'), ((1489, 1524), 'logging.getLogger', 'logging.getLogger', (['"""ArrowSelection"""'], {}), "('ArrowSelection')\n", (1506, 1524), False, 'import sys, logging, time, random\n'), ((1695, 1730), 'logging.getLogger', 'logging.getLogger', (['"""ArrowSelection"""'], {}), "('ArrowSelection')\n", (1712, 1730), False, 'import sys, logging, time, random\n'), ((1903, 1938), 'logging.getLogger', 'logging.getLogger', (['"""ArrowSelection"""'], {}), "('ArrowSelection')\n", (1920, 1938), False, 'import sys, logging, time, random\n'), ((2123, 2158), 'logging.getLogger', 'logging.getLogger', (['"""ArrowSelection"""'], {}), "('ArrowSelection')\n", (2140, 2158), False, 'import sys, logging, time, random\n'), ((2352, 2387), 'logging.getLogger', 'logging.getLogger', (['"""ArrowSelection"""'], {}), "('ArrowSelection')\n", (2369, 2387), False, 'import sys, logging, time, random\n')]
#!/usr/bin/env python # Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo # Copyright (C) 2019-2020 German Aerospace Center (DLR) and others. # This program and the accompanying materials are made available under the # terms of the Eclipse Public License 2.0 which is available at # https://www.eclipse.org/legal/epl-2.0/ # This Source Code may also be made available under the following Secondary # Licenses when the conditions for such availability set forth in the Eclipse # Public License 2.0 are satisfied: GNU General Public License, version 2 # or later which is available at # https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html # SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later # @file personGenerator.py # @author <NAME> # @date 2019-03-22 """ This tool allows to generate flows of persons for a SUMO simulation which is currently not possible in SUMO route files. It does so by converting an xml file (usually having the ``.pflow.xml`` extension) to a sumo route file containing the generated <peron> elements. Here is an example ``.pflow.xml`` : .. code-block:: xml <routes> <personRoute id="route-1"> <walk from="e1" busStop="1" /> <probability> <probabilityItem probability="0.5"> <ride busStop="2" modes="public" /> <probability> <probabilityItem probability="0.5"> <stop busStop="2" duration="10" /> </probabilityItem> <probabilityItem probability="0.5" /> </probability> </probabilityItem> <probabilityItem probability="0.5"> <ride busStop="3" modes="public"> </probabilityItem> </probability> </personRoute> <personFlow id="forward" begin="0" end="3600" number="7" perWave="10" departPos="0" route="forward" /> <personFlow id="backward" begin="0" end="3600" period="600" perWave="10" departPos="0"> <walk from="e3" busStop="3" /> <ride busStop="1" modes="public"/> <stop busStop="1" duration="50"/> </personFlow> </routes> The example above allows to generate two flows of persons : - The first flow consists of persons taking a bus from stop 1 to either stop 2 or stop 3 (with a 50% chance for each). The persons of this flow are spawned in 7 waves (equally separated in time) and each wave consists of 10 persons. For the persons going to bus stop 2, there's a 50% chance they'll stay there during 10 ticks. The route followed by the persons of this flow is defined separately in a ``<personRoute>`` element and referenced by its ID. - The second flow consists of persons taking a bus from stop 3 to stop 1 and then stopping there for 50 ticks. The persons of this flow are spawned in periodic waves with 10 persons pere wave. The route followed by the persons is defined directly under the ``<personFlow>`` How to Use ---------- Via Command Line ~~~~~~~~~~~~~~~~ This script can be accessed directly by command line passing an input `.pflow.xml`` file's path and an output ``.rou.xml`` file's path. .. code-block:: bash python personGenerator.py pedestrians.pflow.xml pedestrians.rou.xml Note that the output file is overwritten without asking for permission. In your script ~~~~~~~~~~~~~~ You can import the classes and methods in this module and use them in your own python script. See the documentation below for more details. 
""" from lxml import etree import argparse import random class PersonGenerationElement(object): """ This class serves as a base for person generation elements """ def __init__(self, xml_element): self.xml_element = xml_element if self.xml_element.tag != self.get_xml_tag(): raise Exception("Bad tag") @classmethod def get_xml_tag(cls): """ This class method is meant to be implemented by subclasses It should return the xml tag for elements described by the current class """ raise NotImplementedError def generate(self): """ This method is meant to be implemented by subclasses It should return a list of elements generated by the element """ raise NotImplementedError @classmethod def wrap_elements(cls, elements, *args, **kwargs): """ Replaces xml elements with the appropriate tag (the one defined in get_xml_tag) with an object of the current class. The given list is modified, be careful :param elements: a list of xml elements :type elements: list """ for i in range(len(elements)): if not isinstance(elements[i], PersonGenerationElement) and elements[i].tag == cls.get_xml_tag(): elements[i] = cls(elements[i], *args, **kwargs) @staticmethod def generate_multiple(elements): """ Loops over a list containing xml elements and PersonGenerationElement objects. The PersonGenerationElement objects are replaced by elements generated from them The given list is not modified :param elements: A list containing xml elements and PersonGenerationElement objects. :type elements: list :return: a list of resulting xml elements :rtype list """ result = list() for element in elements: if isinstance(element, PersonGenerationElement): result.extend(element.generate()) else: result.append(element.__copy__()) return result class ProbabilityElement(PersonGenerationElement): """ This class describes probability elements that are used to generate alternatives with given probabilities. In XML it looks like: .. code-block:: xml <probability> <probabilityItem probability="0.5">fist alternative</probabilityItem> <probabilityItem probability="0.5">second alternative</probabilityItem> </probability> Each time the element is asked to generate, it returns the children of one of its alternatives according to the probabilities. Probability elements can be nested, so you can have: .. code-block:: xml <probability> <probabilityItem probability="0.5"> <probability> ... </probability> ...Possibly other stuff </probabilityItem> <probabilityItem probability="0.5"> second alternative </probabilityItem> </probability> This allows you to define conditional probabilities. 
Note that the nested <probability> element should be a direct child of <probabilityItem> """ def __init__(self, xml_element): """ :param xml_element: The source xml element """ super().__init__(xml_element) self.possibilities = [] for sub_element in list(self.xml_element): if sub_element.tag != "probabilityItem": raise Exception("Only probabilityItem elements are allowed inside probability") try: proba = float(sub_element.get("probability")) if proba < 0 or proba > 1: raise ValueError("") possibility = (proba, list(sub_element)) ProbabilityElement.wrap_elements(possibility[1]) self.possibilities.append(possibility) except (KeyError, ValueError): raise ValueError("probabilityItem element requires attribute probability between 0 and 1") if sum([child[0] for child in self.possibilities]) != 1: raise ValueError("Probabilities not summing up to 1 at line : " + str(self.xml_element.sourceline)) @classmethod def get_xml_tag(cls): """ :return: The tag of xml element coresponding to this class (probability) """ return "probability" def generate(self): """ :return: One of the alternatives according to the given probabilities """ result = [] cumulated_probability = 0 p = random.random() for possibility in self.possibilities: cumulated_probability += float(possibility[0]) if p <= cumulated_probability: result.extend(self.generate_multiple(possibility[1])) break return result class PersonRouteElement(PersonGenerationElement): """ This class describes xml elements that are used to define person routes separately. .. code-block:: xml <personRoute id="route"> <walk /> <stop /> <ride /> </personRoute> The content of the route is then copied to each person using it. You can use probabilities inside the **personRoute** element to have different alternatives. Basically, you can have: .. code-block:: xml <personRoute id="route"> <walk from="edge1" busStop="1"> <probability> <probabilityItem probability="0.5"> <ride busStop="2" modes="public" /> </probabilityItem> <probabilityItem probability="0.5"> <ride busStop="3" modes="public" /> </probabilityItem> </probability> </personRoute> """ def __init__(self, xml_element): super().__init__(xml_element) self.id = self.xml_element.get("id") self.children = list(self.xml_element) ProbabilityElement.wrap_elements(self.children) @classmethod def get_xml_tag(cls): """ :return: The tag of the xml elements corresponding to this class (personRoute) """ return "personRoute" @staticmethod def get_route_by_id(routes, route_id): """ :param routes: :type routes: collections.Iterable :param route_id: :type route_id: str :return: The PersonRouteElement object having the given id from the given iterable. None if not found """ for route in routes: if isinstance(route, PersonRouteElement) and route.id == route_id: return route return None def generate(self): """ :return: A copy of the sub elements of the original personRoute element probability elements are taken into account & used to generate an alternative """ return self.generate_multiple(self.children) class PersonFlowElement(PersonGenerationElement): """ This class describes xml elements that are used to generate flows of persons as it is already possible for vehicles. For example, this xml code: .. code-block:: xml <personFlow id="flow" begin="0" end="3600" number="7" perWave="10"> <walk /> <ride /> <stop /> </personFlow> will generate person elements having the same children (walk, ride, stop). The generated persons will be in 7 waves each containing 10 persons. 
These waves will be equally separated in time between 0 and 3600 The complete attributes list is: - id - begin : the time at which the flow starts - end : the time at which the flow ends. Not mandatory, default is 3600. - period : The time (in seconds) between two consecutive waves. Not mandatory, if not given, number will be used - number : the number of waves. Only meaningful when period is not specified - perWave : the number of persons in each wave. Not mandatory, default is 1 - route : the id of the route that the persons will follow Not mandatory, if not given, uses the children of the <personFlow> element The id of generated persons will be `<id>_<person_index>` where `<person_index>` is the index of the person in the flow (starting from 0) """ default_end = 3600 id_attribute_key = "id" begin_attribute_key = "begin" end_attribute_key = "end" period_attribute_key = "period" number_attribute_key = "number" per_wave_attribute_key = "perWave" route_attribute_key = "route" def __init__(self, xml_element, routes): """ :param xml_element: The xml element :param routes: An iterable where to look for routes :type routes: collections.Iterable """ super().__init__(xml_element) self.routes = routes self.attributes = {item[0]: item[1] for item in self.xml_element.items()} self.children = list(self.xml_element) ProbabilityElement.wrap_elements(self.children) self.id = None self.begin = None self.period = None self.route = None # We check for the attributes that concern us & we leave the others try: self.id = self.attributes.pop(self.id_attribute_key) except KeyError: print("No id attribute in personFlow, quitting") exit(-1) try: self.begin = int(self.attributes.pop(self.begin_attribute_key)) except KeyError: print("No begin in personFlow " + str(id) + ", quitting") exit(-1) try: self.end = int(self.attributes.pop(self.end_attribute_key)) except KeyError: self.end = self.default_end try: self.period = int(self.attributes.pop(self.period_attribute_key)) except KeyError: try: self.number = int(self.attributes.pop(self.number_attribute_key)) if self.number == 1: self.period = (self.end - self.begin) * 2 + 1 else: self.period = (self.end - self.begin) / (self.number - 1) except KeyError: print("Neither period nor number given for personFlow " + str(id) + ", quitting") exit(-1) try: self.per_wave = int(self.attributes.pop(self.per_wave_attribute_key)) except KeyError: self.per_wave = 1 try: route_id = self.attributes.pop(self.route_attribute_key) self.route = PersonRouteElement.get_route_by_id(routes, route_id) if self.route is None: raise Exception("Route with id " + route_id + " not found at line " + str(self.xml_element.sourceline)) except KeyError: pass @classmethod def get_xml_tag(cls): """ :return: The tag of the xml elements corresponding to the current class (personFlow) """ return "personFlow" def generate(self): """ :return: The persons of the flow """ begin = self.begin p_id = 0 elements = list() while begin <= self.end: for i in range(self.per_wave): element = etree.Element("person", self.attributes) element.set("depart", str(int(begin))) element.set("id", self.id + "_" + str(p_id)) if self.route is not None: element.extend(self.route.generate()) else: element.extend(self.generate_multiple(self.children)) elements.append(element) p_id += 1 begin += self.period return elements def generate_persons(input_file, output_file): """ Core method of the script, parses <personFlow> tags in an XML file and generates <person> elements. The generated <person> elements are sorted by their depart time. 
The original file is not modified and the result is written in another file. The resulting file will not contain the <personFlow> elements. Note that the output file is overwritten if it already exists      :param input_file: The path of the input file     :param output_file: The path of the output file     """      # Parse the input file     tree = etree.parse(input_file)      routes = tree.getroot()     children = list(routes)      for child in children:         routes.remove(child)      PersonRouteElement.wrap_elements(children)     person_routes = [child for child in children if isinstance(child, PersonRouteElement)]     PersonFlowElement.wrap_elements(children, routes=person_routes)      for person_route in person_routes:         children.remove(person_route)      person_elements = PersonGenerationElement.generate_multiple(children)     person_elements.sort(key=lambda e: int(e.get('depart')))      routes.extend(person_elements)      with open(output_file, "w") as f:         f.write(etree.tostring(routes).decode())         f.close()   if __name__ == "__main__":     # Parses the command line arguments     parser = argparse.ArgumentParser()     parser.add_argument("source")     parser.add_argument("destination")     args = parser.parse_args()      generate_persons(args.source, args.destination)
[ "argparse.ArgumentParser", "lxml.etree.Element", "random.random", "lxml.etree.parse", "lxml.etree.tostring" ]
[((16150, 16173), 'lxml.etree.parse', 'etree.parse', (['input_file'], {}), '(input_file)\n', (16161, 16173), False, 'from lxml import etree\n'), ((16928, 16953), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (16951, 16953), False, 'import argparse\n'), ((8325, 8340), 'random.random', 'random.random', ([], {}), '()\n', (8338, 8340), False, 'import random\n'), ((15078, 15118), 'lxml.etree.Element', 'etree.Element', (['"""person"""', 'self.attributes'], {}), "('person', self.attributes)\n", (15091, 15118), False, 'from lxml import etree\n'), ((16795, 16817), 'lxml.etree.tostring', 'etree.tostring', (['routes'], {}), '(routes)\n', (16809, 16817), False, 'from lxml import etree\n')]
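For context, a minimal sketch of driving the person-flow script above end to end. It assumes the script is saved as a module named person_generator and that the input file uses a <routes> root element; both names are assumptions for illustration, not facts taken from the source.

# Hedged usage sketch: "person_generator" is a hypothetical module name for the
# script above, and the <routes> root element is an assumption about the input format.
from person_generator import generate_persons

input_xml = """<routes>
    <personFlow id="commuters" begin="0" end="600" number="3" perWave="2">
        <walk from="edge1" busStop="stop1"/>
        <ride busStop="stop2" modes="public"/>
    </personFlow>
</routes>"""

with open("persons.in.xml", "w") as handle:
    handle.write(input_xml)

# Expands the personFlow into individual <person> elements sorted by depart time:
# 3 waves at t=0, 300, 600, each containing 2 persons.
generate_persons("persons.in.xml", "persons.out.xml")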
# -*- coding: utf-8 -*- # Part of BrowseInfo. See LICENSE file for full copyright and licensing details. from odoo import api, fields, models, _ from datetime import date,datetime class wizard_multiple_test_request(models.TransientModel): _name = 'wizard.multiple.test.request' request_date = fields.Datetime('Request Date', required = True) wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True) urgent = fields.Boolean('Urgent',) wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required = True) wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner') tests_ids = fields.Many2many('medical.test_type', 'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests') @api.multi def create_lab_test(self): wizard_obj = self patient_id = wizard_obj.wizard_multiple_test_patient_id phy_id = wizard_obj.wizard_multiple_test_physician_id new_created_id_list = [] date = wizard_obj.request_date for test_id in wizard_obj.tests_ids: lab_test_req_obj = self.env['medical.test_type'] test_browse_record = lab_test_req_obj.browse(test_id.id) test_name = test_browse_record.name medical_test_request_obj = self.env['medical.patient.lab.test'] new_created_id = medical_test_request_obj.create({'date': date, 'doctor_id': phy_id.id, 'patient_id':patient_id.id, 'state': 'tested', 'name':test_id.id, 'request' :self.env['ir.sequence'].next_by_code('test_seq') }) new_created_id_list.append(new_created_id.id) if new_created_id_list: imd = self.env['ir.model.data'] action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test') list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view') result = { 'name': action.name, 'help': action.help, 'type': action.type, 'views': [ [list_view_id,'tree' ]], 'target': action.target, 'context': action.context, 'res_model': action.res_model, } if len(new_created_id_list) : result['domain'] = "[('id','in',%s)]" % new_created_id_list return result # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
[ "odoo.fields.Many2many", "odoo.fields.Datetime", "odoo.fields.Many2one", "odoo.fields.Boolean" ]
[((304, 350), 'odoo.fields.Datetime', 'fields.Datetime', (['"""Request Date"""'], {'required': '(True)'}), "('Request Date', required=True)\n", (319, 350), False, 'from odoo import api, fields, models, _\n'), ((392, 452), 'odoo.fields.Many2one', 'fields.Many2one', (['"""medical.patient"""', '"""Patient"""'], {'required': '(True)'}), "('medical.patient', 'Patient', required=True)\n", (407, 452), False, 'from odoo import api, fields, models, _\n'), ((468, 492), 'odoo.fields.Boolean', 'fields.Boolean', (['"""Urgent"""'], {}), "('Urgent')\n", (482, 492), False, 'from odoo import api, fields, models, _\n'), ((534, 595), 'odoo.fields.Many2one', 'fields.Many2one', (['"""medical.physician"""', '"""Doctor"""'], {'required': '(True)'}), "('medical.physician', 'Doctor', required=True)\n", (549, 595), False, 'from odoo import api, fields, models, _\n'), ((642, 681), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.partner"""', '"""Owner"""'], {}), "('res.partner', 'Owner')\n", (657, 681), False, 'from odoo import api, fields, models, _\n'), ((697, 799), 'odoo.fields.Many2many', 'fields.Many2many', (['"""medical.test_type"""', '"""lab_test_report_test_rel"""', '"""test_id"""', '"""report_id"""', '"""Tests"""'], {}), "('medical.test_type', 'lab_test_report_test_rel', 'test_id',\n 'report_id', 'Tests')\n", (713, 799), False, 'from odoo import api, fields, models, _\n')]
# This is an auto-generated Django model module. # You'll have to do the following manually to clean this up: # * Rearrange models' order # * Make sure each model has one field with primary_key=True # * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table # Feel free to rename the models, but don't rename db_table values or field names. # # Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]' # into your database. from __future__ import unicode_literals from django.db import models class Comment(models.Model): comment_id = models.IntegerField() class_code = models.CharField(max_length=20) post_id = models.IntegerField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) author_id = models.CharField(max_length=20) content = models.TextField(blank=True, null=True) flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'comment' class Course20182(models.Model): class_code = models.CharField(max_length=20) class_name = models.CharField(max_length=100) class_year = models.CharField(max_length=10, blank=True, null=True) quota = models.CharField(max_length=10, blank=True, null=True) instructor = models.CharField(max_length=100, blank=True, null=True) credit = models.CharField(max_length=10, blank=True, null=True) class_hour_room = models.CharField(max_length=500, blank=True, null=True) class_type = models.CharField(max_length=20, blank=True, null=True) class_lan = models.CharField(max_length=50, blank=True, null=True) notice = models.CharField(max_length=100, blank=True, null=True) campus = models.CharField(max_length=10, blank=True, null=True) class Meta: managed = False db_table = 'course_2018_20' class DjangoMigrations(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField() class Meta: managed = False db_table = 'django_migrations' class Post(models.Model): post_id = models.IntegerField() class_code = models.CharField(max_length=20) author_id = models.CharField(max_length=20) title = models.CharField(max_length=255) content = models.TextField(blank=True, null=True) create_date = models.DateTimeField(blank=True, null=True) hit = models.IntegerField() flag = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'post' class User(models.Model): klas_id = models.CharField(primary_key=True, max_length=20) naver_id = models.CharField(max_length=20, blank=True, null=True) lectures = models.CharField(max_length=512, blank=True, null=True) name = models.CharField(max_length=20, blank=True, null=True) class Meta: managed = False db_table = 'user'
[ "django.db.models.CharField", "django.db.models.IntegerField", "django.db.models.TextField", "django.db.models.DateTimeField" ]
[((619, 640), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (638, 640), False, 'from django.db import models\n'), ((658, 689), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (674, 689), False, 'from django.db import models\n'), ((704, 746), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (723, 746), False, 'from django.db import models\n'), ((765, 808), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (785, 808), False, 'from django.db import models\n'), ((825, 856), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (841, 856), False, 'from django.db import models\n'), ((871, 910), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (887, 910), False, 'from django.db import models\n'), ((922, 964), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (941, 964), False, 'from django.db import models\n'), ((1087, 1118), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1103, 1118), False, 'from django.db import models\n'), ((1136, 1168), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1152, 1168), False, 'from django.db import models\n'), ((1186, 1240), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)', 'null': '(True)'}), '(max_length=10, blank=True, null=True)\n', (1202, 1240), False, 'from django.db import models\n'), ((1253, 1307), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)', 'null': '(True)'}), '(max_length=10, blank=True, null=True)\n', (1269, 1307), False, 'from django.db import models\n'), ((1325, 1380), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1341, 1380), False, 'from django.db import models\n'), ((1394, 1448), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)', 'null': '(True)'}), '(max_length=10, blank=True, null=True)\n', (1410, 1448), False, 'from django.db import models\n'), ((1471, 1526), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'blank': '(True)', 'null': '(True)'}), '(max_length=500, blank=True, null=True)\n', (1487, 1526), False, 'from django.db import models\n'), ((1544, 1598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), '(max_length=20, blank=True, null=True)\n', (1560, 1598), False, 'from django.db import models\n'), ((1615, 1669), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)', 'null': '(True)'}), '(max_length=50, blank=True, null=True)\n', (1631, 1669), False, 'from django.db import models\n'), ((1683, 1738), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1699, 1738), False, 'from django.db import models\n'), ((1752, 1806), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 
'(10)', 'blank': '(True)', 'null': '(True)'}), '(max_length=10, blank=True, null=True)\n', (1768, 1806), False, 'from django.db import models\n'), ((1934, 1966), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1950, 1966), False, 'from django.db import models\n'), ((1978, 2010), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1994, 2010), False, 'from django.db import models\n'), ((2025, 2047), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2045, 2047), False, 'from django.db import models\n'), ((2170, 2191), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2189, 2191), False, 'from django.db import models\n'), ((2209, 2240), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (2225, 2240), False, 'from django.db import models\n'), ((2257, 2288), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (2273, 2288), False, 'from django.db import models\n'), ((2301, 2333), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2317, 2333), False, 'from django.db import models\n'), ((2348, 2387), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2364, 2387), False, 'from django.db import models\n'), ((2406, 2449), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2426, 2449), False, 'from django.db import models\n'), ((2460, 2481), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2479, 2481), False, 'from django.db import models\n'), ((2493, 2535), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2512, 2535), False, 'from django.db import models\n'), ((2645, 2694), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(20)'}), '(primary_key=True, max_length=20)\n', (2661, 2694), False, 'from django.db import models\n'), ((2710, 2764), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), '(max_length=20, blank=True, null=True)\n', (2726, 2764), False, 'from django.db import models\n'), ((2780, 2835), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'blank': '(True)', 'null': '(True)'}), '(max_length=512, blank=True, null=True)\n', (2796, 2835), False, 'from django.db import models\n'), ((2847, 2901), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), '(max_length=20, blank=True, null=True)\n', (2863, 2901), False, 'from django.db import models\n')]
from __future__ import absolute_import from __future__ import division from __future__ import print_function import edward as ed import numpy as np import tensorflow as tf from collections import namedtuple from edward.models import ( Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution) from tensorflow.contrib.distributions import bijectors class test_transform_class(tf.test.TestCase): def assertSamplePosNeg(self, sample): num_pos = np.sum((sample > 0.0), axis=0, keepdims=True) num_neg = np.sum((sample < 0.0), axis=0, keepdims=True) self.assertTrue((num_pos > 0).all()) self.assertTrue((num_neg > 0).all()) def test_args(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_kwargs(self): with self.test_session(): x = Normal(-100.0, 1.0) y = ed.transform(x, bijector=bijectors.Softplus()) sample = y.sample(10).eval() self.assertTrue((sample >= 0.0).all()) def test_01(self): with self.test_session(): x = Beta(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_nonnegative(self): with self.test_session(): x = Gamma(1.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_simplex(self): with self.test_session(): x = Dirichlet([1.1, 1.2, 1.3, 1.4]) y = ed.transform(x) self.assertIsInstance(y, TransformedDistribution) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_real(self): with self.test_session(): x = Normal(0.0, 1.0) y = ed.transform(x) self.assertIsInstance(y, Normal) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_multivariate_real(self): with self.test_session(): x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2)) y = ed.transform(x) sample = y.sample(10, seed=1).eval() self.assertSamplePosNeg(sample) def test_no_support(self): with self.test_session(): x = DirichletProcess(1.0, Normal(0.0, 1.0)) with self.assertRaises(AttributeError): y = ed.transform(x) def test_unhandled_support(self): with self.test_session(): FakeRV = namedtuple('FakeRV', ['support']) x = FakeRV(support='rational') with self.assertRaises(ValueError): y = ed.transform(x) if __name__ == '__main__': tf.test.main()
[ "tensorflow.test.main", "tensorflow.ones", "edward.transform", "numpy.sum", "edward.models.Dirichlet", "tensorflow.zeros", "collections.namedtuple", "edward.models.Normal", "edward.models.Gamma", "edward.models.Beta", "tensorflow.contrib.distributions.bijectors.Softplus" ]
[((2782, 2796), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (2794, 2796), True, 'import tensorflow as tf\n'), ((511, 554), 'numpy.sum', 'np.sum', (['(sample > 0.0)'], {'axis': '(0)', 'keepdims': '(True)'}), '(sample > 0.0, axis=0, keepdims=True)\n', (517, 554), True, 'import numpy as np\n'), ((571, 614), 'numpy.sum', 'np.sum', (['(sample < 0.0)'], {'axis': '(0)', 'keepdims': '(True)'}), '(sample < 0.0, axis=0, keepdims=True)\n', (577, 614), True, 'import numpy as np\n'), ((763, 782), 'edward.models.Normal', 'Normal', (['(-100.0)', '(1.0)'], {}), '(-100.0, 1.0)\n', (769, 782), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((977, 996), 'edward.models.Normal', 'Normal', (['(-100.0)', '(1.0)'], {}), '(-100.0, 1.0)\n', (983, 996), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1196, 1210), 'edward.models.Beta', 'Beta', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (1200, 1210), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1221, 1236), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (1233, 1236), True, 'import edward as ed\n'), ((1445, 1460), 'edward.models.Gamma', 'Gamma', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (1450, 1460), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1471, 1486), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (1483, 1486), True, 'import edward as ed\n'), ((1691, 1722), 'edward.models.Dirichlet', 'Dirichlet', (['[1.1, 1.2, 1.3, 1.4]'], {}), '([1.1, 1.2, 1.3, 1.4])\n', (1700, 1722), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1733, 1748), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (1745, 1748), True, 'import edward as ed\n'), ((1950, 1966), 'edward.models.Normal', 'Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1956, 1966), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((1977, 1992), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (1989, 1992), True, 'import edward as ed\n'), ((2248, 2263), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (2260, 2263), True, 'import edward as ed\n'), ((2611, 2644), 'collections.namedtuple', 'namedtuple', (['"""FakeRV"""', "['support']"], {}), "('FakeRV', ['support'])\n", (2621, 2644), False, 'from collections import namedtuple\n'), ((809, 829), 'tensorflow.contrib.distributions.bijectors.Softplus', 'bijectors.Softplus', ([], {}), '()\n', (827, 829), False, 'from tensorflow.contrib.distributions import bijectors\n'), ((2213, 2224), 'tensorflow.zeros', 'tf.zeros', (['(2)'], {}), '(2)\n', (2221, 2224), True, 'import tensorflow as tf\n'), ((2226, 2236), 'tensorflow.ones', 'tf.ones', (['(2)'], {}), '(2)\n', (2233, 2236), True, 'import tensorflow as tf\n'), ((2437, 2453), 'edward.models.Normal', 'Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2443, 2453), False, 'from edward.models import Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag, Normal, Poisson, TransformedDistribution\n'), ((2513, 2528), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', 
(2525, 2528), True, 'import edward as ed\n'), ((2736, 2751), 'edward.transform', 'ed.transform', (['x'], {}), '(x)\n', (2748, 2751), True, 'import edward as ed\n'), ((1032, 1052), 'tensorflow.contrib.distributions.bijectors.Softplus', 'bijectors.Softplus', ([], {}), '()\n', (1050, 1052), False, 'from tensorflow.contrib.distributions import bijectors\n')]
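As a quick illustration of the behaviour these tests assert, a sketch of calling ed.transform directly; it assumes Edward 1.x running on TensorFlow 1.x graph mode, as the imports above imply.

# Sketch only: mirrors the test cases above, assuming Edward 1.x with TF 1.x graph mode.
import edward as ed
import tensorflow as tf
from edward.models import Gamma

x = Gamma(1.0, 1.0)      # support is the positive reals
y = ed.transform(x)       # mapped to an unconstrained real-valued random variable

with tf.Session() as sess:
    samples = sess.run(y.sample(10))
    print(samples)        # samples can now be negative as well as positive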
import sys import reader r = reader.Reader(sys.argv[1]) try: print(r.read()) finally: r.close()
[ "reader.Reader" ]
[((29, 55), 'reader.Reader', 'reader.Reader', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (42, 55), False, 'import reader\n')]
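The same resource-cleanup pattern can be expressed with contextlib.closing; a sketch is shown here on the assumption that reader.Reader only needs read() and close(), exactly as used in the snippet above.

# Equivalent sketch using contextlib.closing; assumes reader.Reader exposes
# read() and close() as in the snippet above.
import sys
from contextlib import closing

import reader

with closing(reader.Reader(sys.argv[1])) as r:
    print(r.read())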
import torch as t import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from model.encoder import Encoder from model.decoder import Decoder import math class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.encoder = Encoder() self.decoder = Decoder() def forward(self, input, z=None): """ :param input: an Float tensor with shape of [batch_size, 1, 28, 28] :param z: an Float tensor with shape of [batch_size, latent_size] if sampling is performed :return: an Float tensor with shape of [batch_size, 1, 28, 28], [batch_size, 16], [batch_size, 16] """ mu, logvar = self.encoder(input) [batch_size, latent_size] = mu.size() std = t.exp(0.5 * logvar) z = Variable(t.randn([batch_size, 15, latent_size])) if input.is_cuda: z = z.cuda() mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1) std_repeated = std.unsqueeze(1).repeat(1, 15, 1) z = z * std_repeated + mu_repeated z = z.view(batch_size * 15, -1) return self.decoder(z), mu, logvar, z def encode(self, input): return self.encoder(input) def decode(self, input): return self.decoder(input) @staticmethod def monte_carlo_divergence(z, mu, std, n): [batch_size, latent_size] = mu.size() log_p_z_x = VAE.normal_prob(z, mu, std) log_p_z = VAE.normal_prob(z, Variable(t.zeros(batch_size, latent_size)), Variable(t.ones(batch_size, latent_size))) result = log_p_z_x - log_p_z return result.view(-1, n).sum(1) / n @staticmethod def normal_prob(z, mu, std): return t.exp(-0.5 * ((z - mu) * t.pow(std + 1e-8, -1) * (z - mu)).sum(1)) / \ t.sqrt(t.abs(2 * math.pi * std.prod(1))) @staticmethod def divergence_with_prior(mu, logvar): return (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean() @staticmethod def divergence_with_posterior(p_first, p_second): """ :params p_first, p_second: tuples with parameters of distribution over latent variables :return: divirgence estimation """ return 0.5 * t.sum(2 * p_second[1] - 2 * p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) + t.pow(p_second[0] - p_second[0], 2) / (t.exp(p_second[1]) + 1e-8) - 1).mean()
[ "torch.ones", "torch.randn", "torch.zeros", "model.encoder.Encoder", "torch.exp", "torch.pow", "model.decoder.Decoder" ]
[((296, 305), 'model.encoder.Encoder', 'Encoder', ([], {}), '()\n', (303, 305), False, 'from model.encoder import Encoder\n'), ((329, 338), 'model.decoder.Decoder', 'Decoder', ([], {}), '()\n', (336, 338), False, 'from model.decoder import Decoder\n'), ((788, 807), 'torch.exp', 't.exp', (['(0.5 * logvar)'], {}), '(0.5 * logvar)\n', (793, 807), True, 'import torch as t\n'), ((830, 868), 'torch.randn', 't.randn', (['[batch_size, 15, latent_size]'], {}), '([batch_size, 15, latent_size])\n', (837, 868), True, 'import torch as t\n'), ((1536, 1568), 'torch.zeros', 't.zeros', (['batch_size', 'latent_size'], {}), '(batch_size, latent_size)\n', (1543, 1568), True, 'import torch as t\n'), ((1614, 1645), 'torch.ones', 't.ones', (['batch_size', 'latent_size'], {}), '(batch_size, latent_size)\n', (1620, 1645), True, 'import torch as t\n'), ((2040, 2053), 'torch.exp', 't.exp', (['logvar'], {}), '(logvar)\n', (2045, 2053), True, 'import torch as t\n'), ((1823, 1845), 'torch.pow', 't.pow', (['(std + 1e-08)', '(-1)'], {}), '(std + 1e-08, -1)\n', (1828, 1845), True, 'import torch as t\n'), ((2025, 2037), 'torch.pow', 't.pow', (['mu', '(2)'], {}), '(mu, 2)\n', (2030, 2037), True, 'import torch as t\n'), ((2442, 2477), 'torch.pow', 't.pow', (['(p_second[0] - p_second[0])', '(2)'], {}), '(p_second[0] - p_second[0], 2)\n', (2447, 2477), True, 'import torch as t\n'), ((2365, 2382), 'torch.exp', 't.exp', (['p_first[1]'], {}), '(p_first[1])\n', (2370, 2382), True, 'import torch as t\n'), ((2481, 2499), 'torch.exp', 't.exp', (['p_second[1]'], {}), '(p_second[1])\n', (2486, 2499), True, 'import torch as t\n'), ((2386, 2404), 'torch.exp', 't.exp', (['p_second[1]'], {}), '(p_second[1])\n', (2391, 2404), True, 'import torch as t\n')]
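A short, hedged sketch of running the forward pass above. The Encoder and Decoder modules are not shown in this snippet, so the 28x28 input shape and 16-dimensional latent space are taken from the docstring rather than verified.

# Usage sketch; shapes follow the docstring ([batch, 1, 28, 28] in, 16-d latent)
# and are assumptions here because Encoder/Decoder are defined elsewhere.
import torch as t
from torch.autograd import Variable

model = VAE()
images = Variable(t.rand(8, 1, 28, 28))           # dummy batch of 8 MNIST-sized images

reconstruction, mu, logvar, z = model(images)   # z stacks 15 latent samples per image
print(reconstruction.size())                    # expected [8 * 15, 1, 28, 28] under these assumptions
print(mu.size(), logvar.size())                 # [8, 16] each, per the docstring

kl = VAE.divergence_with_prior(mu, logvar)       # scalar KL term for an ELBO-style loss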
import logging from celery import shared_task from bdn import contract from bdn import redis from .perform_ipfs_meta_verifications_array import ( perform_ipfs_meta_verifications_array) logger = logging.getLogger(__name__) @shared_task def listen_ethereum_ipfs_hash_storage(): redis_db = redis.get_redis() verification_storage = contract.contract('VerificationStorage') event = verification_storage.events.Verification last_block = redis_db.get('_verification_filter_block') or 0 if last_block != 0: last_block = int(last_block) hash_filter = event.createFilter(fromBlock=last_block) for entry in hash_filter.get_all_entries(): block_number = int(entry['blockNumber']) entry_args = dict(entry['args']) entry_data = { 'transactionHash': entry['transactionHash'].hex(), 'blockHash': entry['blockHash'].hex(), 'blockNumber': entry['blockNumber'], 'args': { 'ipfsHash': entry_args.get('ipfsHash', b'').decode(), }, } perform_ipfs_meta_verifications_array.delay(entry_data) if block_number > last_block: redis_db.set('_verification_filter_block', block_number)
[ "bdn.contract.contract", "bdn.redis.get_redis", "logging.getLogger" ]
[((200, 227), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (217, 227), False, 'import logging\n'), ((299, 316), 'bdn.redis.get_redis', 'redis.get_redis', ([], {}), '()\n', (314, 316), False, 'from bdn import redis\n'), ((344, 384), 'bdn.contract.contract', 'contract.contract', (['"""VerificationStorage"""'], {}), "('VerificationStorage')\n", (361, 384), False, 'from bdn import contract\n')]
import unittest from prestans.http import STATUS from prestans.http import VERB from prestans import exception class ExceptionBase(unittest.TestCase): def test_http_status(self): base_value = exception.Base(http_status=STATUS.OK, message="message") self.assertEqual(base_value.http_status, STATUS.OK) base_value.http_status = STATUS.NO_CONTENT self.assertEqual(base_value.http_status, STATUS.NO_CONTENT) def test_stack_trace(self): base = exception.Base(http_status=STATUS.OK, message="message") self.assertEqual(base.stack_trace, []) def test_push_trace(self): pass def test_message(self): base_value = exception.Base(http_status=STATUS.OK, message="message") self.assertEqual(base_value.message, "message") def test_str(self): base = exception.Base(http_status=STATUS.OK, message="message") self.assertEqual(base.http_status, STATUS.OK) self.assertEqual(str(base.message), "message") class ExceptionUnsupportedVocabularyError(unittest.TestCase): def test_init(self): unsupported_vocabulary_error = exception.UnsupportedVocabularyError( accept_header="accept", supported_types=["a", "b", "c"] ) self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_vocabulary_error.message, "Unsupported vocabulary in the Accept header") stack_trace = [{ "accept_header": "accept", "supported_types": ["a", "b", "c"] }] self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace) class ExceptionUnsupportedContentTypeError(unittest.TestCase): def test_init(self): unsupported_content_type = exception.UnsupportedContentTypeError("text/plain", "application/json") self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unsupported_content_type.message, "Unsupported Content-Type in Request") stack_trace = [{ "requested_type": "text/plain", "supported_types": "application/json" }] self.assertEqual(unsupported_content_type.stack_trace, stack_trace) class ExceptionValidationError(unittest.TestCase): def test_init(self): validation_error = exception.ValidationError( message="message", attribute_name="attribute", value="value", blueprint={"key": "value"} ) self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST) self.assertEqual(validation_error.message, "message") self.assertEqual(validation_error.stack_trace, [ { "attribute_name": "attribute", "value": "value", "message": "message", "blueprint": {"key": "value"} } ]) self.assertEqual(str(validation_error), "attribute message") class ExceptionHandlerException(unittest.TestCase): def test_init(self): from prestans.rest import Request import logging logging.basicConfig() self.logger = logging.getLogger("prestans") from prestans.deserializer import JSON charset = "utf-8" serializers = [JSON()] default_serializer = JSON() request_environ = { "REQUEST_METHOD": VERB.GET, "PATH_INFO": "/url", "HTTP_USER_AGENT": "chrome", "wsgi.url_scheme": "https", "SERVER_NAME": "localhost", "SERVER_PORT": "8080" } request = Request( environ=request_environ, charset=charset, logger=self.logger, deserializers=serializers, default_deserializer=default_serializer ) handler_exception = exception.HandlerException(STATUS.FORBIDDEN, "message") handler_exception.request = request self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN) self.assertEqual(handler_exception.message, "message") self.assertEqual(handler_exception.request, request) self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome "message"') self.assertEqual(str(handler_exception), 'GET 
https://localhost:8080/url chrome "message"') handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, "message") self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND) self.assertEqual(handler_exception_without_request.message, "message") self.assertEqual(handler_exception_without_request.log_message, "message") self.assertEqual(str(handler_exception_without_request), "message") class ExceptionRequestException(unittest.TestCase): def test_init(self): request_exception = exception.RequestException(STATUS.BAD_REQUEST, "bad request") self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST) self.assertEqual(request_exception.message, "bad request") class ExceptionUnimplementedVerbError(unittest.TestCase): def test_init(self): unimplemented_verb = exception.UnimplementedVerbError("GET") self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED) self.assertEqual(unimplemented_verb.message, "API does not implement the HTTP Verb") self.assertEqual(unimplemented_verb.stack_trace, [{"verb": "GET"}]) class ExceptionNoEndpointError(unittest.TestCase): def test_init(self): no_endpoint = exception.NoEndpointError() self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND) self.assertEqual(no_endpoint.message, "API does not provide this end-point") class ExceptionAuthenticationError(unittest.TestCase): def test_init(self): authentication = exception.AuthenticationError() self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication.message, "Authentication Error; service is only available to authenticated") authentication_custom = exception.AuthenticationError("Custom message") self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED) self.assertEqual(authentication_custom.message, "Custom message") class ExceptionAuthorizationError(unittest.TestCase): def test_init(self): authorization = exception.AuthorizationError("Role") self.assertEqual(authorization.http_status, STATUS.FORBIDDEN) self.assertEqual(authorization.message, "Role is not allowed to access this resource") class ExceptionSerializationFailedError(unittest.TestCase): def test_init(self): serialization_failed_error = exception.SerializationFailedError("format") self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(serialization_failed_error.message, "Serialization failed: format") self.assertEqual(str(serialization_failed_error), "Serialization failed: format") class ExceptionDeSerializationFailedError(unittest.TestCase): def test_init(self): deserialization_failed_error = exception.DeSerializationFailedError("format") self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND) self.assertEqual(deserialization_failed_error.message, "DeSerialization failed: format") self.assertEqual(str(deserialization_failed_error), "DeSerialization failed: format") class ExceptionAttributeFilterDiffers(unittest.TestCase): def test_init(self): attribute_filter_differs = exception.AttributeFilterDiffers(["cat", "dog"]) self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST) self.assertEqual( attribute_filter_differs.message, "attribute filter contains attributes (cat, dog) that are not part of template" ) class ExceptionInconsistentPersistentDataError(unittest.TestCase): def test_init(self): error = exception.InconsistentPersistentDataError("name", "error message") self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR) 
self.assertEqual(error.message, "Data Adapter failed to validate stored data on the server") self.assertEqual( str(error), "DataAdapter failed to adapt name, Data Adapter failed to validate stored data on the server" ) self.assertEqual(error.stack_trace, [{'exception_message': "error message", 'attribute_name': "name"}]) class ExceptionDataValidationException(unittest.TestCase): def test_init(self): exp = exception.DataValidationException("message") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, "message") class ExceptionRequiredAttributeError(unittest.TestCase): def test_init(self): exp = exception.RequiredAttributeError() self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, "attribute is required and does not provide a default value") class ExceptionParseFailedError(unittest.TestCase): def test_init(self): default_msg = exception.ParseFailedError() self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(default_msg.message, "Parser Failed") custom_msg = exception.ParseFailedError("custom") self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST) self.assertEqual(custom_msg.message, "custom") class ExceptionLessThanMinimumError(unittest.TestCase): def test_init(self): exp = exception.LessThanMinimumError(3, 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, "3 is less than the allowed minimum of 5") class ExceptionMoreThanMaximumError(unittest.TestCase): def test_init(self): exp = exception.MoreThanMaximumError(5, 3) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, "5 is more than the allowed maximum of 3") class ExceptionInvalidChoiceError(unittest.TestCase): def test_init(self): exp = exception.InvalidChoiceError(3, [1, 2, 5]) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, "value 3 is not one of these choices 1, 2, 5") class ExceptionMinimumLengthError(unittest.TestCase): def test_init(self): exp = exception.MinimumLengthError("dog", 5) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, "length of value: dog has to be greater than 5") class ExceptionMaximumLengthError(unittest.TestCase): def test_init(self): exp = exception.MaximumLengthError("dog", 2) self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, "length of value: dog has to be less than 2") class ExceptionInvalidTypeError(unittest.TestCase): def test_init(self): exp = exception.InvalidTypeError("str", "int") self.assertEqual(exp.http_status, STATUS.BAD_REQUEST) self.assertEqual(exp.message, "data type str given, expected int") class ExceptionMissingParameterError(unittest.TestCase): def test_init(self): missing_parameter = exception.MissingParameterError() self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST) self.assertEqual(missing_parameter.message, "missing parameter") class ExceptionInvalidFormatError(unittest.TestCase): def test_init(self): invalid_format = exception.InvalidFormatError("cat") self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_format.message, "invalid value cat provided") class ExceptionInvalidMetaValueError(unittest.TestCase): def test_init(self): invalid_meta_value = exception.InvalidMetaValueError() self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST) self.assertEqual(invalid_meta_value.message, "invalid meta value") class 
ExceptionUnregisteredAdapterError(unittest.TestCase): def test_init(self): unregistered_adapter = exception.UnregisteredAdapterError("namespace.Model") self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST) self.assertEqual(unregistered_adapter.message, "no registered adapters for data model namespace.Model") class ExceptionResponseException(unittest.TestCase): def test_init(self): from prestans.types import Model class MyModel(Model): pass my_model = MyModel() response = exception.ResponseException(STATUS.OK, "message", my_model) self.assertEqual(response.http_status, STATUS.OK) self.assertEqual(response.message, "message") self.assertEqual(response.response_model, my_model) self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, "message", "string") class ExceptionServiceUnavailable(unittest.TestCase): def test_init(self): service_unavailable = exception.ServiceUnavailable() self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE) self.assertEqual(service_unavailable.message, "Service Unavailable") class ExceptionBadRequest(unittest.TestCase): def test_init(self): bad_request = exception.BadRequest() self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST) self.assertEqual(bad_request.message, "Bad Request") class ExceptionConflict(unittest.TestCase): def test_init(self): conflict = exception.Conflict() self.assertEqual(conflict.http_status, STATUS.CONFLICT) self.assertEqual(conflict.message, "Conflict") class ExceptionNotFound(unittest.TestCase): def test_init(self): not_found = exception.NotFound() self.assertEqual(not_found.http_status, STATUS.NOT_FOUND) self.assertEqual(not_found.message, "Not Found") class ExceptionUnauthorized(unittest.TestCase): def test_init(self): unauthorized = exception.Unauthorized() self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED) self.assertEqual(unauthorized.message, "Unauthorized") class ExceptionMovedPermanently(unittest.TestCase): def test_init(self): moved_permanently = exception.MovedPermanently() self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY) self.assertEqual(moved_permanently.message, "Moved Permanently") class ExceptionPaymentRequired(unittest.TestCase): def test_init(self): payment_required = exception.PaymentRequired() self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED) self.assertEqual(payment_required.message, "Payment Required") class ExceptionForbidden(unittest.TestCase): def test_init(self): forbidden = exception.Forbidden() self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN) self.assertEqual(forbidden.message, "Forbidden") class ExceptionInternalServerError(unittest.TestCase): def test_init(self): internal_server_error = exception.InternalServerError() self.assertEqual(internal_server_error.http_status, STATUS.INTERNAL_SERVER_ERROR) self.assertEqual(internal_server_error.message, "Internal Server Error")
[ "prestans.exception.InconsistentPersistentDataError", "prestans.exception.Base", "prestans.exception.UnsupportedContentTypeError", "prestans.exception.NotFound", "prestans.exception.ServiceUnavailable", "prestans.exception.AuthorizationError", "prestans.exception.InvalidFormatError", "prestans.deserializer.JSON", "prestans.exception.Forbidden", "prestans.exception.ParseFailedError", "prestans.rest.Request", "prestans.exception.HandlerException", "prestans.exception.Unauthorized", "prestans.exception.DataValidationException", "prestans.exception.UnsupportedVocabularyError", "prestans.exception.LessThanMinimumError", "prestans.exception.InvalidChoiceError", "prestans.exception.MissingParameterError", "prestans.exception.ResponseException", "prestans.exception.MoreThanMaximumError", "prestans.exception.UnregisteredAdapterError", "prestans.exception.BadRequest", "prestans.exception.Conflict", "prestans.exception.SerializationFailedError", "prestans.exception.DeSerializationFailedError", "prestans.exception.InternalServerError", "prestans.exception.UnimplementedVerbError", "prestans.exception.RequestException", "prestans.exception.MinimumLengthError", "prestans.exception.NoEndpointError", "prestans.exception.PaymentRequired", "prestans.exception.AttributeFilterDiffers", "logging.basicConfig", "prestans.exception.InvalidMetaValueError", "prestans.exception.MovedPermanently", "prestans.exception.ValidationError", "prestans.exception.InvalidTypeError", "prestans.exception.AuthenticationError", "prestans.exception.MaximumLengthError", "prestans.exception.RequiredAttributeError", "logging.getLogger" ]
[((208, 264), 'prestans.exception.Base', 'exception.Base', ([], {'http_status': 'STATUS.OK', 'message': '"""message"""'}), "(http_status=STATUS.OK, message='message')\n", (222, 264), False, 'from prestans import exception\n'), ((493, 549), 'prestans.exception.Base', 'exception.Base', ([], {'http_status': 'STATUS.OK', 'message': '"""message"""'}), "(http_status=STATUS.OK, message='message')\n", (507, 549), False, 'from prestans import exception\n'), ((692, 748), 'prestans.exception.Base', 'exception.Base', ([], {'http_status': 'STATUS.OK', 'message': '"""message"""'}), "(http_status=STATUS.OK, message='message')\n", (706, 748), False, 'from prestans import exception\n'), ((845, 901), 'prestans.exception.Base', 'exception.Base', ([], {'http_status': 'STATUS.OK', 'message': '"""message"""'}), "(http_status=STATUS.OK, message='message')\n", (859, 901), False, 'from prestans import exception\n'), ((1140, 1237), 'prestans.exception.UnsupportedVocabularyError', 'exception.UnsupportedVocabularyError', ([], {'accept_header': '"""accept"""', 'supported_types': "['a', 'b', 'c']"}), "(accept_header='accept',\n supported_types=['a', 'b', 'c'])\n", (1176, 1237), False, 'from prestans import exception\n'), ((1798, 1869), 'prestans.exception.UnsupportedContentTypeError', 'exception.UnsupportedContentTypeError', (['"""text/plain"""', '"""application/json"""'], {}), "('text/plain', 'application/json')\n", (1835, 1869), False, 'from prestans import exception\n'), ((2369, 2488), 'prestans.exception.ValidationError', 'exception.ValidationError', ([], {'message': '"""message"""', 'attribute_name': '"""attribute"""', 'value': '"""value"""', 'blueprint': "{'key': 'value'}"}), "(message='message', attribute_name='attribute',\n value='value', blueprint={'key': 'value'})\n", (2394, 2488), False, 'from prestans import exception\n'), ((3163, 3184), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (3182, 3184), False, 'import logging\n'), ((3207, 3236), 'logging.getLogger', 'logging.getLogger', (['"""prestans"""'], {}), "('prestans')\n", (3224, 3236), False, 'import logging\n'), ((3371, 3377), 'prestans.deserializer.JSON', 'JSON', ([], {}), '()\n', (3375, 3377), False, 'from prestans.deserializer import JSON\n'), ((3664, 3805), 'prestans.rest.Request', 'Request', ([], {'environ': 'request_environ', 'charset': 'charset', 'logger': 'self.logger', 'deserializers': 'serializers', 'default_deserializer': 'default_serializer'}), '(environ=request_environ, charset=charset, logger=self.logger,\n deserializers=serializers, default_deserializer=default_serializer)\n', (3671, 3805), False, 'from prestans.rest import Request\n'), ((3901, 3956), 'prestans.exception.HandlerException', 'exception.HandlerException', (['STATUS.FORBIDDEN', '"""message"""'], {}), "(STATUS.FORBIDDEN, 'message')\n", (3927, 3956), False, 'from prestans import exception\n'), ((4452, 4507), 'prestans.exception.HandlerException', 'exception.HandlerException', (['STATUS.NOT_FOUND', '"""message"""'], {}), "(STATUS.NOT_FOUND, 'message')\n", (4478, 4507), False, 'from prestans import exception\n'), ((4944, 5005), 'prestans.exception.RequestException', 'exception.RequestException', (['STATUS.BAD_REQUEST', '"""bad request"""'], {}), "(STATUS.BAD_REQUEST, 'bad request')\n", (4970, 5005), False, 'from prestans import exception\n'), ((5264, 5303), 'prestans.exception.UnimplementedVerbError', 'exception.UnimplementedVerbError', (['"""GET"""'], {}), "('GET')\n", (5296, 5303), False, 'from prestans import exception\n'), ((5655, 5682), 
'prestans.exception.NoEndpointError', 'exception.NoEndpointError', ([], {}), '()\n', (5680, 5682), False, 'from prestans import exception\n'), ((5944, 5975), 'prestans.exception.AuthenticationError', 'exception.AuthenticationError', ([], {}), '()\n', (5973, 5975), False, 'from prestans import exception\n'), ((6200, 6247), 'prestans.exception.AuthenticationError', 'exception.AuthenticationError', (['"""Custom message"""'], {}), "('Custom message')\n", (6229, 6247), False, 'from prestans import exception\n'), ((6509, 6545), 'prestans.exception.AuthorizationError', 'exception.AuthorizationError', (['"""Role"""'], {}), "('Role')\n", (6537, 6545), False, 'from prestans import exception\n'), ((6836, 6880), 'prestans.exception.SerializationFailedError', 'exception.SerializationFailedError', (['"""format"""'], {}), "('format')\n", (6870, 6880), False, 'from prestans import exception\n'), ((7276, 7322), 'prestans.exception.DeSerializationFailedError', 'exception.DeSerializationFailedError', (['"""format"""'], {}), "('format')\n", (7312, 7322), False, 'from prestans import exception\n'), ((7720, 7768), 'prestans.exception.AttributeFilterDiffers', 'exception.AttributeFilterDiffers', (["['cat', 'dog']"], {}), "(['cat', 'dog'])\n", (7752, 7768), False, 'from prestans import exception\n'), ((8137, 8203), 'prestans.exception.InconsistentPersistentDataError', 'exception.InconsistentPersistentDataError', (['"""name"""', '"""error message"""'], {}), "('name', 'error message')\n", (8178, 8203), False, 'from prestans import exception\n'), ((8758, 8802), 'prestans.exception.DataValidationException', 'exception.DataValidationException', (['"""message"""'], {}), "('message')\n", (8791, 8802), False, 'from prestans import exception\n'), ((9014, 9048), 'prestans.exception.RequiredAttributeError', 'exception.RequiredAttributeError', ([], {}), '()\n', (9046, 9048), False, 'from prestans import exception\n'), ((9313, 9341), 'prestans.exception.ParseFailedError', 'exception.ParseFailedError', ([], {}), '()\n', (9339, 9341), False, 'from prestans import exception\n'), ((9497, 9533), 'prestans.exception.ParseFailedError', 'exception.ParseFailedError', (['"""custom"""'], {}), "('custom')\n", (9523, 9533), False, 'from prestans import exception\n'), ((9756, 9792), 'prestans.exception.LessThanMinimumError', 'exception.LessThanMinimumError', (['(3)', '(5)'], {}), '(3, 5)\n', (9786, 9792), False, 'from prestans import exception\n'), ((10034, 10070), 'prestans.exception.MoreThanMaximumError', 'exception.MoreThanMaximumError', (['(5)', '(3)'], {}), '(5, 3)\n', (10064, 10070), False, 'from prestans import exception\n'), ((10310, 10352), 'prestans.exception.InvalidChoiceError', 'exception.InvalidChoiceError', (['(3)', '[1, 2, 5]'], {}), '(3, [1, 2, 5])\n', (10338, 10352), False, 'from prestans import exception\n'), ((10596, 10634), 'prestans.exception.MinimumLengthError', 'exception.MinimumLengthError', (['"""dog"""', '(5)'], {}), "('dog', 5)\n", (10624, 10634), False, 'from prestans import exception\n'), ((10880, 10918), 'prestans.exception.MaximumLengthError', 'exception.MaximumLengthError', (['"""dog"""', '(2)'], {}), "('dog', 2)\n", (10908, 10918), False, 'from prestans import exception\n'), ((11159, 11199), 'prestans.exception.InvalidTypeError', 'exception.InvalidTypeError', (['"""str"""', '"""int"""'], {}), "('str', 'int')\n", (11185, 11199), False, 'from prestans import exception\n'), ((11450, 11483), 'prestans.exception.MissingParameterError', 'exception.MissingParameterError', ([], {}), '()\n', (11481, 11483), False, 
'from prestans import exception\n'), ((11740, 11775), 'prestans.exception.InvalidFormatError', 'exception.InvalidFormatError', (['"""cat"""'], {}), "('cat')\n", (11768, 11775), False, 'from prestans import exception\n'), ((12042, 12075), 'prestans.exception.InvalidMetaValueError', 'exception.InvalidMetaValueError', ([], {}), '()\n', (12073, 12075), False, 'from prestans import exception\n'), ((12347, 12400), 'prestans.exception.UnregisteredAdapterError', 'exception.UnregisteredAdapterError', (['"""namespace.Model"""'], {}), "('namespace.Model')\n", (12381, 12400), False, 'from prestans import exception\n'), ((12810, 12869), 'prestans.exception.ResponseException', 'exception.ResponseException', (['STATUS.OK', '"""message"""', 'my_model'], {}), "(STATUS.OK, 'message', my_model)\n", (12837, 12869), False, 'from prestans import exception\n'), ((13272, 13302), 'prestans.exception.ServiceUnavailable', 'exception.ServiceUnavailable', ([], {}), '()\n', (13300, 13302), False, 'from prestans import exception\n'), ((13562, 13584), 'prestans.exception.BadRequest', 'exception.BadRequest', ([], {}), '()\n', (13582, 13584), False, 'from prestans import exception\n'), ((13807, 13827), 'prestans.exception.Conflict', 'exception.Conflict', ([], {}), '()\n', (13825, 13827), False, 'from prestans import exception\n'), ((14039, 14059), 'prestans.exception.NotFound', 'exception.NotFound', ([], {}), '()\n', (14057, 14059), False, 'from prestans import exception\n'), ((14282, 14306), 'prestans.exception.Unauthorized', 'exception.Unauthorized', ([], {}), '()\n', (14304, 14306), False, 'from prestans import exception\n'), ((14550, 14578), 'prestans.exception.MovedPermanently', 'exception.MovedPermanently', ([], {}), '()\n', (14576, 14578), False, 'from prestans import exception\n'), ((14840, 14867), 'prestans.exception.PaymentRequired', 'exception.PaymentRequired', ([], {}), '()\n', (14865, 14867), False, 'from prestans import exception\n'), ((15112, 15133), 'prestans.exception.Forbidden', 'exception.Forbidden', ([], {}), '()\n', (15131, 15133), False, 'from prestans import exception\n'), ((15372, 15403), 'prestans.exception.InternalServerError', 'exception.InternalServerError', ([], {}), '()\n', (15401, 15403), False, 'from prestans import exception\n'), ((3334, 3340), 'prestans.deserializer.JSON', 'JSON', ([], {}), '()\n', (3338, 3340), False, 'from prestans.deserializer import JSON\n')]
from django.core.paginator import Paginator from django.template import Library from django.utils.translation import ugettext_lazy as _ from touchtechnology.news.models import Article, Category register = Library() @register.filter("category") def get_category(slug): return Category.objects.get(slug=slug) @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_articles(context, article, limit=None, order_by=None): categories = article.categories.live() articles = Article.objects.live() \ .exclude(pk=article.pk) \ .filter(categories__in=categories) \ .distinct() if order_by is not None: articles = articles.order_by(*order_by.split(',')) if limit is not None: articles = articles[:int(limit)] # FIXME backwards compatibility for custom templates context['slice'] = ':' context['article_list'] = articles return context @register.inclusion_tag('touchtechnology/news/_related_list.html', takes_context=True) def related_categories(context, article=None, limit=None): """ If an article is provided, then we select categories relating to it. Otherwise we select all article categories. """ if article is None: categories = Category.objects.all() else: categories = article.categories.all() context['category_list'] = categories return context @register.inclusion_tag('touchtechnology/news/_latest_articles.html', takes_context=True) def latest_articles(context, count=5, title=_("Latest News")): articles = Article.objects.live() paginator = Paginator(articles, count) page = paginator.page(1) context['paginator'] = paginator context['page'] = page context['article_list'] = page.object_list context['title'] = title return context
[ "django.template.Library", "touchtechnology.news.models.Category.objects.get", "touchtechnology.news.models.Article.objects.live", "django.core.paginator.Paginator", "touchtechnology.news.models.Category.objects.all", "django.utils.translation.ugettext_lazy" ]
[((206, 215), 'django.template.Library', 'Library', ([], {}), '()\n', (213, 215), False, 'from django.template import Library\n'), ((282, 313), 'touchtechnology.news.models.Category.objects.get', 'Category.objects.get', ([], {'slug': 'slug'}), '(slug=slug)\n', (302, 313), False, 'from touchtechnology.news.models import Article, Category\n'), ((1697, 1713), 'django.utils.translation.ugettext_lazy', '_', (['"""Latest News"""'], {}), "('Latest News')\n", (1698, 1713), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1731, 1753), 'touchtechnology.news.models.Article.objects.live', 'Article.objects.live', ([], {}), '()\n', (1751, 1753), False, 'from touchtechnology.news.models import Article, Category\n'), ((1770, 1796), 'django.core.paginator.Paginator', 'Paginator', (['articles', 'count'], {}), '(articles, count)\n', (1779, 1796), False, 'from django.core.paginator import Paginator\n'), ((1397, 1419), 'touchtechnology.news.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (1417, 1419), False, 'from touchtechnology.news.models import Article, Category\n'), ((552, 574), 'touchtechnology.news.models.Article.objects.live', 'Article.objects.live', ([], {}), '()\n', (572, 574), False, 'from touchtechnology.news.models import Article, Category\n')]
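The latest_articles tag above leans on Django's standard Paginator; a standalone sketch of that pattern follows, with a plain list of integers standing in for the Article queryset, which would need a configured Django project.

# Standalone sketch of the pagination used in latest_articles; the list of ints
# is a stand-in for Article.objects.live().
from django.core.paginator import Paginator

articles = list(range(12))
paginator = Paginator(articles, 5)
page = paginator.page(1)

print(page.object_list)      # [0, 1, 2, 3, 4] -- the first page of five items
print(paginator.num_pages)   # 3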
# Copyright (C) 2022 Red Hat # SPDX-License-Identifier: Apache-2.0 # A copy of logreduce.tokenizer import re import os DAYS = "sunday|monday|tuesday|wednesday|thursday|friday|saturday" MONTHS = ( "january|february|march|april|may|june|july|august|september|" "october|november|december" ) SHORT_MONTHS = "jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev" SHORT_DAYS = "mon|tue|wed|thu|fri|sat|sun" UUID_RE = r"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-" "?[0-9a-f]{12}" IPV4_RE = ( r"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\.){3}" r"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])" ) IPV6_RE = r"([0-9A-Fa-f]{0,4}:){2,6}(\d{1,3}\.){0,3}[0-9A-Fa-f]{1,3}" MAC_RE = r"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})" class Tokenizer: rawline_re = re.compile( # useless http GET r'"GET / HTTP/1.1"' r'|"OPTIONS * HTTP/1.0" 200' # ssh keys r"|AAAA[A-Z][0-9]" # hashed password r"|\$[0-9]\$" # Certificates r"|-----BEGIN" # git status r"|HEAD is now at|Change-Id: " # Download statement r"| ETA " # yum mirrors information r"|\* [a-zA-Z]+: [a-zA-Z0-9\.-]*$|Trying other mirror." # ssh scan attempts r'|audit.*exe="/usr/sbin/sshd"|sshd.*[iI]nvalid user' r"|sshd.*Unable to connect using the available authentication methods" r"|unix_chkpwd.*: password check failed for user" r"|sshd.*: authentication failure" r"|sshd.*: Failed password for" r"|sshd.*- POSSIBLE BREAK-IN ATTEMPT" # zuul random test r"|zuul.*echo BECOME-SUCCESS-" r"|^[^ ]{64}$" # useless debug statement r"|ovs-ofctl .* (dump-ports|dump-flows|show)\b" r"|(ip|eb)tables .* -L\b" ) # See https://en.wikipedia.org/wiki/Percent-encoding uri_percent_re = re.compile(r"%[2345][0-9A-F]") ip_re = re.compile(r"%s|%s|%s" % (IPV4_RE, IPV6_RE, MAC_RE)) # For some unknown reason, '_' in (?=) doesn't work in prefix match # re.sub(r'(?=\b|_)test(?=\b|_)', 'RNG', 'AUTH_test_') -> doesn't work # re.sub(r'(?=\b|_)_?test(?=\b|_)', 'RNG', 'AUTH_test_') -> works power2_re = re.compile( r"(?=\b|_)_?(?:[\w+/]{128}|[\w+/]{64}|" r"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\b|_)" ) uuid_re = re.compile(r"(?=\b|_)_?(?:%s|tx[^ ]{32})(?=\b|_)" % UUID_RE, re.I) date_re = re.compile( r"\b(?:%s|%s|%s|%s)\b" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I ) heat_re = re.compile(r"-\w{12}[- \"$]") comments = re.compile(r'(?:[\s]*# |^%% |^#|^[\s]*id = ").*') alpha_re = re.compile(r"[^a-zA-Z_\/\s]+") gitver_re = re.compile(r"git\w+") digits_re = re.compile(r"0x[0-9a-fA-F]{2,}|[0-9]+(?:\.\d+)?") randpath_re = re.compile( r"(?:/tmp/ansible\.\w{8}" r"|/tmp/tmp\w{6}" r"|/tmp/tmp\.\w{10})\b" ) gitsha_re = re.compile(r"\b\w{7}\.\.\w{7}\b") hash_re = re.compile(r"SHA256:[\w+/]{43}\b") @staticmethod def process(line: str) -> str: # Ignore some raw pattern first if Tokenizer.rawline_re.search(line): return "" strip = line # Break URI percent encoding strip = Tokenizer.uri_percent_re.sub(" ", strip) # Remove words that are exactly 32, 64 or 128 character longs strip = Tokenizer.power2_re.sub("RNGN", strip) # Remove uuid strip = Tokenizer.uuid_re.sub("RNGU", strip) # Remove heat short uuid but keep spacing # ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName strip = Tokenizer.heat_re.sub(" HEATID ", strip) # Remove git sha strip = Tokenizer.gitsha_re.sub("RNGG", strip) # Remove hashes strip = Tokenizer.hash_re.sub("RNGH", strip) # Remove random path strip = Tokenizer.randpath_re.sub("RNGP", strip) # Remove date strip = Tokenizer.date_re.sub("DATE", strip) # Remove ip/addr strip = Tokenizer.ip_re.sub("RNGI", strip) # Remove numbers strip = Tokenizer.digits_re.sub("", strip) # Only keep characters 
strip = Tokenizer.alpha_re.sub(" ", strip) # Remove tiny words strip = " ".join(filter(lambda x: len(x) > 3, strip.split())) # Weight failure token for token in ("error", "fail", "warn"): if token in strip.lower(): strip += " %sA %sB %sC %sD" % (token, token, token, token) return strip
[ "re.compile" ]
[((764, 1316), 're.compile', 're.compile', (['""""GET / HTTP/1.1"|"OPTIONS * HTTP/1.0" 200|AAAA[A-Z][0-9]|\\\\$[0-9]\\\\$|-----BEGIN|HEAD is now at|Change-Id: | ETA |\\\\* [a-zA-Z]+: [a-zA-Z0-9\\\\.-]*$|Trying other mirror.|audit.*exe="/usr/sbin/sshd"|sshd.*[iI]nvalid user|sshd.*Unable to connect using the available authentication methods|unix_chkpwd.*: password check failed for user|sshd.*: authentication failure|sshd.*: Failed password for|sshd.*- POSSIBLE BREAK-IN ATTEMPT|zuul.*echo BECOME-SUCCESS-|^[^ ]{64}$|ovs-ofctl .* (dump-ports|dump-flows|show)\\\\b|(ip|eb)tables .* -L\\\\b"""'], {}), '(\n \'"GET / HTTP/1.1"|"OPTIONS * HTTP/1.0" 200|AAAA[A-Z][0-9]|\\\\$[0-9]\\\\$|-----BEGIN|HEAD is now at|Change-Id: | ETA |\\\\* [a-zA-Z]+: [a-zA-Z0-9\\\\.-]*$|Trying other mirror.|audit.*exe="/usr/sbin/sshd"|sshd.*[iI]nvalid user|sshd.*Unable to connect using the available authentication methods|unix_chkpwd.*: password check failed for user|sshd.*: authentication failure|sshd.*: Failed password for|sshd.*- POSSIBLE BREAK-IN ATTEMPT|zuul.*echo BECOME-SUCCESS-|^[^ ]{64}$|ovs-ofctl .* (dump-ports|dump-flows|show)\\\\b|(ip|eb)tables .* -L\\\\b\'\n )\n', (774, 1316), False, 'import re\n'), ((1866, 1895), 're.compile', 're.compile', (['"""%[2345][0-9A-F]"""'], {}), "('%[2345][0-9A-F]')\n", (1876, 1895), False, 'import re\n'), ((1909, 1960), 're.compile', 're.compile', (["('%s|%s|%s' % (IPV4_RE, IPV6_RE, MAC_RE))"], {}), "('%s|%s|%s' % (IPV4_RE, IPV6_RE, MAC_RE))\n", (1919, 1960), False, 'import re\n'), ((2201, 2305), 're.compile', 're.compile', (['"""(?=\\\\b|_)_?(?:[\\\\w+/]{128}|[\\\\w+/]{64}|[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\\\b|_)"""'], {}), "(\n '(?=\\\\b|_)_?(?:[\\\\w+/]{128}|[\\\\w+/]{64}|[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\\\\b|_)'\n )\n", (2211, 2305), False, 'import re\n'), ((2333, 2400), 're.compile', 're.compile', (["('(?=\\\\b|_)_?(?:%s|tx[^ ]{32})(?=\\\\b|_)' % UUID_RE)", 're.I'], {}), "('(?=\\\\b|_)_?(?:%s|tx[^ ]{32})(?=\\\\b|_)' % UUID_RE, re.I)\n", (2343, 2400), False, 'import re\n'), ((2414, 2502), 're.compile', 're.compile', (["('\\\\b(?:%s|%s|%s|%s)\\\\b' % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS))", 're.I'], {}), "('\\\\b(?:%s|%s|%s|%s)\\\\b' % (DAYS, SHORT_DAYS, SHORT_MONTHS,\n MONTHS), re.I)\n", (2424, 2502), False, 'import re\n'), ((2526, 2556), 're.compile', 're.compile', (['"""-\\\\w{12}[- \\\\"$]"""'], {}), '(\'-\\\\w{12}[- \\\\"$]\')\n', (2536, 2556), False, 'import re\n'), ((2571, 2621), 're.compile', 're.compile', (['"""(?:[\\\\s]*# |^%% |^#|^[\\\\s]*id = ").*"""'], {}), '(\'(?:[\\\\s]*# |^%% |^#|^[\\\\s]*id = ").*\')\n', (2581, 2621), False, 'import re\n'), ((2636, 2667), 're.compile', 're.compile', (['"""[^a-zA-Z_\\\\/\\\\s]+"""'], {}), "('[^a-zA-Z_\\\\/\\\\s]+')\n", (2646, 2667), False, 'import re\n'), ((2683, 2704), 're.compile', 're.compile', (['"""git\\\\w+"""'], {}), "('git\\\\w+')\n", (2693, 2704), False, 'import re\n'), ((2721, 2771), 're.compile', 're.compile', (['"""0x[0-9a-fA-F]{2,}|[0-9]+(?:\\\\.\\\\d+)?"""'], {}), "('0x[0-9a-fA-F]{2,}|[0-9]+(?:\\\\.\\\\d+)?')\n", (2731, 2771), False, 'import re\n'), ((2789, 2865), 're.compile', 're.compile', (['"""(?:/tmp/ansible\\\\.\\\\w{8}|/tmp/tmp\\\\w{6}|/tmp/tmp\\\\.\\\\w{10})\\\\b"""'], {}), "('(?:/tmp/ansible\\\\.\\\\w{8}|/tmp/tmp\\\\w{6}|/tmp/tmp\\\\.\\\\w{10})\\\\b')\n", (2799, 2865), False, 'import re\n'), ((2899, 2937), 're.compile', 're.compile', (['"""\\\\b\\\\w{7}\\\\.\\\\.\\\\w{7}\\\\b"""'], {}), "('\\\\b\\\\w{7}\\\\.\\\\.\\\\w{7}\\\\b')\n", (2909, 2937), False, 'import re\n'), ((2947, 2982), 're.compile', 
're.compile', (['"""SHA256:[\\\\w+/]{43}\\\\b"""'], {}), "('SHA256:[\\\\w+/]{43}\\\\b')\n", (2957, 2982), False, 'import re\n')]
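A minimal usage sketch for the Tokenizer above (editor's illustration, not part of the original logreduce copy). It assumes the class defined above is importable; the sample log line is invented for demonstration, and the exact output depends on the regular expressions defined in the class.

# assuming the Tokenizer class above is available in the current module
sample = "May 03 10:12:01 node-1 sshd[2048]: Accepted publickey for zuul from 192.168.1.10"
print(Tokenizer.process(sample))
# Dates, IP addresses and bare numbers are replaced by placeholder tokens
# (DATE, RNGI, ...), punctuation is stripped by alpha_re, and words of three
# characters or fewer are dropped, leaving only the stable words of the line.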
from flink_rest_client.common import _execute_rest_request, RestException class JobTrigger: def __init__(self, prefix, type_name, job_id, trigger_id): self._prefix = prefix self._type_name = type_name self.job_id = job_id self.trigger_id = trigger_id @property def status(self): return _execute_rest_request( url=f"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}" ) class JobVertexSubtaskClient: def __init__(self, prefix): """ Constructor. Parameters ---------- prefix: str REST API url prefix. It must contain the host, port pair. """ self._prefix = prefix @property def prefix_url(self): return f"{self._prefix}/subtasks" def subtask_ids(self): """ Returns the subtask identifiers. Returns ------- list Positive integer list of subtask ids. """ return [elem["subtask"] for elem in self.accumulators()["subtasks"]] def accumulators(self): """ Returns all user-defined accumulators for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators Returns ------- dict User-defined accumulators """ return _execute_rest_request(url=f"{self.prefix_url}/accumulators") def metric_names(self): """ Returns the supported metric names. Returns ------- list List of metric names. """ return [ elem["id"] for elem in _execute_rest_request(url=f"{self.prefix_url}/metrics") ] def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None): """ Provides access to aggregated subtask metrics. By default it returns with all existing metric names. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics Parameters ---------- metric_names: list (optional) List of selected specific metric names. Default: <all metrics> agg_modes: list (optional) List of aggregation modes which should be calculated. Available aggregations are: "min, max, sum, avg". Default: <all modes> subtask_ids: list List of positive integers to select specific subtasks. The list of valid subtask ids is available through the subtask_ids() method. Default: <all subtasks>. Returns ------- dict Key-value pairs of metrics. """ if metric_names is None: metric_names = self.metric_names() supported_agg_modes = ["min", "max", "sum", "avg"] if agg_modes is None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f"The provided aggregation modes list contains invalid value. Supported aggregation " f"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}" ) if subtask_ids is None: subtask_ids = self.subtask_ids() params = { "get": ",".join(metric_names), "agg": ",".join(agg_modes), "subtasks": ",".join([str(elem) for elem in subtask_ids]), } query_result = _execute_rest_request( url=f"{self.prefix_url}/metrics", params=params ) result = {} for elem in query_result: metric_name = elem.pop("id") result[metric_name] = elem return result def get(self, subtask_id): """ Returns details of the current or latest execution attempt of a subtask. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex Parameters ---------- subtask_id: int Positive integer value that identifies a subtask. Returns ------- dict """ return _execute_rest_request(url=f"{self.prefix_url}/{subtask_id}") def get_attempt(self, subtask_id, attempt_id=None): """ Returns details of an execution attempt of a subtask. Multiple execution attempts happen in case of failure/recovery. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt Parameters ---------- subtask_id: int Positive integer value that identifies a subtask. 
attempt_id: int (Optional) Positive integer value that identifies an execution attempt. Default: current execution attempt's id Returns ------- dict Details of the selected attempt. """ if attempt_id is None: return self.get(subtask_id) return _execute_rest_request( url=f"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}" ) def get_attempt_accumulators(self, subtask_id, attempt_id=None): """ Returns the accumulators of an execution attempt of a subtask. Multiple execution attempts happen in case of failure/recovery. Parameters ---------- subtask_id: int Positive integer value that identifies a subtask. attempt_id: int (Optional) Positive integer value that identifies an execution attempt. Default: current execution attempt's id Returns ------- dict The accumulators of the selected execution attempt of a subtask. """ if attempt_id is None: attempt_id = self.get(subtask_id)["attempt"] return _execute_rest_request( url=f"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators" ) class JobVertexClient: def __init__(self, prefix, job_id, vertex_id): """ Constructor. Parameters ---------- prefix: str REST API url prefix. It must contain the host, port pair. """ self._prefix = prefix self.job_id = job_id self.vertex_id = vertex_id @property def prefix_url(self): return f"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}" @property def subtasks(self): return JobVertexSubtaskClient(self.prefix_url) def details(self): """ Returns details for a task, with a summary for each of its subtasks. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid Returns ------- dict details for a task. """ return _execute_rest_request(url=self.prefix_url) def backpressure(self): """ Returns back-pressure information for a job, and may initiate back-pressure sampling if necessary. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure Notes ----- The deprecated status means that the back pressure stats are not available. Returns ------- dict Backpressure information """ return _execute_rest_request(url=f"{self.prefix_url}/backpressure") def metric_names(self): """ Returns the supported metric names. Returns ------- list List of metric names. """ return [ elem["id"] for elem in _execute_rest_request(url=f"{self.prefix_url}/metrics") ] def metrics(self, metric_names=None): """ Provides access to task metrics. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics Returns ------- dict Task metrics. """ if metric_names is None: metric_names = self.metric_names() params = {"get": ",".join(metric_names)} query_result = _execute_rest_request( url=f"{self.prefix_url}/metrics", params=params ) result = {} for elem in query_result: metric_name = elem.pop("id") result[metric_name] = elem["value"] return result def subtasktimes(self): """ Returns time-related information for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes Returns ------- dict Time-related information for all subtasks """ return _execute_rest_request(url=f"{self.prefix_url}/subtasktimes") def taskmanagers(self): """ Returns task information aggregated by task manager. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers Returns ------- dict Task information aggregated by task manager. """ return _execute_rest_request(url=f"{self.prefix_url}/taskmanagers") def watermarks(self): """ Returns the watermarks for all subtasks of a task. Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks Returns ------- list Watermarks for all subtasks of a task. 
""" return _execute_rest_request(url=f"{self.prefix_url}/watermarks") class JobsClient: def __init__(self, prefix): """ Constructor. Parameters ---------- prefix: str REST API url prefix. It must contain the host, port pair. """ self.prefix = f"{prefix}/jobs" def all(self): """ Returns an overview over all jobs and their current state. Endpoint: [GET] /jobs Returns ------- list List of jobs and their current state. """ return _execute_rest_request(url=self.prefix)["jobs"] def job_ids(self): """ Returns the list of job_ids. Returns ------- list List of job ids. """ return [elem["id"] for elem in self.all()] def overview(self): """ Returns an overview over all jobs. Endpoint: [GET] /jobs/overview Returns ------- list List of existing jobs. """ return _execute_rest_request(url=f"{self.prefix}/overview")["jobs"] def metric_names(self): """ Returns the supported metric names. Returns ------- list List of metric names. """ return [ elem["id"] for elem in _execute_rest_request(url=f"{self.prefix}/metrics") ] def metrics(self, metric_names=None, agg_modes=None, job_ids=None): """ Returns an overview over all jobs. Endpoint: [GET] /jobs/metrics Parameters ---------- metric_names: list (optional) List of selected specific metric names. Default: <all metrics> agg_modes: list (optional) List of aggregation modes which should be calculated. Available aggregations are: "min, max, sum, avg". Default: <all modes> job_ids: list List of 32-character hexadecimal strings to select specific jobs. The list of valid jobs are available through the job_ids() method. Default: <all taskmanagers>. Returns ------- dict Aggregated job metrics. """ if metric_names is None: metric_names = self.metric_names() supported_agg_modes = ["min", "max", "sum", "avg"] if agg_modes is None: agg_modes = supported_agg_modes if len(set(agg_modes).difference(set(supported_agg_modes))) > 0: raise RestException( f"The provided aggregation modes list contains invalid value. Supported aggregation " f"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}" ) if job_ids is None: job_ids = self.job_ids() params = { "get": ",".join(metric_names), "agg": ",".join(agg_modes), "jobs": ",".join(job_ids), } query_result = _execute_rest_request( url=f"{self.prefix}/metrics", params=params ) result = {} for elem in query_result: metric_name = elem.pop("id") result[metric_name] = elem return result def get(self, job_id): """ Returns details of a job. Endpoint: [GET] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- dict Details of the selected job. """ return _execute_rest_request(url=f"{self.prefix}/{job_id}") def get_config(self, job_id): """ Returns the configuration of a job. Endpoint: [GET] /jobs/:jobid/config Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- dict Job configuration """ return _execute_rest_request(url=f"{self.prefix}/{job_id}/config") def get_exceptions(self, job_id): """ Returns the most recent exceptions that have been handled by Flink for this job. Endpoint: [GET] /jobs/:jobid/exceptions Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- dict The most recent exceptions. """ return _execute_rest_request(url=f"{self.prefix}/{job_id}/exceptions") def get_execution_result(self, job_id): """ Returns the result of a job execution. Gives access to the execution time of the job and to all accumulators created by this job. 
Endpoint: [GET] /jobs/:jobid/execution-result Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- dict The execution result of the selected job. """ return _execute_rest_request(url=f"{self.prefix}/{job_id}/execution-result") def get_metrics(self, job_id, metric_names=None): """ Provides access to job metrics. Endpoint: [GET] /jobs/:jobid/metrics Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. metric_names: list (optional) List of selected specific metric names. Default: <all metrics> Returns ------- dict Job metrics. """ if metric_names is None: metric_names = self.metric_names() params = {"get": ",".join(metric_names)} query_result = _execute_rest_request( url=f"{self.prefix}/{job_id}/metrics", params=params ) return dict([(elem["id"], elem["value"]) for elem in query_result]) def get_plan(self, job_id): """ Returns the dataflow plan of a job. Endpoint: [GET] /jobs/:jobid/plan Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- dict Dataflow plan """ return _execute_rest_request(url=f"{self.prefix}/{job_id}/plan")["plan"] def get_vertex_ids(self, job_id): """ Returns the ids of vertices of the selected job. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- list List of identifiers. """ return [elem["id"] for elem in self.get(job_id)["vertices"]] def get_accumulators(self, job_id, include_serialized_value=None): """ Returns the accumulators for all tasks of a job, aggregated across the respective subtasks. Endpoint: [GET] /jobs/:jobid/accumulators Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. include_serialized_value: bool (Optional) Boolean value that specifies whether serialized user task accumulators should be included in the response. Returns ------- dict Accumulators for all task. """ params = {} if include_serialized_value is not None: params["includeSerializedValue"] = ( "true" if include_serialized_value else "false" ) return _execute_rest_request( url=f"{self.prefix}/{job_id}/accumulators", http_method="GET", params=params ) def get_checkpointing_configuration(self, job_id): """ Returns the checkpointing configuration of the selected job_id Endpoint: [GET] /jobs/:jobid/checkpoints/config Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- dict Checkpointing configuration of the selected job. """ return _execute_rest_request( url=f"{self.prefix}/{job_id}/checkpoints/config", http_method="GET" ) def get_checkpoints(self, job_id): """ Returns checkpointing statistics for a job. Endpoint: [GET] /jobs/:jobid/checkpoints Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- dict Checkpointing statistics for the selected job: counts, summary, latest and history. """ return _execute_rest_request( url=f"{self.prefix}/{job_id}/checkpoints", http_method="GET" ) def get_checkpoint_ids(self, job_id): """ Returns checkpoint ids of the job_id. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- list List of checkpoint ids. """ return [elem["id"] for elem in self.get_checkpoints(job_id=job_id)["history"]] def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False): """ Returns details for a checkpoint. 
Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid If show_subtasks is true: Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. checkpoint_id: int Long value that identifies a checkpoint. show_subtasks: bool If it is True, the details of the subtask are also returned. Returns ------- dict """ checkpoint_details = _execute_rest_request( url=f"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}", http_method="GET", ) if not show_subtasks: return checkpoint_details subtasks = {} for vertex_id in checkpoint_details["tasks"].keys(): subtasks[vertex_id] = _execute_rest_request( url=f"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}", http_method="GET", ) checkpoint_details["subtasks"] = subtasks return checkpoint_details def rescale(self, job_id, parallelism): """ Triggers the rescaling of a job. This async operation would return a 'triggerid' for further query identifier. Endpoint: [GET] /jobs/:jobid/rescaling Notes ----- Using Flink version 1.12, the method will raise RestHandlerException because this rescaling is temporarily disabled. See FLINK-12312. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. parallelism: int Positive integer value that specifies the desired parallelism. Returns ------- JobTrigger Object that can be used to query the status of rescaling. """ params = {"parallelism": parallelism} trigger_id = _execute_rest_request( url=f"{self.prefix}/{job_id}/rescaling", http_method="PATCH", params=params )["triggerid"] return JobTrigger(self.prefix, "rescaling", job_id, trigger_id) def create_savepoint(self, job_id, target_directory, cancel_job=False): """ Triggers a savepoint, and optionally cancels the job afterwards. This async operation would return a JobTrigger for further query identifier. Endpoint: [GET] /jobs/:jobid/savepoints Notes ----- The target directory has to be a location accessible by both the JobManager(s) and TaskManager(s) e.g. a location on a distributed file-system or Object Store. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. target_directory: str Savepoint target directory. cancel_job: bool If it is True, it also stops the job after the savepoint creation. Returns ------- JobTrigger Object that can be used to query the status of savepoint. """ trigger_id = _execute_rest_request( url=f"{self.prefix}/{job_id}/savepoints", http_method="POST", accepted_status_code=202, json={"cancel-job": cancel_job, "target-directory": target_directory}, )["request-id"] return JobTrigger(self.prefix, "savepoints", job_id, trigger_id) def terminate(self, job_id): """ Terminates a job. Endpoint: [PATCH] /jobs/:jobid Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. Returns ------- bool True if the job has been canceled, otherwise False. """ res = _execute_rest_request( url=f"{self.prefix}/{job_id}", http_method="PATCH", accepted_status_code=202 ) if len(res) < 1: return True else: return False def stop(self, job_id, target_directory, drain=False): """ Stops a job with a savepoint. This async operation would return a JobTrigger for further query identifier. Attention: The target directory has to be a location accessible by both the JobManager(s) and TaskManager(s) e.g. a location on a distributed file-system or Object Store. 
Draining emits the maximum watermark before stopping the job. When the watermark is emitted, all event time timers will fire, allowing you to process events that depend on this timer (e.g. time windows or process functions). This is useful when you want to fully shut down your job without leaving any unhandled events or state. Endpoint: [GET] /jobs/:jobid/stop Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. target_directory: str Savepoint target directory. drain: bool (Optional) If it is True, it emits the maximum watermark before stopping the job. default: False Returns ------- JobTrigger Object that can be used to query the status of savepoint. """ data = { "drain": False if drain is None else drain, "targetDirectory": target_directory, } trigger_id = _execute_rest_request( url=f"{self.prefix}/{job_id}/stop", http_method="POST", accepted_status_code=202, json=data, )["request-id"] return JobTrigger(self.prefix, "savepoints", job_id, trigger_id) def get_vertex(self, job_id, vertex_id): """ Returns a JobVertexClient. Parameters ---------- job_id: str 32-character hexadecimal string value that identifies a job. vertex_id: str 32-character hexadecimal string value that identifies a vertex. Returns ------- JobVertexClient JobVertexClient instance that can execute vertex related queries. """ return JobVertexClient(self.prefix, job_id, vertex_id)
[ "flink_rest_client.common._execute_rest_request" ]
[((341, 440), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}"""'}), "(url=\n f'{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}')\n", (362, 440), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((1371, 1431), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/accumulators"""'}), "(url=f'{self.prefix_url}/accumulators')\n", (1392, 1431), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((3477, 3547), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/metrics"""', 'params': 'params'}), "(url=f'{self.prefix_url}/metrics', params=params)\n", (3498, 3547), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((4132, 4192), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/{subtask_id}"""'}), "(url=f'{self.prefix_url}/{subtask_id}')\n", (4153, 4192), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((4969, 5056), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/{subtask_id}/attempts/{attempt_id}"""'}), "(url=\n f'{self.prefix_url}/{subtask_id}/attempts/{attempt_id}')\n", (4990, 5056), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((5824, 5924), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators"""'}), "(url=\n f'{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators')\n", (5845, 5924), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((6769, 6811), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'self.prefix_url'}), '(url=self.prefix_url)\n', (6790, 6811), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((7253, 7313), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/backpressure"""'}), "(url=f'{self.prefix_url}/backpressure')\n", (7274, 7313), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((8019, 8089), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/metrics"""', 'params': 'params'}), "(url=f'{self.prefix_url}/metrics', params=params)\n", (8040, 8089), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((8584, 8644), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/subtasktimes"""'}), "(url=f'{self.prefix_url}/subtasktimes')\n", (8605, 8644), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((8947, 9007), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/taskmanagers"""'}), "(url=f'{self.prefix_url}/taskmanagers')\n", (8968, 9007), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((9298, 9356), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/watermarks"""'}), 
"(url=f'{self.prefix_url}/watermarks')\n", (9319, 9356), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((12291, 12357), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/metrics"""', 'params': 'params'}), "(url=f'{self.prefix}/metrics', params=params)\n", (12312, 12357), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((12896, 12948), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}"""'}), "(url=f'{self.prefix}/{job_id}')\n", (12917, 12948), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((13320, 13379), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/config"""'}), "(url=f'{self.prefix}/{job_id}/config')\n", (13341, 13379), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((13814, 13877), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/exceptions"""'}), "(url=f'{self.prefix}/{job_id}/exceptions')\n", (13835, 13877), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((14395, 14464), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/execution-result"""'}), "(url=f'{self.prefix}/{job_id}/execution-result')\n", (14416, 14464), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((15099, 15174), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/metrics"""', 'params': 'params'}), "(url=f'{self.prefix}/{job_id}/metrics', params=params)\n", (15120, 15174), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((16963, 17066), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/accumulators"""', 'http_method': '"""GET"""', 'params': 'params'}), "(url=f'{self.prefix}/{job_id}/accumulators',\n http_method='GET', params=params)\n", (16984, 17066), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((17547, 17641), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/checkpoints/config"""', 'http_method': '"""GET"""'}), "(url=f'{self.prefix}/{job_id}/checkpoints/config',\n http_method='GET')\n", (17568, 17641), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((18115, 18202), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/checkpoints"""', 'http_method': '"""GET"""'}), "(url=f'{self.prefix}/{job_id}/checkpoints',\n http_method='GET')\n", (18136, 18202), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((19372, 19488), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}"""', 'http_method': '"""GET"""'}), "(url=\n f'{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}',\n http_method='GET')\n", (19393, 19488), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((22672, 22775), 
'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}"""', 'http_method': '"""PATCH"""', 'accepted_status_code': '(202)'}), "(url=f'{self.prefix}/{job_id}', http_method='PATCH',\n accepted_status_code=202)\n", (22693, 22775), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((9875, 9913), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'self.prefix'}), '(url=self.prefix)\n', (9896, 9913), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((10361, 10413), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/overview"""'}), "(url=f'{self.prefix}/overview')\n", (10382, 10413), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((15636, 15693), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/plan"""'}), "(url=f'{self.prefix}/{job_id}/plan')\n", (15657, 15693), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((19701, 19839), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}"""', 'http_method': '"""GET"""'}), "(url=\n f'{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}'\n , http_method='GET')\n", (19722, 19839), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((20797, 20900), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/rescaling"""', 'http_method': '"""PATCH"""', 'params': 'params'}), "(url=f'{self.prefix}/{job_id}/rescaling', http_method=\n 'PATCH', params=params)\n", (20818, 20900), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((21965, 22154), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/savepoints"""', 'http_method': '"""POST"""', 'accepted_status_code': '(202)', 'json': "{'cancel-job': cancel_job, 'target-directory': target_directory}"}), "(url=f'{self.prefix}/{job_id}/savepoints', http_method\n ='POST', accepted_status_code=202, json={'cancel-job': cancel_job,\n 'target-directory': target_directory})\n", (21986, 22154), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((24281, 24400), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/{job_id}/stop"""', 'http_method': '"""POST"""', 'accepted_status_code': '(202)', 'json': 'data'}), "(url=f'{self.prefix}/{job_id}/stop', http_method=\n 'POST', accepted_status_code=202, json=data)\n", (24302, 24400), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((1673, 1728), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/metrics"""'}), "(url=f'{self.prefix_url}/metrics')\n", (1694, 1728), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n'), ((7555, 7610), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix_url}/metrics"""'}), "(url=f'{self.prefix_url}/metrics')\n", (7576, 7610), False, 'from flink_rest_client.common import 
_execute_rest_request, RestException\n'), ((10651, 10702), 'flink_rest_client.common._execute_rest_request', '_execute_rest_request', ([], {'url': 'f"""{self.prefix}/metrics"""'}), "(url=f'{self.prefix}/metrics')\n", (10672, 10702), False, 'from flink_rest_client.common import _execute_rest_request, RestException\n')]
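A brief usage sketch for the JobsClient defined above (editor's addition, not part of the original module). The REST URL is an assumption: the prefix must point at a reachable Flink JobManager REST endpoint, and the response fields follow the standard Flink REST API.

# assuming the JobsClient class above is importable from its module
jobs_client = JobsClient(prefix="http://localhost:8081/v1")  # hypothetical JobManager address

# Overview of all jobs and their current state.
for job in jobs_client.all():
    print(job["id"], job["status"])

# Details and aggregated metrics for the first job, if any exist.
job_ids = jobs_client.job_ids()
if job_ids:
    print(jobs_client.get(job_ids[0])["state"])
    print(jobs_client.metrics(job_ids=job_ids[:1]))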
""" Copyright 2018, <NAME>, Stevens Institute of Technology Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import sys, os sys.path.append(os.path.abspath('..')) from resources.classes import * from resources.globalv import * from collections import defaultdict, Counter from itertools import product import pickle import random import hashlib from resources.optimizeMILP import optimizeMILP from multiprocessing import Process, Manager import argparse dir_topologies = os.path.abspath('..') + '/topologies_new/' def createNetTopologies(): global seedlist, filename, numfederates, elementnames, edgedivider numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for s in sources]) destinations = elementnames[-2:] sources = [e for e in elementnames if e not in destinations] if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) hashNetworkDict = {h: obj for h,obj in hashNetworkDict.items() if obj.costValueDict} else: hashNetworkDict = {} for seed in seedlist: # print(seed) random.seed(seed) while sum(numberfederates)<len(elementnames): i = random.choice(range(len(numberfederates))) numberfederates[i] += 1 namelist = [n*['f%d'%i] for i, n in enumerate(numberfederates)] federatenames = [e for l in namelist for e in l] random.shuffle(federatenames) # print("shuffle:", federatenames) # all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]), # (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])] # all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if (a != b and element_federate_dict[a] != element_federate_dict[b])] all_possible_edges = [] all_edges = [] # while len([l for l in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity: all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if (a != b and not (a in destinations))] all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider)) edge2destin = [l for l in all_possible_edges if l[1] in destinations and l not in all_edges] existingedges2desgin = [l for l in all_edges if l[1] in destinations] nume2d = int(len(sources)/2 - len(existingedges2desgin)) # print(nume2d) if nume2d>0: newedges = random.sample(edge2destin, nume2d) # print(len(all_edges)) all_edges = all_edges + newedges # print(newedges) # print(len(all_edges)) all_edge_set = set([]) destin_count = 0 for edge in all_edges: s, d = edge # if destin_count > len(satellites): # continue if s in destinations or d in destinations: destin_count += linkcapacity all_edge_set.add((s,d)) all_edge_set.add((d,s)) all_edges = list(all_edge_set) tempNetTop = NetTop(elementnames, all_edges, federatenames, sources, destinations) if tempNetTop.hashid not in hashNetworkDict: # print(seed, tempNetTop.hashid) hashNetworkDict[tempNetTop.hashid] = tempNetTop with open(filename, 'wb') as outfile: 
pickle.dump(hashNetworkDict, outfile) def calCostValue(nettopObj): federatenames = nettopObj.federates # fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} # federates = [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] # federateDict = {f.name: f for f in federates} # # print("element names:", nettopObj.elements) # elements = [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] # elementDict = {e.name: e for e in elements} # sources = [e for e in elements if e.name not in nettopObj.destinations] # # sources = nettopObj.sources # # print([s.name for s in sources]) # linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] # time = 0 # newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] # elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # federates = [Federate(name = f, cash = 0, sharelinkcost = 0, uselinkcost = 0) for f in set(federatenames)] # federateDict = {f.name: f for f in federates} # # print("element names:", nettopObj.elements) # elements = [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] # elementDict = {e.name: e for e in elements} # sources = [e for e in elements if e.name not in nettopObj.destinations] # # sources = nettopObj.sources # # print([s.name for s in sources]) # linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] # time = 0 # newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] # elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print(elfedDict) # print("new tasks:", newtasks) for sharelinkcost, uselinkcost in basetuples: fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames} federates = [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)] federateDict = {f.name: f for f in federates} # print("element names:", nettopObj.elements) elements = [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)] elementDict = {e.name: e for e in elements} sources = [e for e in elements if e.name not in nettopObj.destinations] # sources = nettopObj.sources # print([s.name for s in sources]) linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges] time = 0 newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)] elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)} # print("new tuple:", sharelinkcost, uselinkcost) # print("length of cost value dict:", len(nettopObj.costValueDict)) # print(nettopObj.hashid, nettopObj.costValueDict) # if (sharelinkcost, uselinkcost) not in 
nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0: # for f in federates: # f.cash = 0 # f.sharelinkcost = sharelinkcost # f.uselinkcost = uselinkcost edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges} # print(edgePriceDict) # print(nettopObj.hashid) # print(fedPriceDict) # print(linklist) # print(nettopObj.destinations) # print(len(newtasks)) # print(federates) # print(linklist) solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict = {f: 0 for f in fedPriceDict.keys()}, edgelist = []) solutionObj = optimizeMILP(elements = elements, linklist = linklist, destinations = nettopObj.destinations, storedtasks = [], newtasks = newtasks, time = time, federates = federates, edgePriceDict = edgePriceDict, solutionObj = solutionObj) totalvalue = solutionObj.totalvalue # print(solutionObj.sourceEdgeDict) # print(solutionObj.fedValDict) nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue # print("New tuple cost and value:", sharelinkcost, uselinkcost, totalvalue) def updateCostValue(objlist, proc, tempfilename): global filename if os.path.isdir("/home/abbas.ehsanfar/gurobi"): hostname = os.environ['HOSTNAME'] os.environ['GRB_LICENSE_FILE'] = "/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic"%(hostname,str(proc%30).zfill(2)) for k, nettopObj in enumerate(objlist): # print("New topoology:", nettopObj.hashid) calCostValue(nettopObj) if k%20 == 0: objDict = {obj.hashid: obj for obj in objlist} with open(tempfilename, 'wb') as outfile: pickle.dump(objDict, outfile) # with open(filename, 'rb') as infile: # hashNetworkDict = pickle.load(infile) # for h, obj in objDict.items(): # hashNetworkDict[h] = obj # with open(filename, 'wb') as outfile: # pickle.dump(hashNetworkDict, outfile) with open(tempfilename, 'wb') as outfile: objDict = {obj.hashid: obj for obj in objlist} pickle.dump(objDict, outfile) def multiProcCostValue(): global nproc, filename with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) topollist = list(hashNetworkDict.values()) N = len(topollist) allProcs = [] for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) inds = range(proc, N, nproc) objlist = [topollist[i] for i in inds] p = Process(target=updateCostValue, args=(objlist,proc,tempfilename)) p.start() allProcs.append(p) for a in allProcs: a.join() finalDict = {} for proc in range(nproc): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj in hashNetworkDict.items(): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) for proc in range(nproc): os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2)) def calAuctionScore(): global filename print(filename) if os.path.isfile(filename): with open(filename, 'rb') as infile: hashNetworkDict = pickle.load(infile) # topollist = hashNetworkDict.values() # if len(hashNetworkDict) < 1000: # return for k, topol in hashNetworkDict.items(): costdict = topol.costValueDict maxtup = (0, 1000) for mintup in [(500,501), (400,600)]: if mintup in costdict: topol.auctionscore += costdict[maxtup] - costdict[mintup] # print(topol.auctionscore) toplist = sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore, reverse = True)[:10] # print(filename, [e.auctionscore for e in toplist]) with open(filename[:-2] + '_top10.p', 'wb') as outfile: pickle.dump(toplist, outfile) with open(filename[:-2] + '_score.p', 
'wb') as outfile: pickle.dump(hashNetworkDict, outfile) else: return def aggregate60Nodes(): for numfederates, numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]: filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) finalDict = {} for proc in range(60): tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2) with open(tempfilename, 'rb') as infile: hashNetworkDict = pickle.load(infile) for h, obj in list(hashNetworkDict.items()): finalDict[h] = obj with open(filename, 'wb') as outfile: pickle.dump(finalDict, outfile) def aggregateNetworks(): netlist = [] for (numfederates, numelements), edgedivider in list(fedeldensitylist): filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider) # if os.path.isfile(filename): with open(filename, 'rb') as infile: netlist.extend(pickle.load(infile)) hashNetDict = {net.hashid: net for net in netlist} with open(dir_topologies + 'hashNetDict.p', 'wb') as outfile: pickle.dump(hashNetDict, outfile) if __name__ == '__main__': parser = argparse.ArgumentParser(description="This processed raw data of twitter.") parser.add_argument('--nproc', type=int, default=3, help='cores on server') parser.add_argument('--n', type=int, default=3, help='cores on server') args = parser.parse_args() argsdict = vars(args) nproc = argsdict['nproc'] time = 0 # basecost = [0, 200, 400, 600, 800, 1000] seedlist = list(range(0,500)) # for (numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))): aggregateNetworks() # for (numfederates, numelements), edgedivider in list(fedeldensitylist): # filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider) # elementnames = ['e%d'%(i+1) for i in range(numelements)] # createNetTopologies() # multiProcCostValue() # calAuctionScore()
[ "os.path.abspath", "pickle.dump", "argparse.ArgumentParser", "os.path.isdir", "random.shuffle", "random.sample", "os.path.isfile", "pickle.load", "random.seed", "itertools.product", "multiprocessing.Process", "resources.optimizeMILP.optimizeMILP" ]
[((644, 665), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (659, 665), False, 'import sys, os\n'), ((979, 1000), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (994, 1000), False, 'import sys, os\n'), ((1328, 1352), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1342, 1352), False, 'import sys, os\n'), ((8638, 8682), 'os.path.isdir', 'os.path.isdir', (['"""/home/abbas.ehsanfar/gurobi"""'], {}), "('/home/abbas.ehsanfar/gurobi')\n", (8651, 8682), False, 'import sys, os\n'), ((10505, 10529), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (10519, 10529), False, 'import sys, os\n'), ((12505, 12579), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This processed raw data of twitter."""'}), "(description='This processed raw data of twitter.')\n", (12528, 12579), False, 'import argparse\n'), ((1622, 1639), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1633, 1639), False, 'import random\n'), ((1888, 1917), 'random.shuffle', 'random.shuffle', (['federatenames'], {}), '(federatenames)\n', (1902, 1917), False, 'import random\n'), ((3804, 3841), 'pickle.dump', 'pickle.dump', (['hashNetworkDict', 'outfile'], {}), '(hashNetworkDict, outfile)\n', (3815, 3841), False, 'import pickle\n'), ((8068, 8286), 'resources.optimizeMILP.optimizeMILP', 'optimizeMILP', ([], {'elements': 'elements', 'linklist': 'linklist', 'destinations': 'nettopObj.destinations', 'storedtasks': '[]', 'newtasks': 'newtasks', 'time': 'time', 'federates': 'federates', 'edgePriceDict': 'edgePriceDict', 'solutionObj': 'solutionObj'}), '(elements=elements, linklist=linklist, destinations=nettopObj.\n destinations, storedtasks=[], newtasks=newtasks, time=time, federates=\n federates, edgePriceDict=edgePriceDict, solutionObj=solutionObj)\n', (8080, 8286), False, 'from resources.optimizeMILP import optimizeMILP\n'), ((9459, 9488), 'pickle.dump', 'pickle.dump', (['objDict', 'outfile'], {}), '(objDict, outfile)\n', (9470, 9488), False, 'import pickle\n'), ((9601, 9620), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (9612, 9620), False, 'import pickle\n'), ((9873, 9940), 'multiprocessing.Process', 'Process', ([], {'target': 'updateCostValue', 'args': '(objlist, proc, tempfilename)'}), '(target=updateCostValue, args=(objlist, proc, tempfilename))\n', (9880, 9940), False, 'from multiprocessing import Process, Manager\n'), ((10321, 10352), 'pickle.dump', 'pickle.dump', (['finalDict', 'outfile'], {}), '(finalDict, outfile)\n', (10332, 10352), False, 'import pickle\n'), ((12429, 12462), 'pickle.dump', 'pickle.dump', (['hashNetDict', 'outfile'], {}), '(hashNetDict, outfile)\n', (12440, 12462), False, 'import pickle\n'), ((1424, 1443), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1435, 1443), False, 'import pickle\n'), ((3051, 3085), 'random.sample', 'random.sample', (['edge2destin', 'nume2d'], {}), '(edge2destin, nume2d)\n', (3064, 3085), False, 'import random\n'), ((10184, 10203), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (10195, 10203), False, 'import pickle\n'), ((10592, 10611), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (10603, 10611), False, 'import pickle\n'), ((11198, 11227), 'pickle.dump', 'pickle.dump', (['toplist', 'outfile'], {}), '(toplist, outfile)\n', (11209, 11227), False, 'import pickle\n'), ((11292, 11329), 'pickle.dump', 'pickle.dump', (['hashNetworkDict', 'outfile'], {}), '(hashNetworkDict, outfile)\n', (11303, 11329), 
False, 'import pickle\n'), ((11917, 11948), 'pickle.dump', 'pickle.dump', (['finalDict', 'outfile'], {}), '(finalDict, outfile)\n', (11928, 11948), False, 'import pickle\n'), ((9071, 9100), 'pickle.dump', 'pickle.dump', (['objDict', 'outfile'], {}), '(objDict, outfile)\n', (9082, 9100), False, 'import pickle\n'), ((11773, 11792), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (11784, 11792), False, 'import pickle\n'), ((12286, 12305), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (12297, 12305), False, 'import pickle\n'), ((2609, 2644), 'itertools.product', 'product', (['elementnames', 'elementnames'], {}), '(elementnames, elementnames)\n', (2616, 2644), False, 'from itertools import product\n')]
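An illustrative sketch of how the pickled topology archive written by the script above can be inspected (editor's addition). The file name follows the naming pattern used in the script but the concrete parameters are an assumption, and it assumes the snippet runs in the same module, so that dir_topologies and the NetTop class from resources.classes are available for unpickling.

import pickle

# hypothetical archive for the (4 federates, 20 elements, density 5) case
with open(dir_topologies + 'hashNetworkDict_elements20_federates4_density5.p', 'rb') as infile:
    topologies = pickle.load(infile)

# Each NetTop object stores, per (sharelinkcost, uselinkcost) tuple, the total
# value found by optimizeMILP; calAuctionScore() above ranks topologies by how
# much value is lost when link sharing becomes expensive.
for hashid, topol in list(topologies.items())[:5]:
    free = topol.costValueDict.get((0, 1000))
    costly = topol.costValueDict.get((500, 501))
    if free is not None and costly is not None:
        print(hashid, free - costly)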
import basics import random import math import matplotlib.pyplot as plt import numpy as np import cProfile import pstats import time class node(): # Has a keyword that defines it's means ofo decrypting the text # Can reproduce to make a mutated offspring def __init__(self, key=None): self.key = key def reproduce(self): pass class algorithm(): #has a population of nodes with keywords, can breed to make offspring with random #mutations/changes, can cull to select for best english scoring offspring def __init__(self, text, population_size, breeding_times, node_class): self.text = text self.breeding_times = breeding_times # how many times each parent will breed self.population_size = population_size self.population = [] self.past_generations = [] self.complete_scores = [] # stores the complete score history self.summary_scores = [] # stores min max mean median self.node = node_class # stores the type of node self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt') self.base_score = self.scorer.score(basics.generate_random_text(len(text))) self.english_score = self.scorer.score(basics.generate_english_text(len(text))) self.cycles_count = 0 self.graphing = False #When turned on, cull() passes new scores to the graph #self.initialize_population() def initialize_graph(self): self.graphing = True self.ax = plt.gca() if len(self.summary_scores) > 0: start_num_points = len(self.summary_scores) xdata = np.array([x for x in range(1, start_num_points)]) self.lines = [self.ax.plot(xdata, [score[num] for score in self.summary_scores])[0] for num in range(3)] else: self.lines = [self.ax.plot([], [])[0] for num in range(3)] self.ax.relim() self.ax.autoscale_view(True, True, True) plt.ion() plt.show() def update_line(self, line, new_data): #Given a line and new_data of the form [new_x, new_y], adds on the new values line.set_xdata(np.append(line.get_xdata(), new_data[0])) line.set_ydata(np.append(line.get_ydata(), new_data[1])) def update_graph(self): for num in range(len(self.lines)): self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]]) self.ax.relim() self.ax.autoscale_view(True, True, True) plt.draw() plt.pause(0.01) def initialize_population(self): # Initializes the population with size self.population, hopefully near to endpoint pass def score(self, my_node): return(self.scorer.score(self.decrypt(self.text, my_node.key))) def decrypt(self, text, key): pass def cycle(self, ntimes=1): # Does ntimes cycles of breed and cull for num in range(ntimes): self.cycles_count += 1 self.breed() self.cull() def run_to_score(self, score): # Keeps cycling until the latest population's mean score is greater than score while True: self.cycle() if self.summary_scores[-1][2] > score: break def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize): for num in range(num_cycles_large): for num in range(num_cycles_small): self.breed(size=len(self.population), times=breeding_times) self.cull(size=cullsize) def breed(self, size=None, times=None): """Replaces self.population with a whole load of newly bred offspring, randomly selecting who pairs with who""" if size == None: size = self.population_size if times == None: times = self.breeding_times self.offspring = [] for pop_num in range(size): for breed_num in range(times): self.offspring.append(self.population[pop_num].reproduce()) # archive the parent generation, make the new population the offspring. 
self.past_generations.append(self.population) self.population = self.offspring def cull(self, size=None): #size is the final size (post culling) of the population if size == None: size = self.population_size """Removes the bottom scorers of the population until the population fits population_size""" # From each node in population we get [node_index, node_score] in population_ranking population_ranking = [[x, self.score(self.population[x])] for x in \ range(len(self.population))] population_ranking.sort(key=lambda x: x[1]) # sort by score from lowest to highest # The new population is the top population_size guys as ranked # x[0] is the index of the node self.population = [self.population[x[0]] for x in population_ranking[-size:]] # The actual scores, with the same indices as their node counterparts in population self.ranking = [x[1] for x in population_ranking[-size:]] #score keeping self.complete_scores.append(self.ranking) botpercentile = self.ranking[math.floor(0.05*size)] toppercentile = self.ranking[math.floor(0.95*size)] median = self.ranking[math.ceil(size/2)] self.summary_scores.append([botpercentile, median, toppercentile]) # if graphing is turned on, send the new data to the graph if self.graphing == True: self.update_graph() class genetic_algorithm(algorithm): def breed(self, size=None, times=None): """Replaces self.population with a whole load of newly bred offspring, randomly selecting who pairs with who""" if size == None: size = self.population_size if times == None: times = self.breeding_times self.offspring = [] # 0 will increment each time a node breeds, until it reaches breeding_times available = [[x, 0] for x in self.population] # who is left available while True: # take the first node in available as the base, breed them with random partners # in available, then remove first node from available # range(...) ensures we breed the right number of times for breed_count in range(available[0][1], self.breeding_times): try: # try to choose a partner from those in available choice = random.choice(available[1:]) except IndexError: #Sometimes the last guy gets left out #print('ruh roh') choice = [random.choice(self.population), -1] # breed with the chosen partner self.offspring.append(available[0][0].reproduce(choice[0])) # increase the partner's breed count by one choice[1] += 1 # if the partner's bred the requisite number of times, remove them from available if choice[1] == self.breeding_times: available.remove(choice) # remove our start node from available del(available[0]) # if everyone's bred, break the loop if len(available) == 0: break # archive the parent generation, make the new population the offspring. self.past_generations.append(self.population) self.population = self.offspring def algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs): """Makes n algorithms, returns the avg time for them to run to a score, given""" algorithms = [] for num in range(n): algorithms.append(algorithm(*args, **kwargs)) if graphing: algorithms[-1].initialize_graph() prof = cProfile.Profile() for num in range(n): print('{0} out of {1}:'.format(num+1, n), end='') prof.runctx('algorithms[num].run_to_score(score)', globals(), locals()) if graphing: for line in algorithms[num].lines: line.remove() stats = pstats.Stats() stats.add(prof) return(stats)
[ "matplotlib.pyplot.show", "pstats.Stats", "math.ceil", "math.floor", "random.choice", "cProfile.Profile", "matplotlib.pyplot.draw", "matplotlib.pyplot.ion", "matplotlib.pyplot.gca", "matplotlib.pyplot.pause", "basics.ngram_score" ]
[((8274, 8292), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (8290, 8292), False, 'import cProfile\n'), ((8583, 8597), 'pstats.Stats', 'pstats.Stats', ([], {}), '()\n', (8595, 8597), False, 'import pstats\n'), ((1089, 1156), 'basics.ngram_score', 'basics.ngram_score', (['"""english_trigrams.txt"""', '"""english_quadgrams.txt"""'], {}), "('english_trigrams.txt', 'english_quadgrams.txt')\n", (1107, 1156), False, 'import basics\n'), ((1575, 1584), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1582, 1584), True, 'import matplotlib.pyplot as plt\n'), ((2049, 2058), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2056, 2058), True, 'import matplotlib.pyplot as plt\n'), ((2068, 2078), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2076, 2078), True, 'import matplotlib.pyplot as plt\n'), ((2601, 2611), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2609, 2611), True, 'import matplotlib.pyplot as plt\n'), ((2621, 2636), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (2630, 2636), True, 'import matplotlib.pyplot as plt\n'), ((5504, 5527), 'math.floor', 'math.floor', (['(0.05 * size)'], {}), '(0.05 * size)\n', (5514, 5527), False, 'import math\n'), ((5565, 5588), 'math.floor', 'math.floor', (['(0.95 * size)'], {}), '(0.95 * size)\n', (5575, 5588), False, 'import math\n'), ((5619, 5638), 'math.ceil', 'math.ceil', (['(size / 2)'], {}), '(size / 2)\n', (5628, 5638), False, 'import math\n'), ((6916, 6944), 'random.choice', 'random.choice', (['available[1:]'], {}), '(available[1:])\n', (6929, 6944), False, 'import random\n'), ((7089, 7119), 'random.choice', 'random.choice', (['self.population'], {}), '(self.population)\n', (7102, 7119), False, 'import random\n')]
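A usage sketch for the breed/cull framework in the record above, assuming the `basics` module and the two n-gram statistics files loaded in `algorithm.__init__` are available; `caesar_node`, `caesar_algorithm`, and the mutation probabilities are illustrative choices, not part of the source.

import random
import string

class caesar_node(node):
    # Key is a shift in 0..25; an offspring keeps one parent's key and sometimes mutates it.
    def reproduce(self, partner=None):
        key = self.key
        if partner is not None and random.random() < 0.5:
            key = partner.key
        if random.random() < 0.3:
            key = (key + random.choice([-1, 1])) % 26
        return caesar_node(key)

class caesar_algorithm(genetic_algorithm):
    def initialize_population(self):
        # Start from random shifts; the framework leaves this method to the subclass.
        self.population = [caesar_node(random.randrange(26))
                           for _ in range(self.population_size)]

    def decrypt(self, text, key):
        # Undo a Caesar shift by `key` positions over the uppercase alphabet.
        shifted = string.ascii_uppercase[key:] + string.ascii_uppercase[:key]
        return text.upper().translate(str.maketrans(string.ascii_uppercase, shifted))

# Example wiring (left commented because it needs a ciphertext and the n-gram files):
# ga = caesar_algorithm(ciphertext, population_size=20, breeding_times=2,
#                       node_class=caesar_node)
# ga.initialize_population()
# ga.run_to_score(ga.english_score)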
# -*- coding: utf-8 -*- # Resource object code # # Created by: The Resource Compiler for PyQt5 (Qt v5.12.5) # # WARNING! All changes made in this file will be lost! # from PyQt5 import QtCore from silx.gui import qt as QtCore qt_resource_data = b"\ \x00\x00\x19\x3d\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x32\x00\x00\x00\x32\x08\x06\x00\x00\x00\x1e\x3f\x88\xb1\ \x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\ \xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\ \x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\ \xe3\x01\x1e\x09\x34\x07\xfb\x3d\x97\x4b\x00\x00\x00\x1d\x69\x54\ \x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\ \x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\ \x2e\x65\x07\x00\x00\x18\xa1\x49\x44\x41\x54\x68\xde\x35\x99\x67\ \x7c\x55\x55\xfa\xb6\xaf\xbd\x4f\x4b\x4e\x7a\x25\x09\x25\x09\x09\ \x1d\x44\x91\x26\x8a\xe8\x20\x4a\x71\x44\x01\xb1\x2b\x16\xb0\xcc\ \x60\xc1\xae\xef\x08\x63\x77\xfe\x3a\xa2\xa0\x88\x0c\x16\xac\x88\ \x0a\xa2\xa0\x02\x4a\x13\x51\x40\x40\x7a\x42\x4d\x02\x49\x48\x2f\ \x27\x27\xa7\xee\xfb\xfd\xb0\xb7\x9f\xf2\xcb\x29\xfb\xac\x67\xad\ \xfb\xb9\xcb\x7a\x8c\xbc\xe8\x70\xcd\xef\xdc\xc1\x75\x07\x80\x10\ \x3c\xd0\x7b\x04\x1b\xbb\xee\x66\xcf\xe6\x28\xae\x19\x49\x30\xa6\ \x83\xfa\x39\x30\xe3\x1c\xe8\x0d\x0c\xee\xf0\x32\xe3\xd9\x08\x2c\ \xc9\x83\x48\x36\x0c\x38\x08\xe3\xc5\xee\x3b\xe0\x9b\x42\xd8\x0d\ \x7c\xb0\x03\xd2\xbe\x06\xf7\x27\x5d\xe0\xf4\x28\x70\xf9\x20\xe5\ \x04\x47\x56\xed\xa0\x6a\x84\x49\x69\xd8\x22\x73\x85\x9b\xb4\xc7\ \xa6\x42\x73\x6f\x02\x23\x9b\x49\x78\x66\x09\xee\x6f\xc3\x84\xa3\ \x26\xbe\x7d\x97\x62\xb5\xe7\xa3\x59\x3f\x62\xae\xf6\xf3\xf6\xab\ \x11\xae\x5f\x94\x8f\x7b\xe5\x85\x7c\x78\xaf\x9f\xa9\x89\xaf\x52\ \x7b\xf6\x3a\x1a\xc9\x67\xe0\xb7\x43\xd9\xfe\xf7\xcf\x30\xbf\x6b\ \xde\xcf\xdf\x6a\x81\xb9\x40\x03\x6c\xe9\x7a\x88\x7f\x54\x47\x71\ \x7d\x0d\x9c\xee\x0f\x87\xa1\x79\x3f\x1c\xc7\xcf\x50\x60\xc6\xbf\ \x2c\xf8\x4f\x7f\x68\x1e\x00\x1d\xbd\xe0\x60\x0a\x08\xb2\x42\x70\ \x3e\xb0\x28\x0c\x49\x19\x10\x9e\x0a\x34\x74\x03\x6e\x82\xf8\xc3\ \xd0\xf2\x02\x7d\x2f\x99\xc1\xa8\x45\x09\x14\xd7\x42\x5a\x38\x0e\ \x1d\x6d\x44\x18\x88\xf7\xb7\x97\x30\x27\x7e\x47\xa8\xde\xc0\xb7\ \x29\x87\xd6\xa3\x23\xd0\xa9\x2c\x5c\x8f\x4d\xc0\x38\xe4\xa1\xf7\ \xeb\x29\xf0\x99\x8b\xe3\xea\x46\x60\xed\x83\x64\x86\xfb\x91\x1c\ \x4d\xa7\xa6\xc5\xa0\xe0\x64\x22\x93\xdf\x9a\x8b\x59\x19\x8c\x92\ \xb8\x0d\xc8\x81\x78\x14\xc2\x64\x91\x91\x68\xc2\x56\x17\xb8\xce\ \x62\x8d\x83\x0d\x37\xc1\x68\x82\xdc\x70\x77\x02\x2c\xcc\x05\xf2\ \xc1\x30\xe0\x9c\xcd\x1c\x7b\xb3\x8d\xde\xcf\xc2\xae\x3e\xb0\x0b\ \x88\xb9\xc1\x9d\x03\x81\x81\xc0\xd3\xfb\xc1\x3c\x03\xec\x43\x44\ \x11\xb3\x49\x9a\xf3\x24\x7c\x9c\x45\x6c\x8c\xa0\xcb\xef\xb8\xe8\ \x0a\x94\x63\x1a\x3e\x7c\x5d\x93\xd1\xf8\x16\xd2\x9a\x9b\x70\x75\ \xf6\x84\x68\x6f\x28\x3d\xc3\xd0\x4b\xcb\xc9\x8c\x2c\x62\xdf\xb2\ \x9d\xdc\xb2\xfb\x08\x1e\xef\x11\x16\x56\x26\x71\xdb\xb1\x5f\xf8\ \x57\x82\x87\x65\x56\x0e\x2c\xd9\xde\x45\x5a\x81\x74\x27\x6a\xac\ \x43\x17\xcb\xa3\xd8\xa3\x48\xd9\xf9\xd2\xe0\x2e\xda\x7d\x08\x0d\ \x94\x57\x7f\x84\x90\x0a\x3d\x52\x62\xa2\x94\x9a\x2c\x0d\x34\x54\ \x7f\x00\x5d\x27\xf4\x80\xd0\x0f\x42\xaf\x0b\xad\x15\xaa\x8d\xa3\ \xaf\x84\xaa\x02\x28\x7e\xef\x00\x89\xe7\xd5\x41\x95\x9a\x39\x25\ \x8b\x8d\xb2\xcc\x0f\x14\xfc\x0d\xc5\x6f\xf0\x49\xcc\x96\xd8\xa1\ \x28\x87\xa4\xbf\x77\x97\x26\xa3\x76\x36\xa9\x9c\x46\x9d\xa0\x42\ \xed\x4f\xa5\xc9\x1a\xd8\x4f\x07\x8d\x66\x3d\x52\x70\x4c\x4a\xfe\ 
\x51\x67\xb6\xf8\xb4\xe3\xdc\x1b\x34\x3f\x7f\xa9\x46\x0e\x8d\xeb\ \x92\xfc\x72\x99\x59\xde\x36\x82\x35\xc0\x71\xc8\xf8\x16\x26\x07\ \xa2\xb8\x3c\x1e\x20\x09\xc6\x9d\x65\x75\x29\xcc\xa9\x8f\x30\xe4\ \x66\xa0\x2a\x19\x22\x3e\x88\x87\x69\xb9\x5e\xfc\x38\x00\xae\x06\ \x1e\x04\x2e\x8c\xdb\xd0\x3a\x5f\x90\x6c\xc0\x84\x38\x74\xab\x07\ \x1e\x39\x88\xc6\xfe\x17\x3f\x2f\x93\xce\x12\x0c\xda\x31\xac\x3c\ \x12\x67\x8f\x20\x3e\x35\x0c\x1c\x00\x22\x18\xf8\xb0\x36\xf6\x83\ \xc4\x44\x7c\x74\x12\x25\x4e\xb1\xf9\x09\xc9\x2b\xf2\xe0\xf0\xcd\ \xf4\x30\x3b\x99\x68\xee\x87\x0b\xc6\x53\xf0\xa6\x20\xdf\x4b\x81\ \x37\x99\xe2\x16\x93\x7b\x3c\x07\x31\x17\x0e\xe9\x44\x61\x60\x00\ \x18\xc5\x30\x39\x0e\x74\x58\xd0\xe5\x04\xc1\x5b\xa1\xd5\x03\x33\ \x16\x00\x1b\x92\x41\x01\x48\x6d\xe5\xf4\xdc\x28\xf7\x3c\x0d\x1b\ \x80\x33\x67\xa0\xe8\x13\x48\xfe\x10\xfa\x76\x40\xb3\x01\x9e\xf5\ \x90\x38\x2a\x17\x96\x65\x63\x5a\x10\xfb\x5f\x13\xfc\xed\x23\xa0\ \x02\x38\x06\x04\x60\xdf\x38\x4c\xd3\x0b\x7d\x77\x03\xc2\x45\x3b\ \x66\xa0\x1f\x84\x3a\xf1\x70\x9c\x46\x92\x88\x64\x9c\x02\xab\x15\ \xd1\x04\xf1\x54\x0a\x4f\x0f\x85\x13\x82\xba\x22\x16\x1d\x9b\x42\ \x63\xd5\xc5\xb4\x9b\x01\xc2\xed\x43\xe1\x93\x76\xd4\xb1\x12\xe9\ \x69\x74\xa8\x19\x6d\xaf\x40\xba\x18\x59\x43\xd0\xd3\x21\x54\xbf\ \x18\xc9\x9f\x2a\xb9\xfc\x52\x8a\xa9\x8a\x15\x86\xae\x10\xfa\x57\ \x0c\x35\xcd\x46\x56\x4f\xa4\x81\x09\x52\x41\x9a\xa2\x8b\xd0\x71\ \xa1\xf8\xcd\x3e\x89\xc7\x25\x56\x4a\x3d\x67\x4b\xcb\x90\x36\x21\ \xdd\xe4\x96\x98\x22\xf1\x9e\xc4\x42\x89\xc9\xd2\xa7\x48\x2c\x95\ \x28\x73\xe0\x85\x2c\x5e\x56\x05\x96\xda\x78\x45\x4a\x19\xae\x06\ \xa4\x2a\x1a\x14\x4b\xb9\x43\xba\x20\x59\x61\xe3\x65\xfd\xb3\x58\ \x7a\xce\x1f\xd6\xdb\xee\x23\xda\x53\x34\x5f\x9c\xe8\x40\x0d\xfb\ \x90\x1e\x42\x7f\xb6\x23\x6b\x13\xd2\xf9\xa6\xc2\xff\x41\x95\xeb\ \x90\xd2\x3d\x92\xd1\x53\x2a\x71\xab\x6a\x13\xfa\x8f\xd0\xea\xb3\ \x28\x3a\x16\x89\xf3\x24\x6e\x92\xb8\x56\x62\x92\x42\x4f\xb8\x55\ \x65\x21\xdd\x80\xc4\x1d\x12\x1f\x49\xfc\x21\x99\x9f\x4b\x85\x57\ \x4b\x6b\x90\x26\xba\x25\xde\x95\xc5\x59\x89\x72\x69\x46\x0f\xa9\ \x70\x8c\xe2\x54\x2a\x4a\x48\x3a\xb7\x97\xc4\x1a\x95\x63\x29\x98\ \xfb\xb0\xe4\x9a\xa5\x83\x58\x6a\x64\x9b\xe4\x2f\x52\x83\xf7\x2b\ \xad\xa1\x55\xe3\x86\x48\x1f\x12\x50\xa5\x7b\xad\xf4\xb8\x21\x2a\ \x6a\xd1\xce\x2a\xa4\xb5\x68\xb3\xec\x82\xac\x6b\xd1\x9a\x18\xd2\ \x55\x48\xd9\x48\x69\x86\xaa\x3f\x44\xff\x16\x3a\xb5\xc6\x94\x92\ \xd2\x25\x86\x38\x8b\xbd\x47\x32\x6e\x56\xe4\xca\x62\x9d\x6a\x41\ \x9b\x84\x82\x3f\x20\x25\x96\x38\xef\x7f\x26\xb1\x4c\xe2\x5b\x29\ \xfd\x46\x69\x2b\xd2\x85\xa5\x12\x6f\x49\x6c\x96\x78\x43\x7a\xd8\ \x23\xf1\xaa\xc4\x26\x69\xd8\xc5\x12\x1f\xa9\x91\x98\xe4\xba\x5d\ \x71\xf6\xa9\x81\xb0\xc2\x5c\x2b\xb9\x6e\xd1\x7a\xc2\xba\x17\xe9\ \x3b\x62\x6a\xf0\xbd\x22\x0d\xf5\xab\x82\x45\x22\xb2\x1b\x55\xef\ \x40\x91\x25\xe8\xcb\x38\x52\x11\xd2\x14\xa4\x9b\x4c\xc9\x6b\x4a\ \x49\x1e\x55\x2c\x43\x5f\x06\x91\xee\x43\x72\x17\x4a\xfc\x4d\x62\ \x92\x64\x9e\x27\x2b\xbb\x50\x81\x95\xe8\x47\x0b\xad\x11\xfa\x46\ \xe8\x90\x90\x16\x20\x65\xa4\x49\xdc\xee\x2c\xf2\x6b\x89\xef\xa4\ \xec\xcb\xa5\xf7\x0d\xa9\xa0\xbf\xc4\xff\x24\xf6\x4b\xbd\x2f\x95\ \xf2\x87\x4b\xfc\x2c\x15\x3d\x28\x31\x47\x61\x02\x12\x0f\xa9\x93\ \x5a\x9d\xa6\x46\x32\x27\xa9\x9a\x4a\x7d\x8d\xb4\x84\x98\x54\x38\ \x5e\xe2\x62\x45\xf8\x42\xaf\x13\x93\xa1\xc7\x11\xdd\x40\xa3\x20\ \xbe\x07\xdc\x33\x93\xc0\x17\x81\xd0\x00\xf0\x1d\x22\xb8\x3c\x82\ \xd5\x08\xc9\xff\x01\xca\x52\x80\x9e\xe0\xe9\xc0\xba\xfe\x34\x07\ \x66\x84\x38\x3a\x0a\x48\x80\x0e\x5b\x5d\x70\x01\x05\x40\x7e\x0c\ 
\x52\x6a\xc0\xfc\xb7\x1b\x3e\x1a\x01\x91\x99\x40\x37\xa0\x0e\x92\ \x0e\xc3\x9d\xcf\xc3\xfb\xf9\xd0\xfe\x36\x10\x83\x5e\xf7\xc3\xd1\ \x6f\x80\x0e\x2c\x52\x30\xe9\x0e\xfc\x8f\x0a\x9e\x24\x42\x23\x99\ \x74\x52\x46\x37\x06\xf3\x13\x49\x7c\x03\xc6\x25\x9c\xd2\x60\xd6\ \x53\x42\x12\x60\x54\xfd\x82\xaa\xfa\xc2\xd6\x4c\x98\xbd\x15\x12\ \xe7\xb9\xc0\x63\x41\xc8\x84\xac\x38\x24\x00\xbf\x00\x5d\x81\x01\ \x06\x8d\x17\x19\x6c\xb8\xdc\xe2\x54\x81\xfd\x52\x9f\xd3\x90\x17\ \x86\xf6\x1c\xd8\x92\x0a\x85\xc0\xc5\xe5\xe0\x8d\x81\x2b\x05\x94\ \x01\x91\xd3\x90\xb8\x20\x17\xde\xeb\x0f\xa1\xa1\xc0\x40\xe0\x0f\ \x18\xbe\x1c\xf6\x0f\x86\xce\x47\x81\x1a\x9b\x3a\x69\x02\xb6\x42\ \xca\x6e\x08\x05\x89\x45\xd7\x53\x8b\x81\x17\x93\x6c\x7e\xc0\x74\ \x7d\x4c\x34\x3e\x8f\x43\x14\x73\x12\x17\xa9\x40\x42\xfa\x19\x8c\ \x52\x19\xca\x05\xba\x20\x2e\x06\xee\x3a\x0c\xfe\x76\x30\xbf\x04\ \x3e\x07\x32\x80\xcb\xa0\xf9\x2a\xd8\x71\x11\xb4\x87\xa1\xdf\x09\ \x83\x82\xed\x90\xb1\x0a\xd8\x9e\x08\xa6\x05\xe3\xc3\xfc\xf4\xb1\ \x18\x14\x85\xdc\x1b\x80\x03\x06\x74\x26\xa2\xe2\x04\x3a\x1f\x69\ \xc7\xec\x1b\xc3\xdd\x28\x5c\x8b\x4d\x8c\xd5\xbd\xa1\xf1\x6a\xa0\ \x18\x68\x05\xb2\x80\x1e\xc0\x66\x48\xff\x11\x46\xee\x04\x3f\xf0\ \xdd\xe5\x28\xf2\x36\x27\x29\xc1\x02\x0a\x68\x04\xc2\xb4\x51\xc0\ \x29\xa0\xbb\x51\x49\x81\xf1\x14\x46\x49\x03\xc6\x45\x42\x5d\x81\ \x66\xc0\x04\x6e\x06\xa6\x6e\x80\x84\xc7\x9d\xcd\x99\x0a\xca\x85\ \x78\x1d\xb8\xd7\x02\x95\x69\xd0\x91\xe5\x54\x98\x68\x0b\x27\x89\ \x58\x0f\xfc\xc0\xb6\xf9\x21\x2e\x3a\x08\xc6\x38\x2f\xd4\x74\x07\ \x86\x01\x17\x01\x67\xc0\xa8\x85\x9e\xab\x88\xdd\xd6\x8c\xbb\x05\ \xd8\xe9\x81\x5f\xde\x06\x75\x01\x0a\xc1\x58\x05\xd7\x3e\x0b\x97\ \xc6\xed\x47\xee\x02\xfe\x04\x36\x4f\x27\xca\x62\x56\x92\x4e\x77\ \x1b\xd8\xa4\xb2\x01\x1f\x75\x98\xf9\x8f\x42\xcd\x1c\x5a\xcc\xe1\ \xb8\x83\x98\x44\xb0\x68\x02\x7c\xc0\x1e\xe0\x9a\x74\xa0\x08\xa8\ \x05\x16\x79\x30\x82\x83\x70\xd3\x08\xc9\x95\xd0\x91\xe8\x14\x60\ \x02\xe9\xf6\x8e\xfa\x0e\x50\x7b\x67\x88\x46\x20\x94\x05\x89\x7d\ \xa3\x50\xd3\xe2\x7c\xae\x0b\x60\x80\x4a\xe0\xf8\x60\xdc\xcf\x54\ \xd9\x4d\x45\x1c\xf8\xc2\xfe\x21\xcf\x09\x98\xf9\x13\x5c\xe9\x3c\ \x36\xd9\xf9\xea\x70\xc0\xb7\x06\xf7\xba\xc5\x0c\xe6\x01\xd2\x71\ \x93\x42\x94\x44\x0e\x63\x31\x91\xfa\x9a\x67\x68\xe7\x26\x16\x58\ \xc9\xb8\x5d\xce\x77\xe5\x34\xea\x21\x60\x7b\x29\x8c\xbd\x0c\xc8\ \x05\xd6\x47\xa1\xf2\x28\x14\xc3\xe9\x3b\x0c\x62\x45\xb5\xc4\x32\ \x6a\x09\xf9\x21\xec\x03\x9f\x0f\x3c\xfd\xa0\xc6\x03\x41\xa0\x3c\ \x0f\x0a\xbf\x12\xe9\x5b\x1a\x61\xc3\x17\xf0\xe7\xaf\xd0\x2c\xa8\ \x2e\x80\xa6\x49\xc0\x14\xec\x4f\x36\x00\x27\x81\xef\x60\x76\x0d\ \xfc\xd3\x81\x45\x14\x38\x88\xcd\x1e\x06\xe0\x8b\x62\x10\x26\x8f\ \x18\xb5\x24\xd0\x8c\x41\x3a\xb3\x39\x0c\x84\x28\xa1\x37\x70\x15\ \xe0\x6e\xc0\xc4\x87\x45\xcc\x39\x91\x62\x20\x25\xe8\x3c\x34\x05\ \x88\x79\xc0\xf4\xc2\xe8\x36\x22\xb7\x59\x54\x03\x1d\x06\xb8\xbd\ \xa0\x64\x68\xf4\xd8\x20\xf3\x3b\x48\xf7\x01\x4d\x09\x10\xbd\x10\ \x32\x87\x05\x09\xb9\xcb\xf0\x76\x82\xe7\x87\x72\x98\xb7\x1b\x6a\ \x9f\x71\x8e\x7b\xa7\x8d\x9d\x8b\x6a\x60\x2c\xd0\xe2\xf4\x7b\xb2\ \x53\x40\xa7\x43\x83\x56\x04\xa8\xc7\xcf\x59\xb2\x30\x38\x8b\x9f\ \x6c\x4e\x72\x3e\x71\x92\x8c\xf7\x71\xa9\x1b\x85\x0c\xc4\x5d\x48\ \x0c\x37\x50\xee\xb4\xdd\x2c\x60\xf8\x77\xc0\x07\xb6\x25\x22\x16\ \xb5\x51\xb0\xb4\x88\x9e\x1f\xb6\xd3\xd3\x48\x00\xb7\x0f\x92\x5b\ \xe1\xfc\x5a\x62\x33\xe1\xf4\x14\xfb\x24\x3d\x40\xe9\x72\x70\xcf\ \x4d\x83\x53\xbd\x21\x9e\x47\x12\x9d\xe0\x09\x40\x34\x19\x62\xfd\ \x9c\x9e\x6a\x06\x32\x81\xc1\x50\x57\x85\x7a\x74\x80\x1b\x8c\x6c\ 
\xe7\xad\x0c\xc0\xed\xc0\xab\xdc\x07\x64\xe0\x61\x0f\xd9\xe4\x13\ \xe7\x02\xdc\x34\x92\x4a\x10\x94\x0f\x74\xe0\x36\x77\x61\xf8\x94\ \xa3\xb1\xd4\x13\x02\xfa\x02\xcf\x34\x42\x97\xbb\x80\xad\x29\x30\ \xb9\x9d\xce\xfb\x21\xbc\x07\xd2\x3f\x32\xa0\x5c\x50\xef\x03\x2b\ \x05\xa2\x3d\xed\x10\x43\x0b\x3c\xb7\x8d\xdf\x9f\x86\xf4\x3a\xe8\ \x33\xd5\x80\x6d\x53\x81\xee\x0e\x36\xdd\x0e\x5e\x92\x9d\xf3\x8e\ \xd9\xcd\x4f\x9e\xa3\x38\x1f\xc3\xa2\xb7\xd1\x35\x60\xec\x75\x5a\ \x27\xe4\x6c\x62\x05\x30\x6f\x18\x58\xf7\x01\xa3\x09\x90\x88\x89\ \x9f\x1d\x24\x53\x80\x9b\x16\x44\x2a\x06\xed\x80\x2b\x6f\x5e\x74\ \xde\xb9\x58\xcc\x04\xae\x6b\x83\xbc\x23\xce\xf3\x7f\x8b\xc0\x2e\ \x2f\x9e\xb4\x38\x5b\x67\xc0\xaf\x77\x43\xcb\x3f\x40\x17\xc5\x49\ \x09\x86\x31\xab\x23\x10\x8a\x80\x51\x8b\x75\x63\x3b\x4d\x43\x20\ \x7b\x0f\x24\xaf\x32\xe0\xac\x1b\x38\x0d\xb4\x81\xcf\x05\x39\x35\ \x30\xf8\x28\xf4\xf9\x12\x9a\x16\x40\xc4\x0b\x1a\x0d\x94\xd8\x05\ \x46\x57\x60\xf4\xb4\x20\xd5\xd6\x4b\x92\x81\x6d\xc0\xd2\x12\x08\ \x3c\xe7\x54\xe6\xc3\xcd\x29\x22\x18\x94\xf2\x3d\x09\xf8\x68\x24\ \x15\xe1\xe6\x00\x60\x3c\x2e\x34\x06\x98\xb0\xd7\xc1\xe9\x61\x60\ \x08\x34\x64\x40\xd6\x63\x60\xec\x02\xfc\x49\x58\x53\x23\xec\xb8\ \x39\xca\xee\x7e\x10\x31\xe1\xbc\x6a\x28\xfd\x15\x92\xc3\x70\x70\ \x3a\x54\x7b\xa0\x67\x08\x06\x6c\x00\xef\xcf\x6e\x62\xbd\x5d\x44\ \x0b\xc3\x44\x4a\x21\xad\x19\x8c\x9f\xbc\xf0\x4e\x1a\x54\x5e\x65\ \x0b\x13\x5d\x9c\x86\xef\x07\xde\xb7\x20\xfb\x10\x3c\xbc\x0d\x06\ \xc7\x61\x25\xb0\x22\x07\xea\x9e\xb0\x29\xc8\xa8\x05\x8d\x27\x48\ \x29\x1e\xca\x70\x73\x14\xa3\xcf\xab\xd0\xd2\x0b\xac\x54\x1a\x1a\ \x5e\xc2\x68\x8e\xa3\x0a\x13\x06\xaf\xb7\xc5\x96\xdf\x6d\x24\xd4\ \x7f\x0c\x1b\x7d\x30\xfd\x59\xe0\x95\x3c\x9b\x3e\x53\x92\x60\xc8\ \x4e\x8e\x7e\xda\xcc\xa6\x02\x1b\x30\xa5\x0e\xc1\xa4\x01\xed\x40\ \x7f\xc0\x72\x40\x97\xb2\x1d\xdc\x73\xbd\x18\xdb\x87\x43\x60\x18\ \xf6\xf5\x45\xa1\x73\x5a\x47\x1c\x85\x1f\x07\xb8\x11\x11\x0c\x63\ \x09\x5c\xf1\x5f\xd8\x52\x02\xc1\x73\x81\x09\x58\x4c\xc4\x34\x1f\ \x02\xb7\x0b\x2b\xf2\x10\x15\xf4\x27\x07\x51\xe5\x3a\x4b\xbf\xd4\ \x67\x20\xb0\x0d\x3c\x60\xe8\x35\xd4\x36\x13\x52\xd7\x39\xc7\x9a\ \x0a\xbc\x66\x6f\xc4\xe6\x75\xb0\x3c\x03\x1e\x5d\x09\xc5\x37\x26\ \x43\xa8\x14\xe8\x05\xae\xc3\x30\xb1\x8c\xda\xd7\xa2\xfc\xd6\xcb\ \xee\xcf\x7c\xa7\x80\x11\x0d\x90\xf5\x36\xf0\x41\x3e\x9c\xfc\xbb\ \xb3\x78\xb7\xf3\x37\x08\x29\xef\x40\x7b\x8a\x53\x72\x7f\xe0\x38\ \xe2\x7a\x0c\x42\xb6\xbe\xf8\x1f\x86\x60\x0c\xb8\x11\xc8\x43\x0c\ \x25\x46\x04\x8f\x71\x2d\xf4\xad\x27\x7c\xf8\x67\x5a\xc8\x23\x86\ \xa8\xc5\xc0\xed\xde\xce\xe0\x41\xf7\x81\x3e\x46\x65\x01\x14\x38\ \x81\x74\x1c\x6d\x17\xea\xfc\x1a\x29\x0d\x69\x80\x5b\x07\x4e\xa1\ \xfb\x85\xaa\xca\x90\xce\xc3\x0e\x58\x8c\x95\x98\x29\x65\x75\x51\ \x60\x17\xfa\x40\x68\x95\x50\x5d\x25\xd2\x39\xa9\x12\xe3\x24\x1e\ \x96\x78\x59\xe2\xff\x6c\x07\x3c\x7c\xac\xf4\x32\x52\xde\x60\x89\ \x47\x25\x5e\x93\xf2\xc6\x49\xfe\xcb\x24\x96\x4b\x3c\x23\xf1\x93\ \x94\x76\x8f\x64\x0e\x74\x2c\xfe\xcb\x12\xdf\xe8\x18\x52\x9c\x46\ \x29\x65\x9c\x54\x92\x24\xb1\x54\xad\x9c\x50\x35\x31\xad\x45\x6a\ \x20\x2e\x93\xad\xd0\x1e\x87\xa4\xa3\xf6\x49\xe4\xd4\xc1\x67\x57\ \x02\x8f\x03\xc7\x0b\x18\xd0\x2f\x9f\xe9\x47\xe0\xa5\xde\xf0\xc7\ \x3a\x60\x4c\xf8\x2f\xb5\x80\x40\x1b\x49\x6b\xa1\xaf\x20\x2f\x0a\ \x39\xb7\x02\xfb\x66\x01\xf7\x39\xb2\x1c\x85\xe4\x23\xe8\xa3\x65\ \xf0\xf0\x4f\xf0\xc2\x30\xa8\x9d\x65\xbf\x97\xbf\x0e\xd2\x76\x42\ \xf0\x5e\x70\x1d\x87\x82\x57\x6c\x80\x06\x93\x20\x2b\x44\x8c\xbe\ \x88\x5c\x70\x2f\x24\x9d\x3a\x76\x91\x49\x53\xfb\x67\x50\x97\x06\ 
\xa3\xee\x23\xd5\x5c\x00\xec\xe0\x24\xb0\x18\x13\x53\x33\xe1\x50\ \x2a\xc4\xdb\xed\x93\xef\xb1\x11\x2a\x3c\xd0\x7e\x1d\x30\xa8\x0a\ \x3a\xdd\x8c\x98\xe5\x61\x58\x1b\x7c\x92\x0d\xdf\x7f\x19\x87\x3b\ \x37\x41\xd6\xef\x90\x17\xc1\x2a\xb6\x59\xb2\x7b\x3b\x70\xc8\x07\ \xc6\x01\x60\x37\xa4\x7d\x41\x7c\xee\xeb\xb4\xfc\xb1\x14\xce\xc6\ \x61\x76\x16\xb4\x5f\x64\xab\x6c\xea\x7a\x98\xb3\x01\x4e\xcc\xb1\ \xbb\xac\x74\x83\xcd\x54\x64\x43\xef\x03\x70\xfe\x59\xe2\xb4\x12\ \x66\x02\x8c\xf8\x89\xac\xcc\x7b\x71\x11\x63\x1f\x59\xd4\xb4\x6f\ \x87\xce\x0c\x78\xe0\x1d\xba\x78\xdf\x60\x32\x27\xed\xaf\xea\x2c\ \x7a\x44\xa8\x79\x0b\xd2\x6c\xa4\xbb\xd1\x67\x31\xb4\x40\x48\xaf\ \x21\xf5\x44\x32\x0b\xa5\x9b\x4c\xad\xb0\xd0\xb3\x42\x2b\x85\x1a\ \xf6\x20\xbd\x88\xda\x8f\xa1\x4f\x84\x76\x75\x22\xcd\x44\x1a\x8d\ \x74\x77\x92\x6a\x8e\xd8\x01\xcb\x9a\xed\x95\xdc\xa5\x12\xb3\x24\ \x5e\xb7\x21\xf7\x86\x5b\x9a\xd4\xdd\xce\xf4\xec\x95\xa6\xb9\xa5\ \xbc\xa1\xb2\x08\x48\x7d\xc7\x4b\xf3\x3d\x92\xb1\x51\x41\x24\x15\ \x0c\x94\x6e\xf1\xaa\xd1\xa8\xd3\x7e\xa4\xbd\xc8\x4e\x9b\xf7\x64\ \x49\x77\x23\x0d\xef\xa3\x33\x54\xca\x0c\xb7\x9b\x04\x01\xcb\xeb\ \x58\x9f\x6f\x0d\x4a\xf6\xc0\x0a\xa0\x75\xa2\x23\xc4\x78\xe1\x93\ \x3c\xa6\xfd\xdb\x66\xa9\xe3\xc0\x67\xe7\xc2\x2f\x4f\xc2\xbe\x12\ \x9b\x1f\x9a\x12\xa0\x7a\x11\x34\xac\x87\x23\xef\x74\xe0\xcd\x84\ \x7e\xd7\x27\x63\x2c\x98\x06\xb1\x09\xf6\xce\x1b\xf5\xf0\xc8\x12\ \x08\xc5\x61\xcd\x9d\x0e\xe7\x6d\x86\x09\x31\xa8\x1d\x83\x41\x10\ \xda\xf2\x6c\x37\x61\x96\x11\x02\x38\x3b\x01\x3c\x11\xd2\x53\xbe\ \xc7\x72\x7e\x7b\x3b\x13\x89\xbf\xf3\x03\x54\xe6\x41\xef\x32\x0a\ \x86\xf5\xc0\xd4\x29\x11\x00\x1a\x8a\x80\x30\xd0\x2c\xce\x59\x0a\ \xe7\x00\x5f\xf4\x05\xa6\x01\x66\x9d\xed\xa6\xde\x82\x69\x9f\xd8\ \x26\xd3\x72\xf4\xb9\xcc\x11\x63\x0b\xd8\xef\x82\x3d\x3e\xe8\xd2\ \x00\x99\x17\x16\xc1\xf2\xa9\xf6\x26\x50\x0f\xac\x84\xe9\x6f\xd0\ \xf1\x58\x1b\x7c\x9b\xe1\x38\xe7\x3a\xb8\xf8\xbf\x50\x93\xe8\x10\ \x76\x13\x24\xc8\x76\x2f\x29\x7b\xc8\xa0\x02\xe2\xd7\xc3\x0f\x17\ \x62\x5e\x3e\x8b\x62\x3e\xa0\x17\x10\xc1\xe4\x20\x43\x09\xad\xdb\ \x08\xcd\x26\x8c\x01\x33\xa1\x4c\x8c\x05\x76\x25\x3a\x8a\x5e\x0c\ \xbe\xcd\xf0\x78\x15\x6c\x04\x0e\xdd\x08\x64\x05\x6c\xb5\x68\xea\ \x81\xe7\x49\x17\x33\xd7\xc3\x20\xcb\x76\xcc\x3e\xa7\x10\x0f\x10\ \x01\x86\x1e\x87\x8c\xcb\xdc\x70\xf4\xaf\xe0\xeb\x05\x1a\xe1\xf2\ \xfd\xe8\xe5\x0e\xfc\x2f\x02\xdb\x6f\x03\xae\xb0\xe9\x76\x64\x3d\ \x54\x25\x3a\x2a\x6f\xc1\x59\xcb\x66\xea\xdc\x1d\x58\x18\x74\xd0\ \x07\x6a\x27\x41\x69\x8c\x94\x21\x8f\x30\x90\x1a\x92\x9c\xdf\x5c\ \x1b\xef\x43\x78\xdd\x2a\xa8\xc9\xc5\xc4\x65\x92\x07\x18\x09\xb6\ \xa2\x33\xd4\xb6\xb1\x05\x6b\xed\x84\x5d\x56\x0a\x4c\x32\x00\xc0\ \x3a\x0f\xaa\xc6\x90\xfc\x9c\xcd\x03\x38\x11\x20\xd7\xb1\x80\x03\ \x42\x90\x71\xbd\x17\xfe\xbc\xd0\x71\x6e\x2e\x7b\x03\x2e\xdd\x4d\ \xe4\x83\x00\x34\x80\xb1\x74\x08\xc4\xaf\x43\xe4\x80\x6b\x05\x5c\ \x12\x84\xca\xae\x8e\x2f\xe9\x84\x90\xcb\xde\x9d\x92\x33\x44\xe9\ \xa0\x85\x24\xb0\x86\x43\x52\x12\xdc\xd5\x88\xc1\x6b\x74\xa3\x83\ \x14\x22\x54\x60\x50\x13\xbd\x04\xbe\xee\x83\xf9\xdb\x4d\x16\xdb\ \x80\xc3\x5e\x68\xbe\xd5\xd1\xa1\x69\x10\x1a\x69\xef\xf2\x7a\xe0\ \xd8\x0b\x31\x98\x52\x03\xae\x3f\xec\x33\xa8\xf0\x91\x7a\xc8\x2e\ \xa4\xc5\xd1\x69\x01\x3d\x7f\x04\x4e\x7b\x9c\x73\xaa\xb2\xd3\xcd\ \xf0\xd5\xb4\x7d\xd0\x48\x3c\x03\x8c\x4f\x81\xf6\x5c\xe0\x24\x06\ \xeb\x60\xfa\xff\xa0\x17\x50\x59\x07\xfc\x66\x7b\xf0\xdc\xd3\xb6\ \xb5\xca\xe9\xc0\xc7\x5e\x52\xc1\x16\xe1\x8f\x53\xed\xbd\xc9\x59\ \x4c\x8e\xf1\x23\x7e\x5c\x00\x24\xe2\x82\xce\xcb\x70\xd5\xbe\xc8\ 
\xbc\x20\x50\x09\x1c\x48\x80\xd6\x12\xd8\x3d\x1c\x0e\xe6\xdb\x4b\ \x31\x80\xb2\x64\x68\xba\x06\x7a\x8c\x6b\x23\x3a\xa0\x82\xf8\x8c\ \x08\x47\xc7\x42\xb3\xdb\x5e\xb2\xdf\xb6\x87\xe4\x1b\xe0\x2b\x8b\ \x42\xc3\x29\x48\xaa\x80\xd1\x95\x04\xdf\x0b\x53\x57\x0c\x1e\x13\ \x12\xf7\x00\x3f\xb9\x81\x1d\x90\xf6\x13\xfc\xb7\xda\x86\xf3\x91\ \x20\x1c\xfc\x1d\xac\x13\x70\xd9\x2f\x30\x0a\x70\x45\xe1\xfb\x10\ \x1e\x26\x03\x6d\x18\xfe\x15\x50\xd0\x08\xa5\x11\x4c\xef\x1a\x5c\ \x15\xd3\xa9\x26\x8b\x0c\x3c\x64\xf0\x2b\xc6\x34\xa1\x98\xd3\xe7\ \x25\x0e\x54\x5a\x1c\x93\x5d\xef\xc0\x26\xc1\x69\xe6\xae\x4e\x4b\ \xfe\x95\xef\xb3\x9d\xbe\xf8\x2b\xb8\x02\x0c\x88\x43\x5e\x04\x9a\ \x0c\x30\x3d\x90\x64\x42\xd0\x70\xd2\x6b\x1c\x5c\x07\xc0\xac\xb0\ \xe1\x6b\x0d\x00\xb3\xd5\x89\xd4\x01\x87\x92\xba\x3b\x8b\x71\x3b\ \xff\x6f\xea\x03\x4d\x1d\x30\xa4\x1a\x4e\x5a\xf6\xae\xf5\x07\x56\ \x26\x13\xab\x7e\x04\x77\x47\x0d\x78\xb7\x62\xfc\xd3\x42\x27\x0d\ \xe8\x16\x87\x87\x4f\x42\x61\x1d\x94\xf7\x87\x25\xe9\x7f\x99\x67\ \x18\xdb\x09\x17\x1e\x06\x6f\x1d\xb8\x9b\x0c\x4e\x8d\x16\x47\xbb\ \xc2\x19\xd3\x2e\x22\x0b\x28\x8c\x83\xab\x0d\x7a\x1d\x36\x71\x9d\ \x4e\xa2\xa3\x28\x08\xfd\xe2\x18\xc9\xd0\x6a\x40\x72\x04\x32\x0f\ \x01\x6d\x2e\xf0\x5a\x90\x21\xac\xae\x60\x76\x02\x27\x9c\x23\x6d\ \x72\x6e\x17\x9a\x9c\x4b\xfa\x46\x03\x12\x65\x53\xe3\x28\xc7\x9d\ \x1e\x73\x12\x60\x93\x09\xcd\x16\x04\x4d\xc8\xb3\x60\x53\x1b\x9a\ \x25\xd4\xf4\x15\x52\x96\x7d\x4d\x1a\x5e\x63\xcf\x3c\x1e\x93\x2d\ \x8e\xd6\xbd\x48\x45\x09\x92\xbb\xc4\xf6\x59\x29\xdd\xd5\xf2\xab\ \xa1\xaf\x85\xde\x77\x66\x23\xed\xfb\x90\x6e\x47\x32\x06\x4b\x3c\ \x27\x71\x97\x74\x5e\x9a\xac\x16\xd4\x20\x64\x1d\x40\xea\x9f\x2e\ \x31\x47\xe2\x6a\xe9\x46\xaf\x62\x6d\xce\xe5\x76\x7e\x77\x7b\x4e\ \x92\x3b\xda\xbe\x1f\x5e\x89\x74\xa5\x5f\x62\xae\x7d\xb5\x6a\x3c\ \x29\xbd\xe8\x91\xe6\x23\x3d\x85\xd4\xbb\x44\x31\x96\xa9\x99\xcd\ \x8a\xf1\x95\x94\x3b\x52\xee\x9e\x31\xbb\xd8\x8c\x97\x80\xc6\x5c\ \x68\xac\xa3\x35\xdf\x6e\xde\x5e\xc0\xf4\x27\xc1\x58\xdc\x17\xac\ \x73\x1c\x00\x45\xb0\x6e\x69\xe4\x8f\xc1\xc2\x72\xfa\x23\x82\x1d\ \x1a\x39\x05\x28\xc5\x01\x5e\x21\xec\xbd\x15\xe3\xb1\x77\xf1\x2c\ \x0a\x63\xd4\x00\x87\x46\x3b\x1e\x6c\x34\x7c\xeb\xc7\xf5\xaf\x4f\ \xed\xd3\xa8\x79\x9d\x18\x93\x70\xd7\x1d\x83\xdd\x83\xa0\xc0\x24\ \xb6\x6e\x31\x01\x6e\xc6\x04\x92\x38\x82\xcb\x5c\x08\x66\x14\xd6\ \x16\x51\x5b\xbe\x8d\x72\x32\xd8\x88\x97\x29\x40\xbf\xba\x7d\x98\ \x0d\x09\x10\x6f\x05\x8e\x02\x9e\x10\xc4\x60\x67\xa9\x9d\xfd\xaf\ \xde\x0d\xe6\x4a\x0f\x58\x3d\x1d\x90\x95\x13\x9b\xf6\x2d\x1f\xbd\ \x15\xe4\x88\xdf\x2e\xa0\x15\x38\x0b\x1c\x2f\x82\xd0\x5c\xa0\xf0\ \x57\x60\x9d\xcd\x63\x1a\x0a\xcb\x06\xd0\x50\x01\x91\x81\x40\xce\ \x61\xe7\x39\xb9\xd0\x39\xc8\xc6\x6e\x04\xc0\xc0\xcd\x69\x3b\x68\ \xe5\x00\x47\x52\x69\x89\x5e\x4e\x10\x48\x21\x88\xcb\x58\x08\xbe\ \x00\x6c\x01\xf6\x3e\x41\x2d\xb9\x44\xf1\x60\x81\x9d\x11\x93\xca\ \x31\xb7\xba\xe1\x82\x9f\x81\x02\x03\xa2\x99\x30\x14\x0e\xa4\xc0\ \x13\xc7\x21\x7b\x01\x70\x36\xcb\x09\x43\x89\xc4\x1f\x28\x63\xe1\ \x0a\x9b\xc9\x32\x9d\x30\xd5\xd5\xe9\x91\x46\xe0\xc4\x18\xd0\x83\ \x16\x78\xea\x1c\x21\x1c\x04\xa1\xfb\xc8\x9e\x9f\x41\x4d\x1e\xc4\ \x5e\x39\xee\xa4\xc2\x5c\xb0\xfa\x40\xb5\xcb\xce\x57\xb6\x94\xdb\ \x91\x36\x06\xd6\xc2\x4b\x39\xa1\x1c\x2a\x00\x11\x81\x0b\xde\x82\ \xad\x49\xc4\xbf\x7c\x93\x0e\x6e\x64\x37\x06\xb5\x18\x40\x9c\xae\ \xfc\x06\xe1\x2d\x98\x3b\x3c\x50\xb2\x1a\x88\x09\x12\x5a\x38\x36\ \x0b\xfa\x06\xa0\xe7\x5c\xe7\xc6\xa6\x3d\x13\xcc\x3f\x61\xc6\x87\ \xbc\x30\x3f\x42\xa6\x63\x2e\x52\x9d\x1b\x9b\x06\x87\xb5\xf2\x1d\ 
\xf2\x39\x3b\x1b\x98\x71\x04\xdb\x17\xec\x06\x7a\x90\xba\x60\x22\ \x5d\x57\x83\x31\x59\x90\xfe\x81\x8d\x41\x2b\x1b\xfe\xf4\xdb\x4d\ \x4e\x93\xc3\x8b\x95\x50\x0b\x66\xe7\x20\xba\x63\x51\x08\x98\xb9\ \x0f\xc3\x40\x37\xd6\xca\x77\xe8\xe0\x16\xc2\x24\x72\x35\x01\x7a\ \x02\x73\xdc\xaf\xe2\x4e\x9a\x00\xd9\x41\x38\xf8\x2b\xd2\x30\xa4\ \x41\x48\x2f\xdb\x01\xa9\x7d\x09\xd2\x40\xaf\xe4\x2a\x92\xe8\xa7\ \xce\xfb\x0d\xbd\x1e\x43\xcb\x85\x2a\xcb\x50\xe0\x15\x14\x7e\xd3\ \xd0\xd7\x16\xfa\xde\x42\x91\xcf\x91\x35\x07\x35\x6f\x40\xe5\x42\ \x2d\x9d\xc8\x7a\xd0\x70\x02\xd8\x42\x89\xb7\xa5\x82\x0b\x14\xd9\ \x87\x34\x0b\x89\xc5\xf6\x08\xe1\xda\x22\xbb\xb1\x79\x59\xe2\x4f\ \xc9\x7b\x8b\xf4\x3c\xb2\x78\x4d\x47\x88\xab\x95\x93\xd2\x2d\x48\ \x09\x97\xc9\xa2\x5a\xe2\xa8\xe2\x74\xd8\x73\x94\x84\x71\xd2\x84\ \x54\x29\xad\x44\x1a\x93\x22\x1a\xd7\x20\x8d\xb0\x67\x22\x95\xb5\ \xa8\x7a\x1f\x52\xae\xcb\x59\xc4\x20\xb5\x5d\xe3\xd5\x8f\xad\x68\ \x6f\x33\x0a\xbf\x66\x48\x05\xc9\x12\x57\x4a\xfe\x21\xaa\x2c\x47\ \x3b\x9a\x91\x8a\x0d\xc9\xb8\x44\x72\x4f\x53\xec\xfe\x02\x55\x34\ \x22\xeb\x30\xd2\xa4\x44\xc9\x78\xca\x19\xf6\xcc\x97\x86\x95\x48\ \xfb\x90\x0a\xa6\x48\xfc\x20\x79\x2f\x96\x16\xe3\x4c\xbc\xbe\x92\ \xce\x19\x2e\xcd\xec\xa9\x38\x47\xd5\x4e\x4c\x56\xef\xbb\xa4\xe1\ \xdd\x24\xbe\x51\x05\x71\xd5\x10\x57\x98\xbd\xd2\xe8\x3e\xd2\xb9\ \x1e\x69\x58\x9a\x94\xf0\x90\x4e\x25\xaf\x11\x81\xa9\xd9\xd2\x3f\ \x3c\x6a\x6d\x41\x3f\xcb\xa1\xd0\xc4\x04\xc9\x3f\x48\xb1\xaf\xd0\ \x1c\xa1\xc6\xdf\x4c\xa9\xb7\x47\xa2\x54\xe2\x42\x89\xfb\xa4\xa4\ \x73\xb4\xa6\x02\xad\x6e\x41\xea\xe6\x93\xb8\xd5\x99\x0d\xfe\x4b\ \xea\x31\x5c\xf1\x5f\x91\x75\x10\xe9\xbc\x14\x27\xc6\xbe\x69\x9f\ \xc4\x7d\x49\xd2\x55\x99\x4e\xbc\x9d\x2a\x3d\x89\xc4\x74\x89\xf7\ \xa5\x31\xbd\xa5\xae\x97\x49\xac\x52\x88\x36\xa9\x47\x81\xfd\x3a\ \x35\x8a\x52\x27\xb1\x5c\x16\x6f\x48\x4f\xa1\x78\xca\x75\x6a\x4a\ \x78\x49\x35\x74\x68\x8e\x4b\x42\x59\x3e\x59\xef\xa1\xda\x00\x8a\ \x4f\x4f\x96\x28\x96\xae\x40\xc1\xfd\xf6\x04\x2a\xb0\x14\xc9\x3f\ \xde\x59\xe8\x34\x89\x4b\xa4\x94\x6e\xaa\x5d\x6a\xea\x45\xa1\x2f\ \x2c\x64\x6d\x41\x2a\xcd\x75\xf4\xe3\x11\x89\xd5\x92\xeb\x7d\xe9\ \xd2\x54\xe9\x17\xa4\x61\x45\x12\x1f\x4a\x54\x2a\xc6\x46\x29\x6d\ \x8c\xc4\x9b\xb2\x58\x2b\xa5\xa7\x39\x8b\x3d\x29\x75\x4f\x56\x94\ \xb5\x3a\x4c\x4c\x01\x0e\x4a\xc6\x33\x8a\xd2\xa4\x38\x01\x1d\x24\ \xa6\xf6\xb1\xcb\xa4\xf3\x5d\x8a\xe6\x4e\x51\x1d\x3f\xeb\x3b\xa4\ \xd5\xc4\xf4\x11\x12\xf1\xcf\x91\x1e\x47\x9a\x99\x2d\xe5\xf8\xa5\ \x2b\x4c\xe9\x5b\x54\x17\x45\xd6\x03\x6e\xc9\xdf\x5d\xe2\x5c\x7b\ \x66\xe8\xc9\x97\xae\x34\xf4\xe9\x09\x7b\x28\xba\x4a\x68\xb1\xd0\ \xd9\x16\xd4\xbc\x1d\xe9\x2a\xbf\xc4\x15\x12\xf3\xd5\x4a\xab\x2c\ \x8e\x4b\xb9\x23\xa5\x67\x90\xae\x2a\x94\x58\xa9\x06\xa4\x20\x4b\ \xed\xcb\x0b\xf6\x4a\xbc\xab\x38\xfb\x55\x81\x65\x4f\x79\xf9\x46\ \xed\x04\x74\x9a\x6a\x1d\x24\xa4\xad\x48\x11\x3a\x15\x34\x7f\x94\ \xdc\x53\x15\x49\xdd\xab\x36\xef\xef\xfa\xa2\xf0\xa0\x16\x76\x3d\ \xa2\x08\xeb\x74\x77\x9e\xe4\x9a\xdb\x9f\x79\xc6\xda\x14\xd8\x38\ \x19\x92\x8f\xc1\xe8\x30\xd1\x2b\x0d\x52\x9e\xea\x86\xf1\x45\x02\ \x14\x89\xc8\x05\x89\x54\xbd\xb8\x8f\xa5\x8f\x07\xf8\xfc\x21\xe1\ \xca\xb0\xc5\x32\xd9\x21\xcd\xfa\x20\x44\x4a\x81\x1b\xa2\xf8\x87\ \x9f\x80\xd0\x16\x12\x2a\x4f\x41\xd4\x87\xd1\x31\x02\xca\x2c\x22\ \xb3\x77\x11\x9d\xba\x12\xef\xba\x18\x66\x74\x24\xee\x3e\x9f\x41\ \x53\x00\xb8\x80\x28\x25\xf8\x68\xc6\x4b\x39\x71\xd2\x30\xc8\xe6\ \x77\xba\x10\x26\xca\x20\x76\xe1\x3f\x6f\x01\xee\xd0\x49\xde\xf0\ \xcd\x23\xd2\x69\xb1\xc2\x5f\xcc\x65\x9d\xd5\x84\x5b\x45\xa3\xbf\ 
\x81\x98\x75\x02\xde\xdf\x79\x95\x46\x6d\x7b\x4f\xd7\x1c\x9a\xad\ \xc0\x2f\x48\x2f\x21\x8d\xf4\x48\x5d\x8b\xa4\x3b\x0d\x35\x1c\xb2\ \x73\xfa\x1b\x16\xda\xd8\x89\x82\xb5\xa8\xea\x18\x7a\xc1\x42\xcf\ \x0b\xad\x13\xfa\x3d\x8c\xe2\x9b\x0d\x75\x9c\x34\xa4\xa3\xc8\x6a\ \xb4\x19\x50\x05\xdd\x25\x63\x92\xc4\x08\x89\x19\xd2\xf8\x02\xe9\ \xff\x21\x79\xee\x90\x8c\xcf\x25\xe3\x4a\xbb\x7f\x8c\x6a\xc9\x78\ \x47\x32\x26\x48\xac\x90\xd8\xa7\xbd\x48\x11\x9e\x95\xd8\xa9\x72\ \xa2\xb2\x58\xa7\x36\x0e\x4a\x2c\xd7\x7a\xb3\x5c\xa1\xae\x77\xa9\ \xf9\xfa\xb1\xba\xb6\xfc\x2e\x69\xd0\x10\x19\x0d\x7c\xab\xe5\x13\ \x5d\xdc\x64\x3e\x40\xda\xaf\x03\xc0\xbf\x0e\xba\x75\xf2\xf5\xe7\ \xa2\xc7\x06\x38\x5a\x6b\x32\xfe\x50\x1a\x19\xd5\x51\x68\x4f\x83\ \x84\x28\xd4\xd6\xd1\xf9\x3c\x6c\x19\x0f\x7d\x2c\xe8\xfe\x0a\xb8\ \xde\x99\x02\x6d\x57\x41\xe6\x87\x90\xe1\x42\x97\x54\x41\x71\x19\ \x9d\x57\x99\xf8\xdf\x4b\x41\x8b\x2e\x27\x16\xbe\x06\x4f\xc6\x6b\ \xe0\xa9\x85\xba\xc7\x61\xf8\xbb\x70\x32\x8c\x96\xb5\x12\x49\x08\ \xe2\x5b\x1e\x82\x77\xee\x21\xce\x2d\x1c\xa2\x3b\x3d\x88\x62\xb2\ \x95\xdd\x5c\xc6\xf9\x9e\x8d\xec\x1b\xf8\x15\xc5\x5d\x8a\xd9\xfc\ \xfd\x97\x4c\xce\x3c\x97\xfd\xf7\x1f\xa7\xab\xd9\x40\xc3\x2b\xb7\ \x61\xac\xfc\x3d\x41\x57\x7f\xdf\xdd\x1e\xe6\xac\x05\x4a\x4c\xea\ \x17\x59\x64\xee\xcb\xc1\x35\x0f\x38\xde\x1d\x54\x04\xf4\x71\xc2\ \xcf\x20\x3b\x00\x4d\x3a\x49\xfd\xe7\x65\xac\x4f\x86\x1b\xde\x05\ \xe3\x89\x31\xc4\x9b\x5f\x76\xac\x6c\x22\x2e\x8e\x41\xff\x77\xed\ \xbc\x9f\x93\x83\x36\x8e\xc2\xe0\x12\x3a\xb8\x80\x24\xbe\xb0\x83\ \xd7\xcd\x4b\x09\x2f\x81\x76\x13\x52\x0c\xf0\xec\x01\xf3\x43\xe0\ \xb7\xf1\x54\xec\xfb\x37\xf9\xb1\x12\x02\x34\x93\xe9\xdf\x0d\xe6\ \xd7\x90\x60\xc1\xed\x2b\xa8\x5e\xf7\x22\x4b\x0a\x07\x91\x5b\xe6\ \xa1\x3c\x2d\xc6\x4b\x67\x42\xb8\xae\x6e\x7e\x73\x5e\x72\xaf\x6d\ \xa4\xbf\xdf\x1b\x7a\xf9\x61\xdb\x34\x92\x7e\x18\x86\xf9\x51\x09\ \x34\x8c\x04\xa6\xdb\x73\x0c\x8e\x42\xd1\x01\xc8\xfc\x9e\xc8\xa4\ \xa3\x54\x4e\x6f\x64\x76\x29\xdc\x0e\x64\xee\x04\xea\xea\x31\x6b\ \x4e\x60\x5a\x51\x4c\xd7\x6f\xa0\x6e\x50\x6f\x40\xdd\x5d\x58\xa7\ \xfb\x62\x5a\x3e\xc4\x4c\x0c\xd2\x70\xf1\x7f\xd0\x77\x33\x9c\x13\ \xc4\x5d\x0f\xfe\x3a\x70\x6f\x07\xe3\xb8\xe3\x6c\x7a\x54\x91\xbe\ \x25\x9b\x5d\x4c\x22\x89\x8d\x24\x45\x0b\x68\x8d\x9c\x8f\x72\xf7\ \xd1\xb2\xeb\x72\x9a\xae\xf6\xb0\x6a\xfd\x9d\x7c\xf8\xa4\xc1\x23\ \xef\x1c\xa4\xa5\x7d\x0a\xff\x1f\xa7\x48\xb3\x27\x67\x17\xe2\x1e\ \x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ " qt_resource_name = b"\ \x00\x05\ \x00\x73\x5e\x63\ \x00\x6c\ \x00\x6f\x00\x67\x00\x6f\x00\x73\ \x00\x08\ \x0a\x61\x5a\xa7\ \x00\x69\ \x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\ " qt_resource_struct_v1 = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ \x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ " qt_resource_struct_v2 = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ \x00\x00\x00\x00\x00\x00\x00\x00\ \x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ \x00\x00\x01\x74\x20\x97\x84\xbd\ " qt_version = [int(v) for v in QtCore.qVersion().split('.')] if qt_version < [5, 8, 0]: rcc_version = 1 qt_resource_struct = qt_resource_struct_v1 else: rcc_version = 2 qt_resource_struct = qt_resource_struct_v2 def qInitResources(): QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, 
qt_resource_name, qt_resource_data) qInitResources()
[ "silx.gui.qt.qRegisterResourceData", "silx.gui.qt.qVersion", "silx.gui.qt.qUnregisterResourceData" ]
[((27887, 27988), 'silx.gui.qt.qRegisterResourceData', 'QtCore.qRegisterResourceData', (['rcc_version', 'qt_resource_struct', 'qt_resource_name', 'qt_resource_data'], {}), '(rcc_version, qt_resource_struct,\n qt_resource_name, qt_resource_data)\n', (27915, 27988), True, 'from silx.gui import qt as QtCore\n'), ((28015, 28118), 'silx.gui.qt.qUnregisterResourceData', 'QtCore.qUnregisterResourceData', (['rcc_version', 'qt_resource_struct', 'qt_resource_name', 'qt_resource_data'], {}), '(rcc_version, qt_resource_struct,\n qt_resource_name, qt_resource_data)\n', (28045, 28118), True, 'from silx.gui import qt as QtCore\n'), ((27663, 27680), 'silx.gui.qt.qVersion', 'QtCore.qVersion', ([], {}), '()\n', (27678, 27680), True, 'from silx.gui import qt as QtCore\n')]
from datetime import timedelta from django.db.models import Sum, Q, DurationField from django.db.models.functions import Coalesce, Cast from django.utils import timezone from .models import ControllerSession from ..users.models import User, Status def annotate_hours(query): """ Annotates given QuerySet with controlling hours for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. """ MONTH_NOW = timezone.now().month YEAR_NOW = timezone.now().year CURR_MONTH = (Q(sessions__start__month=MONTH_NOW) & Q(sessions__start__year=YEAR_NOW)) PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else 12) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1)) PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else 12 if MONTH_NOW > 1 else 11) & Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1)) return query.annotate( curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())), prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())), prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())), ) def get_user_hours(): """ Returns query set of active users annotated with controlling hours for the current (curr_hours), previous (prev_hours), and penultimate (prev_prev_hours) months. """ return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER)) def get_top_controllers(): """ Returns query set of active users annotated with controlling hour sums for the current month (hours) sorted by most controlling hours (controllers with no hours are not included). """ SAME_MONTH = Q(sessions__start__month=timezone.now().month) SAME_YEAR = Q(sessions__start__year=timezone.now().year) users = User.objects.exclude(status=Status.NON_MEMBER) users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR)) return users.exclude(hours__isnull=True).order_by('-hours') def get_top_positions(): SAME_MONTH = Q(start__month=timezone.now().month) SAME_YEAR = Q(start__year=timezone.now().year) sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR) position_durations = {} for session in sessions: position = session.facility + '_' + session.level if position in position_durations: position_durations[position] += session.duration else: position_durations[position] = session.duration sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True) return [{'position': position, 'hours': position_durations[position]} for position in sorted_positions] def get_daily_statistics(year, user=None): """ Returns a query dictionary of every day of the given year annotated with the controlling hours for that day. """ sessions = ControllerSession.objects.filter(start__year=year) if user: sessions = sessions.filter(user=user) return sessions.extra({'day': 'date(start)'}).values('day').annotate(value=Sum('duration'))
[ "django.utils.timezone.now", "django.db.models.DurationField", "django.db.models.Sum", "django.db.models.Q", "datetime.timedelta" ]
[((465, 479), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (477, 479), False, 'from django.utils import timezone\n'), ((501, 515), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (513, 515), False, 'from django.utils import timezone\n'), ((539, 574), 'django.db.models.Q', 'Q', ([], {'sessions__start__month': 'MONTH_NOW'}), '(sessions__start__month=MONTH_NOW)\n', (540, 574), False, 'from django.db.models import Sum, Q, DurationField\n'), ((595, 628), 'django.db.models.Q', 'Q', ([], {'sessions__start__year': 'YEAR_NOW'}), '(sessions__start__year=YEAR_NOW)\n', (596, 628), False, 'from django.db.models import Sum, Q, DurationField\n'), ((648, 712), 'django.db.models.Q', 'Q', ([], {'sessions__start__month': '(MONTH_NOW - 1 if MONTH_NOW > 1 else 12)'}), '(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else 12)\n', (649, 712), False, 'from django.db.models import Sum, Q, DurationField\n'), ((733, 801), 'django.db.models.Q', 'Q', ([], {'sessions__start__year': '(YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1)'}), '(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1)\n', (734, 801), False, 'from django.db.models import Sum, Q, DurationField\n'), ((826, 920), 'django.db.models.Q', 'Q', ([], {'sessions__start__month': '(MONTH_NOW - 2 if MONTH_NOW > 2 else 12 if MONTH_NOW > 1 else 11)'}), '(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else 12 if \n MONTH_NOW > 1 else 11)\n', (827, 920), False, 'from django.db.models import Sum, Q, DurationField\n'), ((941, 1009), 'django.db.models.Q', 'Q', ([], {'sessions__start__year': '(YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1)'}), '(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1)\n', (942, 1009), False, 'from django.db.models import Sum, Q, DurationField\n'), ((2131, 2187), 'django.db.models.Sum', 'Sum', (['"""sessions__duration"""'], {'filter': '(SAME_MONTH & SAME_YEAR)'}), "('sessions__duration', filter=SAME_MONTH & SAME_YEAR)\n", (2134, 2187), False, 'from django.db.models import Sum, Q, DurationField\n'), ((3340, 3355), 'django.db.models.Sum', 'Sum', (['"""duration"""'], {}), "('duration')\n", (3343, 3355), False, 'from django.db.models import Sum, Q, DurationField\n'), ((1067, 1111), 'django.db.models.Sum', 'Sum', (['"""sessions__duration"""'], {'filter': 'CURR_MONTH'}), "('sessions__duration', filter=CURR_MONTH)\n", (1070, 1111), False, 'from django.db.models import Sum, Q, DurationField\n'), ((1178, 1222), 'django.db.models.Sum', 'Sum', (['"""sessions__duration"""'], {'filter': 'PREV_MONTH'}), "('sessions__duration', filter=PREV_MONTH)\n", (1181, 1222), False, 'from django.db.models import Sum, Q, DurationField\n'), ((1294, 1343), 'django.db.models.Sum', 'Sum', (['"""sessions__duration"""'], {'filter': 'PREV_PREV_MONTH'}), "('sessions__duration', filter=PREV_PREV_MONTH)\n", (1297, 1343), False, 'from django.db.models import Sum, Q, DurationField\n'), ((1955, 1969), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1967, 1969), False, 'from django.utils import timezone\n'), ((2017, 2031), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2029, 2031), False, 'from django.utils import timezone\n'), ((2313, 2327), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2325, 2327), False, 'from django.utils import timezone\n'), ((2365, 2379), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2377, 2379), False, 'from django.utils import timezone\n'), ((1118, 1129), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (1127, 1129), 
False, 'from datetime import timedelta\n'), ((1131, 1146), 'django.db.models.DurationField', 'DurationField', ([], {}), '()\n', (1144, 1146), False, 'from django.db.models import Sum, Q, DurationField\n'), ((1229, 1240), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (1238, 1240), False, 'from datetime import timedelta\n'), ((1242, 1257), 'django.db.models.DurationField', 'DurationField', ([], {}), '()\n', (1255, 1257), False, 'from django.db.models import Sum, Q, DurationField\n'), ((1350, 1361), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (1359, 1361), False, 'from datetime import timedelta\n'), ((1363, 1378), 'django.db.models.DurationField', 'DurationField', ([], {}), '()\n', (1376, 1378), False, 'from django.db.models import Sum, Q, DurationField\n')]
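A hypothetical consumer of the helpers defined in the record above; the attribute and key names follow the annotations in that code, while the reporting function itself is an assumption added for illustration.

def print_activity_report(year):
    # curr_hours / prev_hours / prev_prev_hours are the annotations added by annotate_hours().
    for user in get_user_hours():
        print(user, user.curr_hours, user.prev_hours, user.prev_prev_hours)
    # get_daily_statistics() yields dicts with a 'day' key and a summed 'value' timedelta.
    for row in get_daily_statistics(year):
        print(row['day'], row['value'])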
# -*- coding: utf-8 -*- """ For testing neuromaps.stats functionality """ import numpy as np import pytest from neuromaps import stats @pytest.mark.xfail def test_compare_images(): assert False def test_permtest_metric(): rs = np.random.default_rng(12345678) x, y = rs.random(size=(2, 100)) r, p = stats.permtest_metric(x, y) assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192]) r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y]) assert np.allclose(r, [0.0345815411043023, 0.03338608427980476]) assert np.allclose(p, [0.7192807192807192, 0.7472527472527473]) @pytest.mark.parametrize('x, y, expected', [ # basic one-dimensional input (range(5), range(5), (1.0, 0.0)), # broadcasting occurs regardless of input order (np.stack([range(5), range(5, 0, -1)], 1), range(5), ([1.0, -1.0], [0.0, 0.0])), (range(5), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, -1.0], [0.0, 0.0])), # correlation between matching columns (np.stack([range(5), range(5, 0, -1)], 1), np.stack([range(5), range(5, 0, -1)], 1), ([1.0, 1.0], [0.0, 0.0])) ]) def test_efficient_pearsonr(x, y, expected): assert np.allclose(stats.efficient_pearsonr(x, y), expected) def test_efficient_pearsonr_errors(): with pytest.raises(ValueError): stats.efficient_pearsonr(range(4), range(5)) assert all(np.isnan(a) for a in stats.efficient_pearsonr([], []))
[ "neuromaps.stats.efficient_pearsonr", "numpy.allclose", "numpy.isnan", "numpy.random.default_rng", "pytest.raises", "neuromaps.stats.permtest_metric" ]
[((241, 272), 'numpy.random.default_rng', 'np.random.default_rng', (['(12345678)'], {}), '(12345678)\n', (262, 272), True, 'import numpy as np\n'), ((320, 347), 'neuromaps.stats.permtest_metric', 'stats.permtest_metric', (['x', 'y'], {}), '(x, y)\n', (341, 347), False, 'from neuromaps import stats\n'), ((359, 420), 'numpy.allclose', 'np.allclose', (['[r, p]', '[0.0345815411043023, 0.7192807192807192]'], {}), '([r, p], [0.0345815411043023, 0.7192807192807192])\n', (370, 420), True, 'import numpy as np\n'), ((433, 486), 'neuromaps.stats.permtest_metric', 'stats.permtest_metric', (['np.c_[x, x[::-1]]', 'np.c_[y, y]'], {}), '(np.c_[x, x[::-1]], np.c_[y, y])\n', (454, 486), False, 'from neuromaps import stats\n'), ((498, 555), 'numpy.allclose', 'np.allclose', (['r', '[0.0345815411043023, 0.03338608427980476]'], {}), '(r, [0.0345815411043023, 0.03338608427980476])\n', (509, 555), True, 'import numpy as np\n'), ((567, 623), 'numpy.allclose', 'np.allclose', (['p', '[0.7192807192807192, 0.7472527472527473]'], {}), '(p, [0.7192807192807192, 0.7472527472527473])\n', (578, 623), True, 'import numpy as np\n'), ((1214, 1244), 'neuromaps.stats.efficient_pearsonr', 'stats.efficient_pearsonr', (['x', 'y'], {}), '(x, y)\n', (1238, 1244), False, 'from neuromaps import stats\n'), ((1305, 1330), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1318, 1330), False, 'import pytest\n'), ((1401, 1412), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (1409, 1412), True, 'import numpy as np\n'), ((1422, 1454), 'neuromaps.stats.efficient_pearsonr', 'stats.efficient_pearsonr', (['[]', '[]'], {}), '([], [])\n', (1446, 1454), False, 'from neuromaps import stats\n')]
from openpyxl import Workbook from checkcel.validators import OntologyValidator, SetValidator, LinkedSetValidator from openpyxl.utils import get_column_letter from checkcel.checkplate import Checkplate class Checkerator(Checkplate): def __init__( self, output, **kwargs ): super(Checkerator, self).__init__(**kwargs) self.output = output def generate(self): wb = Workbook() current_data_column = 1 current_ontology_column = 1 current_set_column = 1 current_readme_row = 1 readme_sheet = wb.active readme_sheet.title = "README" data_sheet = wb.create_sheet(title="Data") ontology_sheet = None set_sheet = None set_columns = {} for column_name, validator in self.validators.items(): readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name)) current_readme_row += 1 data_sheet.cell(column=current_data_column, row=1, value=column_name) if isinstance(validator, OntologyValidator): if not ontology_sheet: ontology_sheet = wb.create_sheet(title="Ontologies") data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet) current_ontology_column += 1 elif isinstance(validator, SetValidator): # Total size, including separators must be < 256 if sum(len(i) for i in validator.valid_values) + len(validator.valid_values) - 1 > 256: if not set_sheet: set_sheet = wb.create_sheet(title="Sets") data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet) current_set_column += 1 else: data_validation = validator.generate(get_column_letter(current_data_column)) set_columns[column_name] = get_column_letter(current_data_column) elif isinstance(validator, LinkedSetValidator): if not set_sheet: set_sheet = wb.create_sheet(title="Sets") data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb) current_set_column += 1 set_columns[column_name] = get_column_letter(current_data_column) else: data_validation = validator.generate(get_column_letter(current_data_column)) if data_validation: data_sheet.add_data_validation(data_validation) current_data_column += 1 for sheet in wb.worksheets: for column_cells in sheet.columns: length = (max(len(self.as_text(cell.value)) for cell in column_cells) + 2) * 1.2 sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length wb.save(filename=self.output) def as_text(self, value): return str(value) if value is not None else ""
[ "openpyxl.utils.get_column_letter", "openpyxl.Workbook" ]
[((428, 438), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (436, 438), False, 'from openpyxl import Workbook\n'), ((1277, 1315), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_data_column'], {}), '(current_data_column)\n', (1294, 1315), False, 'from openpyxl.utils import get_column_letter\n'), ((1317, 1359), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_ontology_column'], {}), '(current_ontology_column)\n', (1334, 1359), False, 'from openpyxl.utils import get_column_letter\n'), ((2115, 2153), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_data_column'], {}), '(current_data_column)\n', (2132, 2153), False, 'from openpyxl.utils import get_column_letter\n'), ((2566, 2604), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_data_column'], {}), '(current_data_column)\n', (2583, 2604), False, 'from openpyxl.utils import get_column_letter\n'), ((3069, 3110), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['column_cells[0].column'], {}), '(column_cells[0].column)\n', (3086, 3110), False, 'from openpyxl.utils import get_column_letter\n'), ((1806, 1844), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_data_column'], {}), '(current_data_column)\n', (1823, 1844), False, 'from openpyxl.utils import get_column_letter\n'), ((1859, 1896), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_set_column'], {}), '(current_set_column)\n', (1876, 1896), False, 'from openpyxl.utils import get_column_letter\n'), ((2032, 2070), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_data_column'], {}), '(current_data_column)\n', (2049, 2070), False, 'from openpyxl.utils import get_column_letter\n'), ((2363, 2401), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_data_column'], {}), '(current_data_column)\n', (2380, 2401), False, 'from openpyxl.utils import get_column_letter\n'), ((2429, 2466), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_set_column'], {}), '(current_set_column)\n', (2446, 2466), False, 'from openpyxl.utils import get_column_letter\n'), ((2676, 2714), 'openpyxl.utils.get_column_letter', 'get_column_letter', (['current_data_column'], {}), '(current_data_column)\n', (2693, 2714), False, 'from openpyxl.utils import get_column_letter\n')]
from django.dispatch import receiver from django.db.models.signals import pre_save from django.db import models from authors.apps.articles.models import Article from authors.apps.profiles.models import Profile from simple_history.models import HistoricalRecords class Comment(models.Model): """ Handles CRUD on a comment that has been made on article """ body=models.TextField(max_length=500) createdAt=models.DateTimeField(auto_now_add=True) updatedAt=models.DateTimeField(auto_now=True) highlight_start = models.PositiveIntegerField(null=True, blank=True) highlight_end = models.PositiveIntegerField(null=True, blank=True) highlight_text = models.TextField(max_length=500, null=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by') article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article') comment_history = HistoricalRecords() class Meta: ordering=['-createdAt'] def __str__(self): return self.body class CommentReply(models.Model): """ Handles replying on a specific comment by made on an article """ comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies') reply_body=models.TextField() repliedOn=models.DateTimeField(auto_now_add=True) updatedOn=models.DateTimeField(auto_now=True) author=models.ForeignKey(Profile,on_delete=models.CASCADE) reply_history = HistoricalRecords() class Meta: ordering=['repliedOn'] def __str__(self): return self.reply_body class CommentLike(models.Model): """ Handles liking of a specific user by an authenticated user """ comment=models.ForeignKey(Comment,on_delete=models.CASCADE) like_status=models.BooleanField() liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE) def __str__(self): return "like by {}".format(self.liked_by) class CommentReplyLike(models.Model): """ Holds data for liking reply made a comment """ liked=models.BooleanField() reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE) comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE) def __str__(self): return "reply liked by {}".format(self.reply_like_by)
[ "django.db.models.TextField", "django.db.models.ForeignKey", "django.db.models.PositiveIntegerField", "django.db.models.BooleanField", "simple_history.models.HistoricalRecords", "django.db.models.DateTimeField" ]
[((377, 409), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (393, 409), False, 'from django.db import models\n'), ((424, 463), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (444, 463), False, 'from django.db import models\n'), ((478, 513), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (498, 513), False, 'from django.db import models\n'), ((536, 586), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (563, 586), False, 'from django.db import models\n'), ((607, 657), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (634, 657), False, 'from django.db import models\n'), ((679, 722), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)', 'null': '(True)'}), '(max_length=500, null=True)\n', (695, 722), False, 'from django.db import models\n'), ((734, 819), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.CASCADE', 'related_name': '"""authored_by"""'}), "(Profile, on_delete=models.CASCADE, related_name='authored_by'\n )\n", (751, 819), False, 'from django.db import models\n'), ((826, 902), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Article'], {'on_delete': 'models.CASCADE', 'related_name': '"""article"""'}), "(Article, on_delete=models.CASCADE, related_name='article')\n", (843, 902), False, 'from django.db import models\n'), ((924, 943), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (941, 943), False, 'from simple_history.models import HistoricalRecords\n'), ((1174, 1250), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Comment'], {'on_delete': 'models.CASCADE', 'related_name': '"""replies"""'}), "(Comment, on_delete=models.CASCADE, related_name='replies')\n", (1191, 1250), False, 'from django.db import models\n'), ((1264, 1282), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1280, 1282), False, 'from django.db import models\n'), ((1297, 1336), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1317, 1336), False, 'from django.db import models\n'), ((1351, 1386), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1371, 1386), False, 'from django.db import models\n'), ((1398, 1450), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.CASCADE'}), '(Profile, on_delete=models.CASCADE)\n', (1415, 1450), False, 'from django.db import models\n'), ((1470, 1489), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (1487, 1489), False, 'from simple_history.models import HistoricalRecords\n'), ((1723, 1775), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Comment'], {'on_delete': 'models.CASCADE'}), '(Comment, on_delete=models.CASCADE)\n', (1740, 1775), False, 'from django.db import models\n'), ((1791, 1812), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1810, 1812), False, 'from django.db import models\n'), ((1826, 1878), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.CASCADE'}), '(Profile, on_delete=models.CASCADE)\n', (1843, 
1878), False, 'from django.db import models\n'), ((2065, 2086), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (2084, 2086), False, 'from django.db import models\n'), ((2105, 2157), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.CASCADE'}), '(Profile, on_delete=models.CASCADE)\n', (2122, 2157), False, 'from django.db import models\n'), ((2175, 2232), 'django.db.models.ForeignKey', 'models.ForeignKey', (['CommentReply'], {'on_delete': 'models.CASCADE'}), '(CommentReply, on_delete=models.CASCADE)\n', (2192, 2232), False, 'from django.db import models\n')]
from django import forms


class InterestForm(forms.Form):
    amount = forms.FloatField(label='Amount')
    rate = forms.FloatField(label="Interest rate", min_value=5, max_value=50)
[ "django.forms.FloatField" ]
[((73, 105), 'django.forms.FloatField', 'forms.FloatField', ([], {'label': '"""Amount"""'}), "(label='Amount')\n", (89, 105), False, 'from django import forms\n'), ((119, 185), 'django.forms.FloatField', 'forms.FloatField', ([], {'label': '"""Interest rate"""', 'min_value': '(5)', 'max_value': '(50)'}), "(label='Interest rate', min_value=5, max_value=50)\n", (135, 185), False, 'from django import forms\n')]
from selenium import webdriver
import geckodriver_binary  # Adds geckodriver binary to path


def test_driver():
    driver = webdriver.Firefox()
    driver.get("http://www.python.org")
    assert "Python" in driver.title
    driver.quit()
[ "selenium.webdriver.Firefox" ]
[((125, 144), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (142, 144), False, 'from selenium import webdriver\n')]
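A hypothetical variant of the same check run headless, for example in CI; it uses Selenium's standard FirefoxOptions and still relies on importing geckodriver_binary only for its PATH side effect. It is a sketch, not code from the source row.

from selenium import webdriver
import geckodriver_binary  # noqa: F401  (adds geckodriver to PATH on import)


def test_driver_headless():
    options = webdriver.FirefoxOptions()
    options.add_argument("--headless")
    driver = webdriver.Firefox(options=options)
    driver.get("http://www.python.org")
    assert "Python" in driver.title
    driver.quit()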
############
# use_type
############

# type()
'''
The biggest difference between dynamic and static languages is that functions and
classes are not defined at compile time but created dynamically at run time.
'''
# Say we want to define a Hello class; we write a hello.py module:
'''
class Hello(object):
    def hello(self, name='world'):
        print('Hello, %s.' % name)
'''
# When the Python interpreter loads the hello module, it executes every statement in
# the module in order, and the result of that execution is a dynamically created
# Hello class object. Test it as follows:
from hello import Hello

h = Hello()
h.hello()
print(type(Hello))
print(type(h))

'''
The type() function returns the type of a type or of a variable: Hello is a class, so
its type is type, while h is an instance, so its type is class Hello.
We said a class definition is created dynamically at run time, and the way to create a
class is to call the type() function.
type() can both return the type of an object and create a brand-new type. For example,
we can create the Hello class through type() without ever writing
class Hello(object)...:
'''
def fn(self, name='world'):  # define the function first
    print('Hello, %s.' % name)

Hello = type('Hello', (object,), dict(hello=fn))  # create the Hello class

'''
To create a class object, type() takes exactly three arguments:
1. the name of the class;
2. the tuple of parent classes - Python supports multiple inheritance, and if there is
   only one parent, do not forget the single-element tuple syntax;
3. a dict binding method names to functions - here we bind the function fn to the
   method name hello.
A class created through type() is exactly the same as one written with a class
statement, because when the Python interpreter encounters a class definition it merely
scans the syntax of the definition and then calls type() to create the class.
Normally we define classes with class Xxx..., but type() also lets us create classes
dynamically. In other words, a dynamic language natively supports creating classes at
run time, which is a major difference from static languages: to create a class at run
time in a static language you would have to build a source-code string and invoke the
compiler, or generate bytecode with some tool - essentially dynamic compilation, which
is very complicated.
'''

#################
# metaclass
#################
'''
Besides creating classes dynamically with type(), you can also control how classes are
created by using a metaclass.
A metaclass is literally a "class of classes". The short explanation is:
once we have defined a class, we can create instances from it, so:
    first define the class, then create instances.
But what if we want to create classes themselves? Then the classes must be created from
a metaclass, so: first define the metaclass, then create the class.
Chained together: first define the metaclass, which can then create classes, and the
classes finally create instances.
So a metaclass lets you create or modify classes. Put differently, you can regard a
class as an "instance" created by its metaclass.
Metaclasses are the hardest piece of magic to understand and to use in Python's object
model. Normally you will not run into a situation that needs one, so it is fine if the
rest of this section does not fully make sense - you will rarely use it.
Let's start with a simple example: this metaclass gives our custom MyList class an
extra add method.
Define ListMetaclass. By convention, metaclass names end with "Metaclass" so that it is
immediately clear that this is a metaclass:
'''
# A metaclass is a template for classes, so it must derive from 'type':
class ListMetaclass(type):
    def __new__(cls, name, bases, attrs):
        attrs['add'] = lambda self, value: self.append(value)
        return type.__new__(cls, name, bases, attrs)

'''
With ListMetaclass in place, we tell Python to customise the class through
ListMetaclass when defining it, by passing the keyword argument metaclass:
'''
class MyList(list, metaclass=ListMetaclass):
    pass

'''
When we pass the metaclass keyword argument the magic takes effect: it instructs the
Python interpreter to create MyList through ListMetaclass.__new__().
There we can modify the class definition, for example add new methods, and then return
the modified definition.
The arguments received by __new__() are, in order:
1. the class object currently being created
2. the name of the class
3. the tuple of parent classes it inherits from
4. the collection of class attributes and methods
Check that MyList can call the add() method:
'''
L = MyList()
L.add(1)
print(L)

'''
A plain list has no add() method:
'''
# L2 = list()
# L2.add(1)

'''
What is the point of modifying a class dynamically? Wouldn't it be simpler to write the
add() method directly in the MyList definition?
Normally, yes, you should just write it directly; patching it in through a metaclass is
rather perverse.
However, you will eventually meet cases where a class definition can only be modified
through a metaclass. ORM is a typical example.
ORM stands for "Object Relational Mapping": it maps a row of a relational database to
an object, i.e. one class corresponds to one table. This makes the code simpler to
write because you no longer operate on SQL statements directly.
To build an ORM framework, all the model classes can only be defined dynamically,
because only the user of the framework can define the classes that match their table
structure.
Let's try to write an ORM framework.
The first step in writing a low-level module is to design the calling interface. For
example, a user of this ORM framework who wants a User class for the database table
User should be able to write code like this:

class User(Model):
    # map class attributes to table columns:
    id = IntegerField('id')
    name = StringField('username')
    email = StringField('email')
    password = StringField('password')

# create an instance:
u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>')
# save it to the database:
u.save()

Here the parent class Model and the attribute types StringField and IntegerField are
provided by the ORM framework, and the remaining magic such as save() is supplied
entirely by the metaclass. Although writing the metaclass is fairly involved, using the
ORM is extremely simple.
Now let's implement this ORM according to the interface above.
First define the Field class, which stores a table column's name and type:
'''
class Field(object):

    def __init__(self, name, column_type):
        self.name = name
        self.column_type = column_type

    def __str__(self):
        return '<%s:%s>' % (self.__class__.__name__, self.name)

'''
On top of Field, define the concrete field types such as StringField and IntegerField:
'''
class StringField(Field):

    def __init__(self, name):
        super(StringField, self).__init__(name, 'varchar(100)')

class IntegerField(Field):

    def __init__(self, name):
        super(IntegerField, self).__init__(name, 'bigint')

'''
The next step is to write the most complex part, the ModelMetaclass
(see the sketch after this record):
'''
[ "hello.Hello" ]
[((345, 352), 'hello.Hello', 'Hello', ([], {}), '()\n', (350, 352), False, 'from hello import Hello\n')]
import numpy as np import pytest import snc.environments.job_generators.discrete_review_job_generator \ as drjg import snc.environments.controlled_random_walk as crw import snc.environments.state_initialiser as si import snc.agents.general_heuristics.random_nonidling_agent \ as random_nonidling_agent import snc.agents.general_heuristics.longest_buffer_priority_agent \ as longest_priority_agent import snc.agents.general_heuristics.custom_activity_priority_agent \ as custom_priority_agent def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None, constituency_matrix=None): num_buffers = state.shape[0] arrival_rate = np.ones_like(state) if num_resources is None: num_resources = num_buffers if buffer_processing_matrix is None: buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources))) if constituency_matrix is None: constituency_matrix = np.zeros((num_resources, num_resources)) time_interval = 1 return { "cost_per_buffer": np.zeros_like(state), "capacity": np.zeros_like(state), "constituency_matrix": constituency_matrix, "job_generator": drjg.DeterministicDiscreteReviewJobGenerator( arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval ), "state_initialiser": si.DeterministicCRWStateInitialiser(state), "job_conservation_flag": True, "list_boundary_constraint_matrices": None, } def test_random_heuristic_agent_starving(): # Single server queue safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.ones((1, 1)) env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_random_heuristic_agent(): # Single server queue safety_stock = 1.0 state = 1.1 * np.ones((1, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.ones((1, 1)) env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_random_heuristic_agent_multiple_buffers_eye_condition_starving(): # Station scheduling three buffers, each of them having to be above safety stock safety_stock = 10.0 state = 5 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.ones((1, 3)) env_params["list_boundary_constraint_matrices"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_eye_condition(): # Station scheduling three buffers, each of them having to be above safety stock safety_stock = 1.0 state = 1.1 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.ones((1, 3)) env_params["list_boundary_constraint_matrices"] = [np.eye(3)] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_sum_condition_starving(): # Station scheduling three buffers, the sum of their size having to be above safety stock safety_stock = 
10.0 state = 3 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.ones((1, 3)) env_params["list_boundary_constraint_matrices"] = [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((3, 1))) def test_random_heuristic_agent_multiple_buffers_sum_condition(): # Station scheduling three buffers, the sum of their size having to be above safety stock safety_stock = 10.0 state = 5 * np.ones((3, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.ones((1, 3)) env_params["list_boundary_constraint_matrices"] = [np.ones((1, 3))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving(): # Two stations, each one scheduling two buffers, each of them having to be above safety stock. safety_stock = 10.0 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond(): # Two stations, each one scheduling two buffers, each of them having to be above safety stock. safety_stock = 9.9 state = 10 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.hstack((np.eye(2), np.zeros((2, 2)))), np.hstack((np.zeros((2, 2)), np.eye(2)))] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving(): # Two stations, each one scheduling two buffers, the sum of their size having to be above # safety stock. safety_stock = 10 state = 4 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((4, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers, the sum of their size having to be above safety # stock. 
safety_stock = 9.9 state = 5 * np.ones((4, 1)) env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 2 def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve(): # Two stations, each one scheduling two buffers, the sum of their size having to be above safety # stock. safety_stock = 9.9 state = np.array([4, 5, 5, 5])[:, None] env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[2:4]) == 1 and np.all(action[0:2] == np.zeros((2, 1))) def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers, the sum of their size having to be above safety # stock. safety_stock = 9.9 state = np.array([5, 5, 5, 4])[:, None] env_params = get_null_env_params(state) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_starving(): # Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 10.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.ones((1, 1)) env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert action == np.zeros((1, 1)) def test_priority_nonidling_heuristic_agent(): # Single server queue buffer_processing_matrix = - np.ones((1, 1)) safety_stock = 4.0 state = 5 * np.ones((1, 1)) env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.ones((1, 1)) env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name="LPAAgent") action = agent.map_state_to_actions(state) assert action == np.ones((1, 1)) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving(): # One station scheduling two buffers, one larger than the other, but both below safety stock. 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.ones((1, 2)) env_params["list_boundary_constraint_matrices"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.zeros((2, 1))) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve(): # One station scheduling two buffers, one larger than the other. Only the large one is above # safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([9, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.ones((1, 2)) env_params["list_boundary_constraint_matrices"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord(): # One station scheduling two buffers, one larger than the other. Only the large one is above # safety stock, swap order with respect to previous test. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11, 10])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.ones((1, 2)) env_params["list_boundary_constraint_matrices"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition(): # One station scheduling two buffers, one larger than the other, both above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.ones((1, 2)) env_params["list_boundary_constraint_matrices"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order(): # One station scheduling two buffers, one larger than the other, both above safety stock (swap # order with previous test). 
buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.ones((1, 2)) env_params["list_boundary_constraint_matrices"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition(): # One station scheduling two buffers, both equal and above safety stock. buffer_processing_matrix = - np.eye(2) safety_stock = 10.0 state = np.array([11, 11])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.ones((1, 2)) env_params["list_boundary_constraint_matrices"] = [np.eye(2)] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action) == 1 def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. The stations are connected in serial, such that # buffer 1 is connected with buffer 3, and 2 with 4. # Kind of condition doesn't matter since the largest buffer has to be above safety stock in this # agent. buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 20, 30])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 1])[:, None]) def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order(): # Two stations, each one scheduling two buffers. The stations are connected in serial, such that # buffer 1 is connected with buffer 3, and 2 with 4. # Kind of condition doesn't matter since the largest buffer has to be above safety stock in this # agent. buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([20, 30, 30, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([0, 1, 1, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve(): # Two stations, each one scheduling two buffers. The stations are connected in serial, such that # buffer 1 is connected with buffer 3, and 2 with 4. # Kind of condition doesn't matter since the largest buffer has to be above safety stock in this # agent. 
buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.all(action == np.array([1, 0, 0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond(): # Two stations, each one scheduling two buffers. The stations are connected in serial, such that # buffer 1 is connected with buffer 3, and 2 with 4. # Kind of condition doesn't matter since the largest buffer has to be above safety stock in this # agent. buffer_processing_matrix = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 30, 9, 5])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0, 0])[:, None]) def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources(): # Two stations, each one scheduling two buffers. The stations are connected in serial, such that # buffer 1 is connected with buffer 3, and 2 with 4. # Kind of condition doesn't matter since the largest buffer has to be above safety stock in this # agent. 
buffer_processing_matrix = np.array([[-1, 0, -1, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]) safety_stock = 10 state = np.array([30, 20, 5, 20])[:, None] env_params = get_null_env_params( state, buffer_processing_matrix=buffer_processing_matrix) env_params["constituency_matrix"] = np.array([[1, 1, 1, 0], [0, 0, 1, 1]]) env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0], [0, 0, 1, 0]]), np.array([[0, 0, 1, 1]])] env = crw.ControlledRandomWalk(**env_params) agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock) action = agent.map_state_to_actions(state) assert (action[0] + action[2] == 1) and (action[1] == 0) and (action[3] == 1) def test_priority_heuristic_agent_init_all_resources_given(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == priorities def test_priority_heuristic_agent_init_not_all_resources_given(): priorities = {0: 0, 2: 5} expected_priorities = {0: 0, 1: None, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) assert agent.priorities == expected_priorities def test_priority_heuristic_agent_init_wrong_activity_given(): priorities = {0: 0, 2: 1} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) with pytest.raises(AssertionError): _ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) def test_priority_heuristic_agent_sample_random_action_empty_possible_actions(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0], [0], [0], [0]]) activities = np.array([3, 4, 5, 6]) 
updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(action == updated_action) def test_priority_heuristic_agent_sample_random_action_one_possible_action(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [0.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., -1., 0., 0., 0., 0.], [0., -1., 0., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [0], [0], [1], [0], [0]]) expected_action = np.array([[1], [0], [1], [0], [1], [0], [0]]) activities = np.array([1, 2]) updated_action = agent.sample_random_actions(state=state, action=action, activities=activities) assert np.all(expected_action == updated_action) def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions(): np.random.seed(42) priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) action = np.array([[1], [0], [1], [0], [0], [0], [0]]) expected_action = np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]]) activities = np.array([3, 4, 5, 6]) num_sim = int(1e4) updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action, activities=activities) average_updated_action = np.sum(updated_action, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_no_priorities(): np.random.seed(42) priorities = {} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]) num_sim = int(1e4) actions = np.zeros((buffer_processing_matrix.shape[1], num_sim)) for i in np.arange(num_sim): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / float(num_sim) np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action, decimal=2) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer(): np.random.seed(41) priorities = {0: 0, 1: 2, 
2: 5} state = np.array([[10.], [10.], [0.]]) buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1., 0., -1.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., 0., 0., -1., 0.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) constituency_matrix_original = constituency_matrix.copy() env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]]) num_sim = 5e4 actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim))) for i in np.arange(int(num_sim)): actions[:, [i]] = agent.map_state_to_actions(state=state) average_action = np.sum(actions, axis=1) / num_sim np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action, decimal=2) assert np.all(constituency_matrix_original == constituency_matrix) assert np.all(constituency_matrix_original == env.constituency_matrix) def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer(): priorities = {0: 0, 1: 2, 2: 5} state = np.array([[10.], [10.], [10.]]) buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.], [0., -1., -1., 0., 0., 0., 0.], [0., 0., 0., -1., -1., -1., -1.]]) constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.], [0., 1., 1., 0., 0., 0., 0.], [0., 0., 0., 1., 1., 1., 1.]]) env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix, constituency_matrix=constituency_matrix) env = crw.ControlledRandomWalk(**env_params) agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities) expected_action = np.array([[1], [0], [1], [0], [0], [1], [0]]) action = agent.map_state_to_actions(state=state) assert np.all(action == expected_action)
[ "snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator", "numpy.zeros_like", "numpy.ones_like", "numpy.random.seed", "numpy.sum", "snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent", "numpy.zeros", "numpy.ones", "snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent", "pytest.raises", "snc.environments.state_initialiser.DeterministicCRWStateInitialiser", "numpy.array", "numpy.arange", "numpy.eye", "snc.environments.controlled_random_walk.ControlledRandomWalk", "snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent", "numpy.all" ]
[((696, 715), 'numpy.ones_like', 'np.ones_like', (['state'], {}), '(state)\n', (708, 715), True, 'import numpy as np\n'), ((1739, 1754), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (1746, 1754), True, 'import numpy as np\n'), ((1838, 1876), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (1862, 1876), True, 'import snc.environments.controlled_random_walk as crw\n'), ((1889, 1951), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (1932, 1951), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((2241, 2256), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2248, 2256), True, 'import numpy as np\n'), ((2340, 2378), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (2364, 2378), True, 'import snc.environments.controlled_random_walk as crw\n'), ((2391, 2453), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (2434, 2453), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((2840, 2855), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (2847, 2855), True, 'import numpy as np\n'), ((2933, 2971), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (2957, 2971), True, 'import snc.environments.controlled_random_walk as crw\n'), ((2984, 3046), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (3027, 3046), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((3434, 3449), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (3441, 3449), True, 'import numpy as np\n'), ((3527, 3565), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (3551, 3565), True, 'import snc.environments.controlled_random_walk as crw\n'), ((3578, 3640), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (3621, 3640), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((4030, 4045), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (4037, 4045), True, 'import numpy as np\n'), ((4129, 4167), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (4153, 4167), True, 'import snc.environments.controlled_random_walk as crw\n'), ((4180, 4242), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (4223, 4242), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((4638, 4653), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (4645, 4653), True, 'import numpy as np\n'), ((4737, 4775), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', 
([], {}), '(**env_params)\n', (4761, 4775), True, 'import snc.environments.controlled_random_walk as crw\n'), ((4788, 4850), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (4831, 4850), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((5259, 5297), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (5267, 5297), True, 'import numpy as np\n'), ((5503, 5541), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (5527, 5541), True, 'import snc.environments.controlled_random_walk as crw\n'), ((5554, 5616), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (5597, 5616), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((6031, 6069), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (6039, 6069), True, 'import numpy as np\n'), ((6275, 6313), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (6299, 6313), True, 'import snc.environments.controlled_random_walk as crw\n'), ((6326, 6388), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (6369, 6388), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((6810, 6848), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (6818, 6848), True, 'import numpy as np\n'), ((7022, 7060), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (7046, 7060), True, 'import snc.environments.controlled_random_walk as crw\n'), ((7073, 7135), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (7116, 7135), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((7564, 7602), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (7572, 7602), True, 'import numpy as np\n'), ((7776, 7814), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (7800, 7814), True, 'import snc.environments.controlled_random_walk as crw\n'), ((7827, 7889), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (7870, 7889), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((8324, 8362), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (8332, 8362), True, 'import numpy as np\n'), ((8536, 8574), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (8560, 8574), True, 'import snc.environments.controlled_random_walk as crw\n'), ((8587, 
8649), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (8630, 8649), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((9133, 9171), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (9141, 9171), True, 'import numpy as np\n'), ((9345, 9383), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (9369, 9383), True, 'import snc.environments.controlled_random_walk as crw\n'), ((9396, 9458), 'snc.agents.general_heuristics.random_nonidling_agent.RandomNonIdlingAgent', 'random_nonidling_agent.RandomNonIdlingAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (9439, 9458), True, 'import snc.agents.general_heuristics.random_nonidling_agent as random_nonidling_agent\n'), ((9919, 9934), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9926, 9934), True, 'import numpy as np\n'), ((10018, 10056), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (10042, 10056), True, 'import snc.environments.controlled_random_walk as crw\n'), ((10069, 10137), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (10118, 10137), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((10546, 10561), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10553, 10561), True, 'import numpy as np\n'), ((10645, 10683), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (10669, 10683), True, 'import snc.environments.controlled_random_walk as crw\n'), ((10696, 10786), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {'name': '"""LPAAgent"""'}), "(env, safety_stock, name=\n 'LPAAgent')\n", (10745, 10786), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((11302, 11317), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (11309, 11317), True, 'import numpy as np\n'), ((11395, 11433), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (11419, 11433), True, 'import snc.environments.controlled_random_walk as crw\n'), ((11446, 11514), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (11495, 11514), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((12072, 12087), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (12079, 12087), True, 'import numpy as np\n'), ((12165, 12203), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (12189, 12203), True, 'import snc.environments.controlled_random_walk as crw\n'), ((12216, 12284), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 
'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (12265, 12284), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((12898, 12913), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (12905, 12913), True, 'import numpy as np\n'), ((12991, 13029), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (13015, 13029), True, 'import snc.environments.controlled_random_walk as crw\n'), ((13042, 13110), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (13091, 13110), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((13638, 13653), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (13645, 13653), True, 'import numpy as np\n'), ((13731, 13769), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (13755, 13769), True, 'import snc.environments.controlled_random_walk as crw\n'), ((13782, 13850), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (13831, 13850), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((14430, 14445), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (14437, 14445), True, 'import numpy as np\n'), ((14523, 14561), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (14547, 14561), True, 'import snc.environments.controlled_random_walk as crw\n'), ((14574, 14642), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (14623, 14642), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((15161, 15176), 'numpy.ones', 'np.ones', (['(1, 2)'], {}), '((1, 2))\n', (15168, 15176), True, 'import numpy as np\n'), ((15254, 15292), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (15278, 15292), True, 'import snc.environments.controlled_random_walk as crw\n'), ((15305, 15373), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (15354, 15373), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((15849, 15919), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (15857, 15919), True, 'import numpy as np\n'), ((16257, 16295), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (16265, 16295), True, 'import numpy as np\n'), ((16469, 16507), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (16493, 16507), True, 'import snc.environments.controlled_random_walk 
as crw\n'), ((16520, 16588), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (16569, 16588), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((17100, 17170), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (17108, 17170), True, 'import numpy as np\n'), ((17508, 17546), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (17516, 17546), True, 'import numpy as np\n'), ((17720, 17758), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (17744, 17758), True, 'import snc.environments.controlled_random_walk as crw\n'), ((17771, 17839), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (17820, 17839), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((18349, 18419), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (18357, 18419), True, 'import numpy as np\n'), ((18755, 18793), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (18763, 18793), True, 'import numpy as np\n'), ((18967, 19005), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (18991, 19005), True, 'import snc.environments.controlled_random_walk as crw\n'), ((19018, 19086), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (19067, 19086), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((19600, 19670), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, 0, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (19608, 19670), True, 'import numpy as np\n'), ((20006, 20044), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 1]])\n', (20014, 20044), True, 'import numpy as np\n'), ((20218, 20256), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (20242, 20256), True, 'import snc.environments.controlled_random_walk as crw\n'), ((20269, 20337), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (20318, 20337), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((20868, 20939), 'numpy.array', 'np.array', (['[[-1, 0, -1, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]]'], {}), '([[-1, 0, -1, 0], [0, -1, 0, 0], [1, 0, -1, 0], [0, 1, 0, -1]])\n', (20876, 20939), True, 'import numpy as np\n'), ((21276, 21314), 'numpy.array', 'np.array', (['[[1, 1, 1, 0], [0, 0, 1, 1]]'], {}), '([[1, 1, 1, 0], [0, 
0, 1, 1]])\n', (21284, 21314), True, 'import numpy as np\n'), ((21502, 21540), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (21526, 21540), True, 'import snc.environments.controlled_random_walk as crw\n'), ((21553, 21621), 'snc.agents.general_heuristics.longest_buffer_priority_agent.LongestBufferPriorityAgent', 'longest_priority_agent.LongestBufferPriorityAgent', (['env', 'safety_stock'], {}), '(env, safety_stock)\n', (21602, 21621), True, 'import snc.agents.general_heuristics.longest_buffer_priority_agent as longest_priority_agent\n'), ((21908, 21942), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (21916, 21942), True, 'import numpy as np\n'), ((21971, 22103), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (21979, 22103), True, 'import numpy as np\n'), ((22187, 22313), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (22195, 22313), True, 'import numpy as np\n'), ((22550, 22588), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (22574, 22588), True, 'import snc.environments.controlled_random_walk as crw\n'), ((22601, 22667), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (22650, 22667), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((22869, 22903), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (22877, 22903), True, 'import numpy as np\n'), ((22932, 23064), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (22940, 23064), True, 'import numpy as np\n'), ((23148, 23274), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (23156, 23274), True, 'import numpy as np\n'), ((23511, 23549), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (23535, 23549), True, 'import snc.environments.controlled_random_walk as crw\n'), ((23562, 23628), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (23611, 23628), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((23788, 23822), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (23796, 
23822), True, 'import numpy as np\n'), ((23851, 23983), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (23859, 23983), True, 'import numpy as np\n'), ((24067, 24193), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (24075, 24193), True, 'import numpy as np\n'), ((24430, 24468), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (24454, 24468), True, 'import snc.environments.controlled_random_walk as crw\n'), ((24719, 24752), 'numpy.array', 'np.array', (['[[10.0], [10.0], [0.0]]'], {}), '([[10.0], [10.0], [0.0]])\n', (24727, 24752), True, 'import numpy as np\n'), ((24781, 24913), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (24789, 24913), True, 'import numpy as np\n'), ((24997, 25123), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (25005, 25123), True, 'import numpy as np\n'), ((25360, 25398), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (25384, 25398), True, 'import snc.environments.controlled_random_walk as crw\n'), ((25411, 25477), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (25460, 25477), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((25492, 25537), 'numpy.array', 'np.array', (['[[1], [0], [1], [0], [0], [0], [0]]'], {}), '([[1], [0], [1], [0], [0], [0], [0]])\n', (25500, 25537), True, 'import numpy as np\n'), ((25555, 25577), 'numpy.array', 'np.array', (['[3, 4, 5, 6]'], {}), '([3, 4, 5, 6])\n', (25563, 25577), True, 'import numpy as np\n'), ((25689, 25721), 'numpy.all', 'np.all', (['(action == updated_action)'], {}), '(action == updated_action)\n', (25695, 25721), True, 'import numpy as np\n'), ((25850, 25883), 'numpy.array', 'np.array', (['[[10.0], [0.0], [10.0]]'], {}), '([[10.0], [0.0], [10.0]])\n', (25858, 25883), True, 'import numpy as np\n'), ((25912, 26044), 'numpy.array', 'np.array', (['[[-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (25920, 26044), True, 'import numpy as np\n'), ((26128, 26254), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 
1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (26136, 26254), True, 'import numpy as np\n'), ((26491, 26529), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (26515, 26529), True, 'import snc.environments.controlled_random_walk as crw\n'), ((26542, 26608), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (26591, 26608), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((26623, 26668), 'numpy.array', 'np.array', (['[[1], [0], [0], [0], [1], [0], [0]]'], {}), '([[1], [0], [0], [0], [1], [0], [0]])\n', (26631, 26668), True, 'import numpy as np\n'), ((26691, 26736), 'numpy.array', 'np.array', (['[[1], [0], [1], [0], [1], [0], [0]]'], {}), '([[1], [0], [1], [0], [1], [0], [0]])\n', (26699, 26736), True, 'import numpy as np\n'), ((26754, 26770), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (26762, 26770), True, 'import numpy as np\n'), ((26882, 26923), 'numpy.all', 'np.all', (['(expected_action == updated_action)'], {}), '(expected_action == updated_action)\n', (26888, 26923), True, 'import numpy as np\n'), ((27014, 27032), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (27028, 27032), True, 'import numpy as np\n'), ((27081, 27115), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (27089, 27115), True, 'import numpy as np\n'), ((27144, 27276), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (27152, 27276), True, 'import numpy as np\n'), ((27360, 27486), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (27368, 27486), True, 'import numpy as np\n'), ((27723, 27761), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (27747, 27761), True, 'import snc.environments.controlled_random_walk as crw\n'), ((27774, 27840), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (27823, 27840), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((27855, 27900), 'numpy.array', 'np.array', (['[[1], [0], [1], [0], [0], [0], [0]]'], {}), '([[1], [0], [1], [0], [0], [0], [0]])\n', (27863, 27900), True, 'import numpy as np\n'), ((27923, 27980), 'numpy.array', 'np.array', (['[[1], [0], [1], [0.25], [0.25], [0.25], [0.25]]'], {}), '([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]])\n', (27931, 27980), True, 'import numpy as np\n'), ((27998, 28020), 'numpy.array', 'np.array', (['[3, 4, 5, 6]'], {}), '([3, 4, 5, 6])\n', (28006, 28020), True, 'import numpy as np\n'), ((28065, 28119), 'numpy.zeros', 'np.zeros', (['(buffer_processing_matrix.shape[1], num_sim)'], {}), '((buffer_processing_matrix.shape[1], num_sim))\n', (28073, 
28119), True, 'import numpy as np\n'), ((28133, 28151), 'numpy.arange', 'np.arange', (['num_sim'], {}), '(num_sim)\n', (28142, 28151), True, 'import numpy as np\n'), ((28630, 28648), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (28644, 28648), True, 'import numpy as np\n'), ((28681, 28715), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (28689, 28715), True, 'import numpy as np\n'), ((28744, 28876), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (28752, 28876), True, 'import numpy as np\n'), ((28960, 29086), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (28968, 29086), True, 'import numpy as np\n'), ((29323, 29361), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (29347, 29361), True, 'import snc.environments.controlled_random_walk as crw\n'), ((29374, 29440), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (29423, 29440), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((29464, 29525), 'numpy.array', 'np.array', (['[[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]]'], {}), '([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]])\n', (29472, 29525), True, 'import numpy as np\n'), ((29563, 29617), 'numpy.zeros', 'np.zeros', (['(buffer_processing_matrix.shape[1], num_sim)'], {}), '((buffer_processing_matrix.shape[1], num_sim))\n', (29571, 29617), True, 'import numpy as np\n'), ((29631, 29649), 'numpy.arange', 'np.arange', (['num_sim'], {}), '(num_sim)\n', (29640, 29649), True, 'import numpy as np\n'), ((30013, 30031), 'numpy.random.seed', 'np.random.seed', (['(41)'], {}), '(41)\n', (30027, 30031), True, 'import numpy as np\n'), ((30080, 30113), 'numpy.array', 'np.array', (['[[10.0], [10.0], [0.0]]'], {}), '([[10.0], [10.0], [0.0]])\n', (30088, 30113), True, 'import numpy as np\n'), ((30142, 30275), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, -1.0, -1.0, 0.0, -1.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0]]'], {}), '([[-1.0, 0.0, 0.0, -1.0, -1.0, 0.0, -1.0], [0.0, -1.0, -1.0, 0.0, \n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0]])\n', (30150, 30275), True, 'import numpy as np\n'), ((30358, 30484), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (30366, 30484), True, 'import numpy as np\n'), ((30783, 30821), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (30807, 30821), True, 'import snc.environments.controlled_random_walk as crw\n'), ((30834, 30900), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 
'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (30883, 30900), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((30932, 30992), 'numpy.array', 'np.array', (['[[1], [0.0], [1.0], [0.33], [0.33], [0.0], [0.33]]'], {}), '([[1], [0.0], [1.0], [0.33], [0.33], [0.0], [0.33]])\n', (30940, 30992), True, 'import numpy as np\n'), ((31401, 31460), 'numpy.all', 'np.all', (['(constituency_matrix_original == constituency_matrix)'], {}), '(constituency_matrix_original == constituency_matrix)\n', (31407, 31460), True, 'import numpy as np\n'), ((31472, 31535), 'numpy.all', 'np.all', (['(constituency_matrix_original == env.constituency_matrix)'], {}), '(constituency_matrix_original == env.constituency_matrix)\n', (31478, 31535), True, 'import numpy as np\n'), ((31672, 31706), 'numpy.array', 'np.array', (['[[10.0], [10.0], [10.0]]'], {}), '([[10.0], [10.0], [10.0]])\n', (31680, 31706), True, 'import numpy as np\n'), ((31735, 31867), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -1.0, -1.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0]])\n', (31743, 31867), True, 'import numpy as np\n'), ((31951, 32077), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])\n', (31959, 32077), True, 'import numpy as np\n'), ((32314, 32352), 'snc.environments.controlled_random_walk.ControlledRandomWalk', 'crw.ControlledRandomWalk', ([], {}), '(**env_params)\n', (32338, 32352), True, 'import snc.environments.controlled_random_walk as crw\n'), ((32365, 32431), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (32414, 32431), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((32455, 32500), 'numpy.array', 'np.array', (['[[1], [0], [1], [0], [0], [1], [0]]'], {}), '([[1], [0], [1], [0], [0], [1], [0]])\n', (32463, 32500), True, 'import numpy as np\n'), ((32565, 32598), 'numpy.all', 'np.all', (['(action == expected_action)'], {}), '(action == expected_action)\n', (32571, 32598), True, 'import numpy as np\n'), ((972, 1012), 'numpy.zeros', 'np.zeros', (['(num_resources, num_resources)'], {}), '((num_resources, num_resources))\n', (980, 1012), True, 'import numpy as np\n'), ((1076, 1096), 'numpy.zeros_like', 'np.zeros_like', (['state'], {}), '(state)\n', (1089, 1096), True, 'import numpy as np\n'), ((1118, 1138), 'numpy.zeros_like', 'np.zeros_like', (['state'], {}), '(state)\n', (1131, 1138), True, 'import numpy as np\n'), ((1217, 1338), 'snc.environments.job_generators.discrete_review_job_generator.DeterministicDiscreteReviewJobGenerator', 'drjg.DeterministicDiscreteReviewJobGenerator', (['arrival_rate', 'buffer_processing_matrix'], {'sim_time_interval': 'time_interval'}), '(arrival_rate,\n buffer_processing_matrix, sim_time_interval=time_interval)\n', (1261, 1338), True, 'import snc.environments.job_generators.discrete_review_job_generator as drjg\n'), ((1387, 1429), 
'snc.environments.state_initialiser.DeterministicCRWStateInitialiser', 'si.DeterministicCRWStateInitialiser', (['state'], {}), '(state)\n', (1422, 1429), True, 'import snc.environments.state_initialiser as si\n'), ((1639, 1654), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (1646, 1654), True, 'import numpy as np\n'), ((1810, 1825), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (1817, 1825), True, 'import numpy as np\n'), ((2020, 2036), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2028, 2036), True, 'import numpy as np\n'), ((2141, 2156), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2148, 2156), True, 'import numpy as np\n'), ((2312, 2327), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2319, 2327), True, 'import numpy as np\n'), ((2522, 2537), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2529, 2537), True, 'import numpy as np\n'), ((2740, 2755), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (2747, 2755), True, 'import numpy as np\n'), ((2911, 2920), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2917, 2920), True, 'import numpy as np\n'), ((3334, 3349), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (3341, 3349), True, 'import numpy as np\n'), ((3505, 3514), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3511, 3514), True, 'import numpy as np\n'), ((3699, 3713), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (3705, 3713), True, 'import numpy as np\n'), ((3930, 3945), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (3937, 3945), True, 'import numpy as np\n'), ((4101, 4116), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (4108, 4116), True, 'import numpy as np\n'), ((4538, 4553), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (4545, 4553), True, 'import numpy as np\n'), ((4709, 4724), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (4716, 4724), True, 'import numpy as np\n'), ((4909, 4923), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (4915, 4923), True, 'import numpy as np\n'), ((5159, 5174), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (5166, 5174), True, 'import numpy as np\n'), ((5931, 5946), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (5938, 5946), True, 'import numpy as np\n'), ((6447, 6461), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (6453, 6461), True, 'import numpy as np\n'), ((6710, 6725), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (6717, 6725), True, 'import numpy as np\n'), ((6904, 6928), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (6912, 6928), True, 'import numpy as np\n'), ((6985, 7009), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (6993, 7009), True, 'import numpy as np\n'), ((7464, 7479), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (7471, 7479), True, 'import numpy as np\n'), ((7658, 7682), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (7666, 7682), True, 'import numpy as np\n'), ((7739, 7763), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (7747, 7763), True, 'import numpy as np\n'), ((7948, 7962), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (7954, 7962), True, 'import numpy as np\n'), ((8208, 8230), 'numpy.array', 'np.array', (['[4, 5, 5, 5]'], {}), '([4, 5, 5, 5])\n', (8216, 8230), True, 'import numpy as np\n'), ((8418, 8442), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (8426, 8442), True, 'import 
numpy as np\n'), ((8499, 8523), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (8507, 8523), True, 'import numpy as np\n'), ((9017, 9039), 'numpy.array', 'np.array', (['[5, 5, 5, 4]'], {}), '([5, 5, 5, 4])\n', (9025, 9039), True, 'import numpy as np\n'), ((9227, 9251), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (9235, 9251), True, 'import numpy as np\n'), ((9308, 9332), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (9316, 9332), True, 'import numpy as np\n'), ((9703, 9718), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9710, 9718), True, 'import numpy as np\n'), ((9759, 9774), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9766, 9774), True, 'import numpy as np\n'), ((9990, 10005), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9997, 10005), True, 'import numpy as np\n'), ((10206, 10222), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (10214, 10222), True, 'import numpy as np\n'), ((10331, 10346), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10338, 10346), True, 'import numpy as np\n'), ((10386, 10401), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10393, 10401), True, 'import numpy as np\n'), ((10617, 10632), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10624, 10632), True, 'import numpy as np\n'), ((10850, 10865), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (10857, 10865), True, 'import numpy as np\n'), ((11086, 11095), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11092, 11095), True, 'import numpy as np\n'), ((11132, 11148), 'numpy.array', 'np.array', (['[9, 5]'], {}), '([9, 5])\n', (11140, 11148), True, 'import numpy as np\n'), ((11373, 11382), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11379, 11382), True, 'import numpy as np\n'), ((11855, 11864), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11861, 11864), True, 'import numpy as np\n'), ((11901, 11918), 'numpy.array', 'np.array', (['[9, 11]'], {}), '([9, 11])\n', (11909, 11918), True, 'import numpy as np\n'), ((12143, 12152), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (12149, 12152), True, 'import numpy as np\n'), ((12680, 12689), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (12686, 12689), True, 'import numpy as np\n'), ((12726, 12744), 'numpy.array', 'np.array', (['[11, 10]'], {}), '([11, 10])\n', (12734, 12744), True, 'import numpy as np\n'), ((12969, 12978), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (12975, 12978), True, 'import numpy as np\n'), ((13420, 13429), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (13426, 13429), True, 'import numpy as np\n'), ((13466, 13484), 'numpy.array', 'np.array', (['[30, 20]'], {}), '([30, 20])\n', (13474, 13484), True, 'import numpy as np\n'), ((13709, 13718), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (13715, 13718), True, 'import numpy as np\n'), ((14212, 14221), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (14218, 14221), True, 'import numpy as np\n'), ((14258, 14276), 'numpy.array', 'np.array', (['[20, 30]'], {}), '([20, 30])\n', (14266, 14276), True, 'import numpy as np\n'), ((14501, 14510), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (14507, 14510), True, 'import numpy as np\n'), ((14943, 14952), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (14949, 14952), True, 'import numpy as np\n'), ((14989, 15007), 'numpy.array', 'np.array', (['[11, 11]'], {}), '([11, 11])\n', (14997, 15007), True, 'import numpy as np\n'), ((15232, 15241), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', 
(15238, 15241), True, 'import numpy as np\n'), ((15432, 15446), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (15438, 15446), True, 'import numpy as np\n'), ((16077, 16103), 'numpy.array', 'np.array', (['[30, 20, 20, 30]'], {}), '([30, 20, 20, 30])\n', (16085, 16103), True, 'import numpy as np\n'), ((16351, 16375), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (16359, 16375), True, 'import numpy as np\n'), ((16432, 16456), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (16440, 16456), True, 'import numpy as np\n'), ((17328, 17354), 'numpy.array', 'np.array', (['[20, 30, 30, 20]'], {}), '([20, 30, 30, 20])\n', (17336, 17354), True, 'import numpy as np\n'), ((17602, 17626), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (17610, 17626), True, 'import numpy as np\n'), ((17683, 17707), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (17691, 17707), True, 'import numpy as np\n'), ((18577, 18601), 'numpy.array', 'np.array', (['[30, 20, 9, 5]'], {}), '([30, 20, 9, 5])\n', (18585, 18601), True, 'import numpy as np\n'), ((18849, 18873), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (18857, 18873), True, 'import numpy as np\n'), ((18930, 18954), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (18938, 18954), True, 'import numpy as np\n'), ((19828, 19852), 'numpy.array', 'np.array', (['[30, 30, 9, 5]'], {}), '([30, 30, 9, 5])\n', (19836, 19852), True, 'import numpy as np\n'), ((20100, 20124), 'numpy.array', 'np.array', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (20108, 20124), True, 'import numpy as np\n'), ((20181, 20205), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (20189, 20205), True, 'import numpy as np\n'), ((21097, 21122), 'numpy.array', 'np.array', (['[30, 20, 5, 20]'], {}), '([30, 20, 5, 20])\n', (21105, 21122), True, 'import numpy as np\n'), ((21370, 21408), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 1, 0]]'], {}), '([[1, 1, 0, 0], [0, 0, 1, 0]])\n', (21378, 21408), True, 'import numpy as np\n'), ((21465, 21489), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (21473, 21489), True, 'import numpy as np\n'), ((24478, 24507), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (24491, 24507), False, 'import pytest\n'), ((24521, 24587), 'snc.agents.general_heuristics.custom_activity_priority_agent.CustomActivityPriorityAgent', 'custom_priority_agent.CustomActivityPriorityAgent', (['env', 'priorities'], {}), '(env, priorities)\n', (24570, 24587), True, 'import snc.agents.general_heuristics.custom_activity_priority_agent as custom_priority_agent\n'), ((28355, 28385), 'numpy.sum', 'np.sum', (['updated_action'], {'axis': '(1)'}), '(updated_action, axis=1)\n', (28361, 28385), True, 'import numpy as np\n'), ((29738, 29761), 'numpy.sum', 'np.sum', (['actions'], {'axis': '(1)'}), '(actions, axis=1)\n', (29744, 29761), True, 'import numpy as np\n'), ((31207, 31230), 'numpy.sum', 'np.sum', (['actions'], {'axis': '(1)'}), '(actions, axis=1)\n', (31213, 31230), True, 'import numpy as np\n'), ((3122, 3138), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (3130, 3138), True, 'import numpy as np\n'), ((4318, 4334), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (4326, 4334), True, 'import numpy as np\n'), ((5692, 5708), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (5700, 5708), True, 'import numpy as 
np\n'), ((7211, 7227), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (7219, 7227), True, 'import numpy as np\n'), ((8708, 8727), 'numpy.sum', 'np.sum', (['action[2:4]'], {}), '(action[2:4])\n', (8714, 8727), True, 'import numpy as np\n'), ((9517, 9536), 'numpy.sum', 'np.sum', (['action[0:2]'], {}), '(action[0:2])\n', (9523, 9536), True, 'import numpy as np\n'), ((11590, 11606), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (11598, 11606), True, 'import numpy as np\n'), ((20396, 20415), 'numpy.sum', 'np.sum', (['action[0:2]'], {}), '(action[0:2])\n', (20402, 20415), True, 'import numpy as np\n'), ((867, 904), 'numpy.ones', 'np.ones', (['(num_buffers, num_resources)'], {}), '((num_buffers, num_resources))\n', (874, 904), True, 'import numpy as np\n'), ((5364, 5373), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5370, 5373), True, 'import numpy as np\n'), ((5375, 5391), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (5383, 5391), True, 'import numpy as np\n'), ((5461, 5477), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (5469, 5477), True, 'import numpy as np\n'), ((5479, 5488), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5485, 5488), True, 'import numpy as np\n'), ((6136, 6145), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6142, 6145), True, 'import numpy as np\n'), ((6147, 6163), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (6155, 6163), True, 'import numpy as np\n'), ((6233, 6249), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (6241, 6249), True, 'import numpy as np\n'), ((6251, 6260), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6257, 6260), True, 'import numpy as np\n'), ((8759, 8775), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (8767, 8775), True, 'import numpy as np\n'), ((9568, 9584), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (9576, 9584), True, 'import numpy as np\n'), ((12360, 12376), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (12368, 12376), True, 'import numpy as np\n'), ((13186, 13202), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (13194, 13202), True, 'import numpy as np\n'), ((13926, 13942), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (13934, 13942), True, 'import numpy as np\n'), ((14718, 14734), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (14726, 14734), True, 'import numpy as np\n'), ((16664, 16686), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (16672, 16686), True, 'import numpy as np\n'), ((17915, 17937), 'numpy.array', 'np.array', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (17923, 17937), True, 'import numpy as np\n'), ((19162, 19184), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (19170, 19184), True, 'import numpy as np\n'), ((20447, 20463), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (20455, 20463), True, 'import numpy as np\n')]
import torch
from torchvision.transforms import Compose,Normalize,RandomCrop,RandomResizedCrop,Resize,RandomHorizontalFlip, ToTensor
from torchvision import transforms


def get_transforms():
    normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
    transform = Compose([normalize])
    return transform
[ "torchvision.transforms.Normalize", "torchvision.transforms.Compose" ]
[((208, 272), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (217, 272), False, 'from torchvision.transforms import Compose, Normalize, RandomCrop, RandomResizedCrop, Resize, RandomHorizontalFlip, ToTensor\n'), ((288, 308), 'torchvision.transforms.Compose', 'Compose', (['[normalize]'], {}), '([normalize])\n', (295, 308), False, 'from torchvision.transforms import Compose, Normalize, RandomCrop, RandomResizedCrop, Resize, RandomHorizontalFlip, ToTensor\n')]
import numpy as np
import matplotlib.pyplot as plt
import pprint


def missingIsNan(s):
    return np.nan if s == b'?' else float(s)


def makeStandardize(X):
    means = X.mean(axis = 0)
    stds = X.std(axis = 0)

    def standardize(origX):
        return (origX - means) / stds

    def unstandardize(stdX):
        return stds * stdX + means

    return (standardize, unstandardize)


if __name__ == '__main__':
    # 1. Load the data.
    data = np.loadtxt("Data\\auto-mpg.data", usecols = range(8), converters = {3: missingIsNan})

    # 2. 'Clean' the data.
    Cdata = data[~np.isnan(data).any(axis = 1)]

    # 3. Split it into input (X) and target (T)
    # Target = mpg (first column)
    # Input = remaining - columns 2 to 7
    T = Cdata[:, 0:1]
    X = Cdata[:, 1:]

    # 4. Append column of 1s to X
    # X1 = np.insert(X, 0, 1, 1)

    # 4. Split the data into training (80 %) and testing data (20 %)
    nRows = X.shape[0]
    nTrain = int(round(0.8*nRows))
    nTest = nRows - nTrain

    # Shuffle row numbers
    rows = np.arange(nRows)
    np.random.shuffle(rows)

    trainIndices = rows[:nTrain]
    testIndices = rows[nTrain:]

    # Check that training and testing sets are disjoint
    # print(np.intersect1d(trainIndices, testIndices))

    Xtrain = X[trainIndices, :]
    Ttrain = T[trainIndices, :]
    Xtest = X[testIndices, :]
    Ttest = T[testIndices, :]

    # 5. Standardize
    (standardize, unstandardize) = makeStandardize(Xtrain)
    XtrainS = standardize(Xtrain)
    XtestS = standardize(Xtest)

    # 6. Tack column of 1s
    XtrainS1 = np.insert(XtrainS, 0, 1, 1)
    XtestS1 = np.insert(XtestS, 0, 1, 1)

    # 7. Find weights (solve for w)
    w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0]

    # 8. Predict
    predict = XtestS1 @ w

    # 9. Compute RSME
    rsme = np.sqrt(np.mean((predict - Ttest)**2))
    print(rsme)
[ "numpy.linalg.lstsq", "numpy.isnan", "numpy.insert", "numpy.mean", "numpy.arange", "numpy.random.shuffle" ]
[((1004, 1020), 'numpy.arange', 'np.arange', (['nRows'], {}), '(nRows)\n', (1013, 1020), True, 'import numpy as np\n'), ((1023, 1046), 'numpy.random.shuffle', 'np.random.shuffle', (['rows'], {}), '(rows)\n', (1040, 1046), True, 'import numpy as np\n'), ((1517, 1544), 'numpy.insert', 'np.insert', (['XtrainS', '(0)', '(1)', '(1)'], {}), '(XtrainS, 0, 1, 1)\n', (1526, 1544), True, 'import numpy as np\n'), ((1557, 1583), 'numpy.insert', 'np.insert', (['XtestS', '(0)', '(1)', '(1)'], {}), '(XtestS, 0, 1, 1)\n', (1566, 1583), True, 'import numpy as np\n'), ((1627, 1698), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['(XtrainS1.T @ XtrainS1)', '(XtrainS1.T @ Ttrain)'], {'rcond': 'None'}), '(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond=None)\n', (1642, 1698), True, 'import numpy as np\n'), ((1784, 1815), 'numpy.mean', 'np.mean', (['((predict - Ttest) ** 2)'], {}), '((predict - Ttest) ** 2)\n', (1791, 1815), True, 'import numpy as np\n'), ((563, 577), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (571, 577), True, 'import numpy as np\n')]
import logging

from abc_core.database.sqllite_client import SQLLite
from abc_core.utils.logger_client import get_basis_logger_config


def main():
    logging.basicConfig(**get_basis_logger_config())

    db = SQLLite(filename="../../data/application.db")

    res = db.select("SELECT * FROM blogs1")
    print(res)

    # for i in range(1):
    #     db.insert(
    #         query="INSERT INTO blogs VALUES (?,?,?,?,?);",
    #         data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is a secret',3)
    #     )

    # res = db.select("SELECT * FROM blogs WHERE public >= 3")
    # print(res)

    # (id text not null primary key, date text, title text, content text, public integer

    db.close_connection()


if __name__ == "__main__":
    main()
[ "abc_core.database.sqllite_client.SQLLite", "abc_core.utils.logger_client.get_basis_logger_config" ]
[((210, 255), 'abc_core.database.sqllite_client.SQLLite', 'SQLLite', ([], {'filename': '"""../../data/application.db"""'}), "(filename='../../data/application.db')\n", (217, 255), False, 'from abc_core.database.sqllite_client import SQLLite\n'), ((174, 199), 'abc_core.utils.logger_client.get_basis_logger_config', 'get_basis_logger_config', ([], {}), '()\n', (197, 199), False, 'from abc_core.utils.logger_client import get_basis_logger_config\n')]
import torch.nn as nn
from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial


class BBBSqueezeNet(nn.Module):
    """
    SqueezeNet with slightly modified Fire modules and Bayesian layers.
    """
    def __init__(self, outputs, inputs):
        super(BBBSqueezeNet, self).__init__()

        self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        # Fire module 1
        self.squeeze1 = BBBConv2d(64, 16, kernel_size=1)
        self.squeeze_activation1 = nn.Softplus()
        self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1)
        self.expand3x3_activation1 = nn.Softplus()

        # Fire module 2
        self.squeeze2 = BBBConv2d(128, 16, kernel_size=1)
        self.squeeze_activation2 = nn.Softplus()
        self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1)
        self.expand3x3_activation2 = nn.Softplus()

        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        # Fire module 3
        self.squeeze3 = BBBConv2d(128, 32, kernel_size=1)
        self.squeeze_activation3 = nn.Softplus()
        self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1)
        self.expand3x3_activation3 = nn.Softplus()

        # Fire module 4
        self.squeeze4 = BBBConv2d(256, 32, kernel_size=1)
        self.squeeze_activation4 = nn.Softplus()
        self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1)
        self.expand3x3_activation4 = nn.Softplus()

        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        # Fire module 5
        self.squeeze5 = BBBConv2d(256, 48, kernel_size=1)
        self.squeeze_activation5 = nn.Softplus()
        self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1)
        self.expand3x3_activation5 = nn.Softplus()

        # Fire module 6
        self.squeeze6 = BBBConv2d(384, 48, kernel_size=1)
        self.squeeze_activation6 = nn.Softplus()
        self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1)
        self.expand3x3_activation6 = nn.Softplus()

        # Fire module 7
        self.squeeze7 = BBBConv2d(384, 64, kernel_size=1)
        self.squeeze_activation7 = nn.Softplus()
        self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1)
        self.expand3x3_activation7 = nn.Softplus()

        # Fire module 8
        self.squeeze8 = BBBConv2d(512, 64, kernel_size=1)
        self.squeeze_activation8 = nn.Softplus()
        self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1)
        self.expand3x3_activation8 = nn.Softplus()

        self.drop1 = nn.Dropout(p=0.5)
        self.conv2 = BBBConv2d(512, outputs, kernel_size=1)
        self.soft2 = nn.Softplus()

        self.flatten = FlattenLayer(13 * 13 * 100)
        self.fc1 = BBBLinearFactorial(13 * 13 * 100, outputs)

        layers = [self.conv1, self.soft1, self.pool1,
                  self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1,
                  self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2,
                  self.pool2,
                  self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3,
                  self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4,
                  self.pool3,
                  self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5,
                  self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6,
                  self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7,
                  self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8,
                  self.drop1, self.conv2, self.soft2,
                  self.flatten, self.fc1]

        self.layers = nn.ModuleList(layers)

    def probforward(self, x):
        'Forward pass with Bayesian weights'
        kl = 0
        for layer in self.layers:
            if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
                x, _kl, = layer.convprobforward(x)
                kl += _kl
            elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
                x, _kl, = layer.fcprobforward(x)
                kl += _kl
            else:
                x = layer(x)
        logits = x
        print('logits', logits)
        return logits, kl
[ "torch.nn.Dropout", "torch.nn.ModuleList", "utils.BBBlayers.BBBConv2d", "torch.nn.Softplus", "utils.BBBlayers.FlattenLayer", "torch.nn.MaxPool2d", "utils.BBBlayers.BBBLinearFactorial" ]
[((325, 371), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['inputs', '(64)'], {'kernel_size': '(3)', 'stride': '(2)'}), '(inputs, 64, kernel_size=3, stride=2)\n', (334, 371), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((393, 406), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (404, 406), True, 'import torch.nn as nn\n'), ((428, 481), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, ceil_mode=True)\n', (440, 481), True, 'import torch.nn as nn\n'), ((531, 563), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(64)', '(16)'], {'kernel_size': '(1)'}), '(64, 16, kernel_size=1)\n', (540, 563), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((599, 612), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (610, 612), True, 'import torch.nn as nn\n'), ((640, 684), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(16)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(16, 128, kernel_size=3, padding=1)\n', (649, 684), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((722, 735), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (733, 735), True, 'import torch.nn as nn\n'), ((785, 818), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(128)', '(16)'], {'kernel_size': '(1)'}), '(128, 16, kernel_size=1)\n', (794, 818), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((854, 867), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (865, 867), True, 'import torch.nn as nn\n'), ((895, 939), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(16)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(16, 128, kernel_size=3, padding=1)\n', (904, 939), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((977, 990), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (988, 990), True, 'import torch.nn as nn\n'), ((1013, 1066), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, ceil_mode=True)\n', (1025, 1066), True, 'import torch.nn as nn\n'), ((1116, 1149), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(128)', '(32)'], {'kernel_size': '(1)'}), '(128, 32, kernel_size=1)\n', (1125, 1149), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((1185, 1198), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (1196, 1198), True, 'import torch.nn as nn\n'), ((1226, 1270), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(32)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(32, 256, kernel_size=3, padding=1)\n', (1235, 1270), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((1308, 1321), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (1319, 1321), True, 'import torch.nn as nn\n'), ((1371, 1404), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(256)', '(32)'], {'kernel_size': '(1)'}), '(256, 32, kernel_size=1)\n', (1380, 1404), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((1440, 1453), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (1451, 1453), True, 'import torch.nn as nn\n'), ((1481, 1525), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(32)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(32, 256, kernel_size=3, padding=1)\n', (1490, 1525), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), 
((1563, 1576), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (1574, 1576), True, 'import torch.nn as nn\n'), ((1599, 1652), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, ceil_mode=True)\n', (1611, 1652), True, 'import torch.nn as nn\n'), ((1702, 1735), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(256)', '(48)'], {'kernel_size': '(1)'}), '(256, 48, kernel_size=1)\n', (1711, 1735), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((1771, 1784), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (1782, 1784), True, 'import torch.nn as nn\n'), ((1812, 1856), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(48)', '(384)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(48, 384, kernel_size=3, padding=1)\n', (1821, 1856), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((1894, 1907), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (1905, 1907), True, 'import torch.nn as nn\n'), ((1957, 1990), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(384)', '(48)'], {'kernel_size': '(1)'}), '(384, 48, kernel_size=1)\n', (1966, 1990), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((2026, 2039), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (2037, 2039), True, 'import torch.nn as nn\n'), ((2067, 2111), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(48)', '(384)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(48, 384, kernel_size=3, padding=1)\n', (2076, 2111), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((2149, 2162), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (2160, 2162), True, 'import torch.nn as nn\n'), ((2212, 2245), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(384)', '(64)'], {'kernel_size': '(1)'}), '(384, 64, kernel_size=1)\n', (2221, 2245), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((2281, 2294), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (2292, 2294), True, 'import torch.nn as nn\n'), ((2322, 2366), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(64)', '(512)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(64, 512, kernel_size=3, padding=1)\n', (2331, 2366), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((2404, 2417), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (2415, 2417), True, 'import torch.nn as nn\n'), ((2467, 2500), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(512)', '(64)'], {'kernel_size': '(1)'}), '(512, 64, kernel_size=1)\n', (2476, 2500), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((2536, 2549), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (2547, 2549), True, 'import torch.nn as nn\n'), ((2577, 2621), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(64)', '(512)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(64, 512, kernel_size=3, padding=1)\n', (2586, 2621), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((2659, 2672), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (2670, 2672), True, 'import torch.nn as nn\n'), ((2695, 2712), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2705, 2712), True, 'import torch.nn as nn\n'), ((2734, 2772), 'utils.BBBlayers.BBBConv2d', 'BBBConv2d', (['(512)', 'outputs'], {'kernel_size': '(1)'}), '(512, outputs, kernel_size=1)\n', (2743, 2772), False, 'from utils.BBBlayers 
import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((2794, 2807), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (2805, 2807), True, 'import torch.nn as nn\n'), ((2831, 2858), 'utils.BBBlayers.FlattenLayer', 'FlattenLayer', (['(13 * 13 * 100)'], {}), '(13 * 13 * 100)\n', (2843, 2858), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((2878, 2920), 'utils.BBBlayers.BBBLinearFactorial', 'BBBLinearFactorial', (['(13 * 13 * 100)', 'outputs'], {}), '(13 * 13 * 100, outputs)\n', (2896, 2920), False, 'from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial\n'), ((3977, 3998), 'torch.nn.ModuleList', 'nn.ModuleList', (['layers'], {}), '(layers)\n', (3990, 3998), True, 'import torch.nn as nn\n')]
from django.contrib import admin

from .models import Balance


class BalanceAdmin(admin.ModelAdmin):
    list_display = ('balance',)


admin.site.register(Balance)
[ "django.contrib.admin.site.register" ]
[((134, 162), 'django.contrib.admin.site.register', 'admin.site.register', (['Balance'], {}), '(Balance)\n', (153, 162), False, 'from django.contrib import admin\n')]
import discord
from discord.ext import commands
import json, requests, io, re


class Weather:
    """Weather class handles weather using openweather api
    params:
    attributes:
        apikey: api key for openweather
        config_location: configuration location for saberbot
        locations: json file containing the openweathermap location data
    """
    def __init__(self, bot):
        self.bot = bot
        self.conf = self.bot.config["weather"]
        self.apikey = self.conf["apikey"]
        with open(self.conf["citylist"]) as jsonfile:
            self.locations_json = json.loads(jsonfile.read())

    def parsequery(self, *args):
        """parses list of argument to string"""
        querystring = ""
        keywords = {}
        print(args)
        for arg in args:
            if "=" in arg:
                larg = arg.split("=")
                keywords[larg[0]] = larg[1]
                continue
            querystring += f" {str(arg)}"
        querystring = querystring.lstrip()
        return querystring, keywords

    def get_location_id(self, location, country):
        print(location)
        for item in self.locations_json:
            if item["name"] == location:
                if not country or item["country"]== country.upper():
                    return str(item["id"])
        return None

    def get_data(self, id, url_string):
        """params: id - location id
        returns: data - dictionary object containing json response"""
        response = requests.get(url_string)
        data = json.loads(response.text)
        return data

    def CtoF(self, c):
        return (9/5)*c+32

    @commands.command(pass_context=True)
    @commands.cooldown(1, 5.0, commands.BucketType.server)
    async def weather(self, ctx, *args):
        """Search for weather by city and optionally a country
        usage: !weather <city>, optionally specify country=<ID>, for example !weather London country=UK"""
        relevant = {}
        location, keywords = self.parsequery(*args)
        if keywords:
            country = keywords["country"]
        else:
            country = ""
        regex = re.compile("([^\w\s{1}]|\d|_|\s+)") #\W_ didn't work in testing for some reason?
        location = re.sub(regex, "", location) #transform location into string with spaces
        l = []
        l.append(country)
        l.append(location)
        print(l)
        location_id = self.get_location_id(location, country)
        if location_id != None:
            weather_url=f"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}"
            forecast_url=f"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}"
            weatherdata = self.get_data(location_id, weather_url)
            forecastdata = self.get_data(location_id, forecast_url)
            country = weatherdata["sys"]["country"]
            print(weatherdata)
            relevant["today"] = {"desc" : weatherdata["weather"][0]["description"], "temp" : weatherdata["main"]["temp"]}
            relevant["tomorrow"] = {"desc" : forecastdata["list"][0]["weather"][0]["description"], "temp" : forecastdata["list"][0]["main"]["temp"]}
            await self.bot.send_message(ctx.message.channel, f"weather for {location}, {country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F")
            await self.bot.send_message(ctx.message.channel, f"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F")
        else:
            await self.bot.send_message(ctx.message.channel, f"Sorry, I don't know where {location} is")


def setup(bot):
    bot.add_cog(Weather(bot))
[ "discord.ext.commands.command", "json.loads", "discord.ext.commands.cooldown", "requests.get", "re.sub", "re.compile" ]
[((1680, 1715), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (1696, 1715), False, 'from discord.ext import commands\n'), ((1721, 1774), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5.0)', 'commands.BucketType.server'], {}), '(1, 5.0, commands.BucketType.server)\n', (1738, 1774), False, 'from discord.ext import commands\n'), ((1526, 1550), 'requests.get', 'requests.get', (['url_string'], {}), '(url_string)\n', (1538, 1550), False, 'import json, requests, io, re\n'), ((1566, 1591), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1576, 1591), False, 'import json, requests, io, re\n'), ((2180, 2219), 're.compile', 're.compile', (['"""([^\\\\w\\\\s{1}]|\\\\d|_|\\\\s+)"""'], {}), "('([^\\\\w\\\\s{1}]|\\\\d|_|\\\\s+)')\n", (2190, 2219), False, 'import json, requests, io, re\n'), ((2280, 2307), 're.sub', 're.sub', (['regex', '""""""', 'location'], {}), "(regex, '', location)\n", (2286, 2307), False, 'import json, requests, io, re\n')]
from django.contrib import admin

from .models import Article


class ArticleAdmin(admin.ModelAdmin):
    model = Article


admin.site.register(Article)
[ "django.contrib.admin.site.register" ]
[((122, 150), 'django.contrib.admin.site.register', 'admin.site.register', (['Article'], {}), '(Article)\n', (141, 150), False, 'from django.contrib import admin\n')]
import os
from databroker.assets.handlers_base import HandlerBase
from databroker.assets.base_registry import DuplicateHandler
import fabio

# for backward compatibility, fpp was always 1 before Jan 2018
#global pilatus_fpp
#pilatus_fpp = 1
# this is used by the CBF file handler
from enum import Enum

class triggerMode(Enum):
    software_trigger_single_frame = 1
    software_trigger_multi_frame = 2
    external_trigger = 3
    fly_scan = 4
    #external_trigger_multi_frame = 5   # this is unnecessary, difference is fpp

#global pilatus_trigger_mode
#global default_data_path_root
#global substitute_data_path_root
#global CBF_replace_data_path

#pilatus_trigger_mode = triggerMode.software_trigger_single_frame
# if the cbf files have been moved already
#CBF_replace_data_path = False

class PilatusCBFHandler(HandlerBase):
    specs = {'AD_CBF'} | HandlerBase.specs
    froot = data_file_path.gpfs
    subdir = None
    trigger_mode = triggerMode.software_trigger_single_frame
    # assuming that the data files always have names with these extensions
    std_image_size = {
        'SAXS': (1043, 981),
        'WAXS1': (619, 487),
        'WAXS2': (1043, 981)   # orignal WAXS2 was (619, 487)
    }

    def __init__(self, rpath, template, filename, frame_per_point=1, initial_number=1):
        print(f'Initializing CBF handler for {self.trigger_mode} ...')
        self._template = template
        self._fpp = frame_per_point
        self._filename = filename
        self._initial_number = initial_number
        self._image_size = None
        self._default_path = os.path.join(rpath, '')
        self._path = ""

        for k in self.std_image_size:
            if template.find(k)>=0:
                self._image_size = self.std_image_size[k]
        if self._image_size is None:
            raise Exception(f'Unrecognized data file extension in filename template: {template}')

        for fr in data_file_path:
            if self._default_path.find(fr.value)==0:
                self._dir = self._default_path[len(fr.value):]
                return
        raise Exception(f"invalid file path: {self._default_path}")

    def update_path(self):
        # this is a workaround for data that are save in /exp_path then moved to /nsls2/xf16id1/exp_path
        if not self.froot in data_file_path:
            raise Exception(f"invalid froot: {self.froot}")
        self._path = self.froot.value+self._dir
        print(f"updating path, will read data from {self._path} ...")

    def get_data(self, fn):
        """ the file may not exist
        """
        try:
            img = fabio.open(fn)
            data = img.data
            if data.shape!=self._image_size:
                print(f'got incorrect image size from {fn}: {data.shape}') #, return an empty frame instead.')
        except:
            print(f'could not read {fn}, return an empty frame instead.')
            data = np.zeros(self._image_size)
        #print(data.shape)
        return data

    def __call__(self, point_number):
        start = self._initial_number #+ point_number
        stop = start + 1
        ret = []

        tplt = self._template.replace("6.6d", "06d")  # some early templates are not correctly formatted
        tl = tplt.replace(".", "_").split("_")
        # e.g. ['%s%s', '%06d', 'SAXS', 'cbf'], ['%s%s', '%06d', 'SAXS', '%05d', 'cbf']
        # resulting in file names like test_000125_SAXS.cbf vs test_000125_SAXS_00001.cbf
        if self.trigger_mode != triggerMode.software_trigger_single_frame and self._fpp>1:
            # the template needs to have two number fileds
            if len(tl)==4:
                tl = tl[:-1]+["%05d"]+tl[-1:]
            elif len(tl)==5:
                tl = tl[:-2]+tl[-1:]
            self._template = "_".join(tl[:-1])+"."+tl[-1]

        print("CBF handler called: start=%d, stop=%d" % (start, stop))
        print("  ", self._initial_number, point_number, self._fpp)
        print("  ", self._template, self._path, self._initial_number)
        self.update_path()
        if self.subdir is not None:
            self._path += f"{self.subdir}/"

        if self.trigger_mode == triggerMode.software_trigger_single_frame or self._fpp == 1:
            fn = self._template % (self._path, self._filename, self._initial_number+point_number)
            ret.append(self.get_data(fn))
        elif self.trigger_mode in [triggerMode.software_trigger_multi_frame, triggerMode.fly_scan]:
            for i in range(self._fpp):
                fn = self._template % (self._path, self._filename, start, point_number+i)
                ret.append(self.get_data(fn))
        elif self.trigger_mode==triggerMode.external_trigger:
            fn = self._template % (self._path, self._filename, start, point_number)
            ret.append(self.get_data(fn))

        return np.array(ret).squeeze()

db.reg.register_handler('AD_CBF', PilatusCBFHandler, overwrite=True)
[ "os.path.join", "fabio.open" ]
[((1593, 1616), 'os.path.join', 'os.path.join', (['rpath', '""""""'], {}), "(rpath, '')\n", (1605, 1616), False, 'import os\n'), ((2631, 2645), 'fabio.open', 'fabio.open', (['fn'], {}), '(fn)\n', (2641, 2645), False, 'import fabio\n')]
from typing import Tuple, Dict
import random

import numpy as np
import torch
from torchvision import datasets, transforms
from sklearn.metrics.pairwise import cosine_distances
from matplotlib import pyplot as plt

try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter

CIFAR10_ANNOTATION = {
    0: 'airplane',
    1: 'automobile',
    2: 'bird',
    3: 'cat',
    4: 'deer',
    5: 'dog',
    6: 'frog',
    7: 'horse',
    8: 'ship',
    9: 'truck'
}


def plot_cifar_image(image, label=""):
    plt.title(label)
    plt.imshow(image.permute(1, 2, 0).numpy())
    plt.show()


class AccumulateStats:
    def __enter__(self):
        pass

    def __exit__(self):
        pass

    def __call__(self):
        pass


class AverageMeter(object):
    """
    Computes and stores the average and current value
    """

    def __init__(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


class MeterLogger:
    def __init__(self, meters: Tuple[str], writer: SummaryWriter):
        self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for k in meters}
        self._writer = writer

    def update(self, name: str, val, n=1):
        self.average_meters[name].update(val, n)

    def reset(self):
        for meter in self.average_meters.values():
            meter.reset()

    def write(self, step, prefix):
        for name, meter in self.average_meters.items():
            tag = prefix + '/' + name
            self._writer.add_scalar(tag, meter.avg, step)


class ImageLogger:
    def __init__(self, writer: SummaryWriter, mean=None, std=None):
        self._writer = writer
        self.mean = mean
        self.std = std
        if self.mean is not None:
            self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1)
        if self.std is not None:
            self.std = torch.tensor(self.std).reshape(1, 3, 1, 1)

    def write(self, images, reconstruction, step, prefix):
        images = images.cpu()
        reconstruction = reconstruction.cpu()
        if self.mean is not None and self.std is not None:
            images = images * self.std + self.mean
            reconstruction = reconstruction * self.std + self.mean
        image_tag = prefix + '/' + 'image'
        self._writer.add_images(image_tag, images, step)
        reconstruction_tag = prefix + '/' + 'reconstruction'
        self._writer.add_images(reconstruction_tag, reconstruction, step)


class VQEmbeddingLogger:
    def __init__(self, writer: SummaryWriter):
        self._writer = writer

    def write(self, embeddings, step):
        embeddings = embeddings.detach().cpu().numpy()
        sim = cosine_distances(embeddings)
        self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW')


def double_soft_orthogonality(weights: torch.Tensor):
    a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2
    b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2
    return a + b


def set_random_seed(seed: int, cuda: bool = False):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.cuda.random.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
[ "matplotlib.pyplot.title", "sklearn.metrics.pairwise.cosine_distances", "numpy.random.seed", "matplotlib.pyplot.show", "torch.eye", "torch.manual_seed", "torch.cuda.random.manual_seed", "torch.cuda.manual_seed", "torch.cuda.manual_seed_all", "random.seed", "torch.tensor" ]
[((569, 585), 'matplotlib.pyplot.title', 'plt.title', (['label'], {}), '(label)\n', (578, 585), True, 'from matplotlib import pyplot as plt\n'), ((637, 647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (645, 647), True, 'from matplotlib import pyplot as plt\n'), ((3421, 3438), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3432, 3438), False, 'import random\n'), ((3443, 3463), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3457, 3463), True, 'import numpy as np\n'), ((3468, 3491), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3485, 3491), False, 'import torch\n'), ((2982, 3010), 'sklearn.metrics.pairwise.cosine_distances', 'cosine_distances', (['embeddings'], {}), '(embeddings)\n', (2998, 3010), False, 'from sklearn.metrics.pairwise import cosine_distances\n'), ((3514, 3542), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (3536, 3542), False, 'import torch\n'), ((3551, 3583), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (3577, 3583), False, 'import torch\n'), ((3592, 3627), 'torch.cuda.random.manual_seed', 'torch.cuda.random.manual_seed', (['seed'], {}), '(seed)\n', (3621, 3627), False, 'import torch\n'), ((2073, 2096), 'torch.tensor', 'torch.tensor', (['self.mean'], {}), '(self.mean)\n', (2085, 2096), False, 'import torch\n'), ((2174, 2196), 'torch.tensor', 'torch.tensor', (['self.std'], {}), '(self.std)\n', (2186, 2196), False, 'import torch\n'), ((3196, 3223), 'torch.eye', 'torch.eye', (['weights.shape[0]'], {}), '(weights.shape[0])\n', (3205, 3223), False, 'import torch\n'), ((3292, 3319), 'torch.eye', 'torch.eye', (['weights.shape[1]'], {}), '(weights.shape[1])\n', (3301, 3319), False, 'import torch\n')]
from __future__ import absolute_import

from email.parser import FeedParser

import logging
import os

from pip.basecommand import Command
from pip.status_codes import SUCCESS, ERROR
from pip._vendor import pkg_resources


logger = logging.getLogger(__name__)


class ShowCommand(Command):
    """Show information about one or more installed packages."""
    name = 'show'
    usage = """
      %prog [options] <package> ..."""
    summary = 'Show information about installed packages.'

    def __init__(self, *args, **kw):
        super(ShowCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '-f', '--files',
            dest='files',
            action='store_true',
            default=False,
            help='Show the full list of installed files for each package.')
        self.cmd_opts.add_option(
            '--index',
            dest='index',
            metavar='URL',
            default='https://pypi.python.org/pypi',
            help='Base URL of Python Package Index (default %default)')
        self.cmd_opts.add_option(
            '-p', '--pypi',
            dest='pypi',
            action='store_true',
            default=False,
            help='Show PyPi version')

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        if not args:
            logger.warning('ERROR: Please provide a package name or names.')
            return ERROR
        query = args

        if options.pypi:
            with self._build_session(options) as session:
                results = search_packages_info(query, options.index, session)
        else:
            results = search_packages_info(query, options.index)

        if not print_results(results, options.files):
            return ERROR
        return SUCCESS


def _format_package(requirement):
    r = requirement
    installed_ver = '-'
    try:
        d = pkg_resources.get_distribution(r.project_name)
        installed_ver = str(d.version)
    except pkg_resources.DistributionNotFound:
        pass
    return "%s [%s]" % (r, installed_ver)


def search_packages_info(query, index_url=None, session=None):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.
    """
    installed = dict(
        [(p.key, p) for p in pkg_resources.working_set])
    query_names = [name.lower() for name in query]
    distributions = [installed[pkg] for pkg in query_names if pkg in installed]
    for dist in distributions:
        required_by = []
        for _, p in installed.items():
            r = next((r for r in p.requires() if r.key == dist.key), None)
            if r:
                required_by.append("%s %s" % (p.project_name, r.specifier))
            else:
                for e in p.extras:
                    r = next(
                        (r for r in p.requires([e]) if r.key == dist.key),
                        None
                    )
                    if r:
                        required_by.append(
                            "%s[%s] %s" % (p.project_name, e, r.specifier))

        extras = {}
        for e in dist.extras:
            reqs = set(dist.requires([e])) - set(dist.requires())
            extras[e] = map(_format_package, reqs)

        if session:
            from pip.download import PipXmlrpcTransport
            from pip._vendor.six.moves import xmlrpc_client

            transport = PipXmlrpcTransport(index_url, session)
            pypi = xmlrpc_client.ServerProxy(index_url, transport)
            pypi_releases = pypi.package_releases(dist.project_name)
            pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN'
        else:
            pypi_version = None

        requires = [_format_package(r_) for r_ in dist.requires()]

        package = {
            'name': dist.project_name,
            'version': dist.version,
            'pypi_version': pypi_version,
            'location': dist.location,
            'requires': requires,
            'required_by': required_by,
            'extras': extras
        }
        file_list = None
        metadata = None
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORDs should be part of .dist-info metadatas
            if dist.has_metadata('RECORD'):
                lines = dist.get_metadata_lines('RECORD')
                paths = [l.split(',')[0] for l in lines]
                paths = [os.path.join(dist.location, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('METADATA'):
                metadata = dist.get_metadata('METADATA')
        else:
            # Otherwise use pip's log for .egg-info's
            if dist.has_metadata('installed-files.txt'):
                paths = dist.get_metadata_lines('installed-files.txt')
                paths = [os.path.join(dist.egg_info, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        installer = None
        if dist.has_metadata('INSTALLER'):
            for line in dist.get_metadata_lines('INSTALLER'):
                if line.strip():
                    installer = line.strip()
                    break
        package['installer'] = installer

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        feed_parser = FeedParser()
        feed_parser.feed(metadata)
        pkg_info_dict = feed_parser.close()
        for key in ('metadata-version', 'summary',
                    'home-page', 'author', 'author-email', 'license'):
            package[key] = pkg_info_dict.get(key)

        # It looks like FeedParser can not deal with repeated headers
        classifiers = []
        for line in metadata.splitlines():
            if not line:
                break
            # Classifier: License :: OSI Approved :: MIT License
            if line.startswith('Classifier: '):
                classifiers.append(line[len('Classifier: '):])
        package['classifiers'] = classifiers

        if file_list:
            package['files'] = sorted(file_list)
        yield package


def print_results(distributions, list_all_files):
    """
    Print the informations from installed distributions found.
    """
    results_printed = False
    for dist in distributions:
        results_printed = True
        logger.info("---")
        logger.info("Metadata-Version: %s", dist.get('metadata-version'))
        logger.info("Name: %s", dist['name'])
        logger.info("Version: %s", dist['version'])
        if dist['pypi_version']:
            logger.info("PyPi Version: %s", dist['pypi_version'])
        logger.info("Summary: %s", dist.get('summary'))
        logger.info("Home-page: %s", dist.get('home-page'))
        logger.info("Author: %s", dist.get('author'))
        logger.info("Author-email: %s", dist.get('author-email'))
        if dist['installer'] is not None:
            logger.info("Installer: %s", dist['installer'])
        logger.info("License: %s", dist.get('license'))
        logger.info("Location: %s", dist['location'])
        logger.info("Classifiers:")
        for classifier in dist['classifiers']:
            logger.info("  %s", classifier)
        logger.info("Requires:")
        for line in sorted(dist['requires']):
            logger.info("  %s", line)
        for extra_name, deps in dist['extras'].items():
            logger.info("Extra Require [%s]:", extra_name)
            for line in sorted(deps):
                logger.info("  %s", line.strip())
        logger.info("Required by(%d):", len(dist['required_by']))
        for line in sorted(dist['required_by']):
            logger.info("  %s", line.strip())

        if list_all_files:
            logger.info("Files:")
            if 'files' in dist:
                for line in dist['files']:
                    logger.info("  %s", line.strip())
            else:
                logger.info("Cannot locate installed-files.txt")
        if 'entry_points' in dist:
            logger.info("Entry-points:")
            for line in dist['entry_points']:
                logger.info("  %s", line.strip())
    return results_printed
[ "pip._vendor.six.moves.xmlrpc_client.ServerProxy", "pip._vendor.pkg_resources.get_distribution", "email.parser.FeedParser", "pip.download.PipXmlrpcTransport", "os.path.relpath", "os.path.join", "logging.getLogger" ]
[((230, 257), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (247, 257), False, 'import logging\n'), ((1908, 1954), 'pip._vendor.pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['r.project_name'], {}), '(r.project_name)\n', (1938, 1954), False, 'from pip._vendor import pkg_resources\n'), ((5801, 5813), 'email.parser.FeedParser', 'FeedParser', ([], {}), '()\n', (5811, 5813), False, 'from email.parser import FeedParser\n'), ((3544, 3582), 'pip.download.PipXmlrpcTransport', 'PipXmlrpcTransport', (['index_url', 'session'], {}), '(index_url, session)\n', (3562, 3582), False, 'from pip.download import PipXmlrpcTransport\n'), ((3602, 3649), 'pip._vendor.six.moves.xmlrpc_client.ServerProxy', 'xmlrpc_client.ServerProxy', (['index_url', 'transport'], {}), '(index_url, transport)\n', (3627, 3649), False, 'from pip._vendor.six.moves import xmlrpc_client\n'), ((4558, 4588), 'os.path.join', 'os.path.join', (['dist.location', 'p'], {}), '(dist.location, p)\n', (4570, 4588), False, 'import os\n'), ((4634, 4667), 'os.path.relpath', 'os.path.relpath', (['p', 'dist.location'], {}), '(p, dist.location)\n', (4649, 4667), False, 'import os\n'), ((5009, 5039), 'os.path.join', 'os.path.join', (['dist.egg_info', 'p'], {}), '(dist.egg_info, p)\n', (5021, 5039), False, 'import os\n'), ((5085, 5118), 'os.path.relpath', 'os.path.relpath', (['p', 'dist.location'], {}), '(p, dist.location)\n', (5100, 5118), False, 'import os\n')]
import torch.nn as nn import torch.nn.functional as F import scipy.io as scio from torchvision.models import vgg19_bn, resnet152, densenet161 class LeNet_300_100(nn.Module): def __init__(self, enable_bias=True): # original code is true super().__init__() self.fc1 = nn.Linear(784, 300, bias=enable_bias) self.fc2 = nn.Linear(300, 100, bias=enable_bias) self.fc3 = nn.Linear(100, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.fc1(x.view(-1, 784))) x = F.relu(self.fc2(x)) return F.log_softmax(self.fc3(x), dim=1) class LeNet_5(nn.Module): def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias) self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias) self.fc3 = nn.Linear(16 * 5 * 5, 120, bias=enable_bias) self.fc4 = nn.Linear(120, 84, bias=enable_bias) self.fc5 = nn.Linear(84, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 16 * 5 * 5))) x = F.relu(self.fc4(x)) x = F.log_softmax(self.fc5(x), dim=1) return x class LeNet_5_Caffe(nn.Module): """ This is based on Caffe's implementation of Lenet-5 and is slightly different from the vanilla LeNet-5. Note that the first layer does NOT have padding and therefore intermediate shapes do not match the official LeNet-5. """ def __init__(self, enable_bias=True): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias) self.conv2 = nn.Conv2d(20, 50, 5, bias=enable_bias) self.fc3 = nn.Linear(50 * 4 * 4, 500, bias=enable_bias) self.fc4 = nn.Linear(500, 10, bias=enable_bias) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = F.relu(self.fc3(x.view(-1, 50 * 4 * 4))) x = F.log_softmax(self.fc4(x), dim=1) return x VGG_CONFIGS = { 'C': [64, 64, 'M', 128, 128, 'M', 256, 256, [256], 'M', 512, 512, [512], 'M', 512, 512, [512], 'M'], 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'like': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']} class VGG(nn.Module): """ This is a base class to generate three VGG variants used in SNIP paper: 1. VGG-C (16 layers) 2. VGG-D (16 layers) 3. 
VGG-like Some of the differences: * Reduced size of FC layers to 512 * Adjusted flattening to match CIFAR-10 shapes * Replaced dropout layers with BatchNorm """ def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False): super().__init__() self.enable_dump_features = enable_dump_features if enable_dump_features: self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias) self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias) self.features_block3 = self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias) self.features_block4 = self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias) self.features_block5 = self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias) else: self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias) if config in {'C', 'D'}: self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, 512, bias=enable_bias), nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) elif config == 'like': self.classifier = nn.Sequential( nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7 in the original VGG nn.ReLU(True), nn.BatchNorm1d(512), # instead of dropout nn.Linear(512, num_classes, bias=enable_bias)) else: assert False @staticmethod def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN yes or no? layers = [] for idx, v in enumerate(config): if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: if isinstance(v, list): v, kernel_size, padding = v[0], 1, 0 else: kernel_size, padding = 3, 1 conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) def forward(self, input, epoch_id=None, batch_id=None, gt=None): if self.enable_dump_features: feat_block1 = self.features_block1(input) feat_block2 = self.features_block2(feat_block1) feat_block3 = self.features_block3(feat_block2) feat_block4 = self.features_block4(feat_block3) x = self.features_block5(feat_block4) if (epoch_id is not None) and (batch_id is not None): scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id), {'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'gt': gt.detach().squeeze().cpu().numpy(), 'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(), 'b5': x.detach().squeeze().cpu().numpy()}) else: x = self.features(input) x = x.view(x.size(0), -1) x = self.classifier(x) x = F.log_softmax(x, dim=1) return x class AlexNet(nn.Module): # copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a def __init__(self, k=4, num_classes=10, enable_bias=True): super(AlexNet, self).__init__() self.conv_base = nn.Sequential( nn.Conv2d(3, 96, kernel_size=11, 
stride=2, padding=5, bias=enable_bias), nn.BatchNorm2d(96), nn.ReLU(inplace=True), nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(384), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias), nn.BatchNorm2d(256), nn.ReLU(inplace=True)) self.fc_base = nn.Sequential( nn.Linear(256, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, 1024 * k), nn.BatchNorm1d(1024 * k), nn.ReLU(inplace=True), nn.Linear(1024 * k, num_classes)) def forward(self, x): x = self.conv_base(x) x = x.view(x.size(0), -1) x = self.fc_base(x) x = F.log_softmax(x, dim=1) return x
[ "torch.nn.ReLU", "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.BatchNorm1d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.nn.functional.log_softmax", "torch.nn.functional.max_pool2d", "torch.nn.MaxPool2d" ]
[((279, 316), 'torch.nn.Linear', 'nn.Linear', (['(784)', '(300)'], {'bias': 'enable_bias'}), '(784, 300, bias=enable_bias)\n', (288, 316), True, 'import torch.nn as nn\n'), ((332, 369), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(100)'], {'bias': 'enable_bias'}), '(300, 100, bias=enable_bias)\n', (341, 369), True, 'import torch.nn as nn\n'), ((385, 421), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(10)'], {'bias': 'enable_bias'}), '(100, 10, bias=enable_bias)\n', (394, 421), True, 'import torch.nn as nn\n'), ((675, 722), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(6)', '(5)'], {'padding': '(2)', 'bias': 'enable_bias'}), '(1, 6, 5, padding=2, bias=enable_bias)\n', (684, 722), True, 'import torch.nn as nn\n'), ((740, 777), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(5)'], {'bias': 'enable_bias'}), '(6, 16, 5, bias=enable_bias)\n', (749, 777), True, 'import torch.nn as nn\n'), ((793, 837), 'torch.nn.Linear', 'nn.Linear', (['(16 * 5 * 5)', '(120)'], {'bias': 'enable_bias'}), '(16 * 5 * 5, 120, bias=enable_bias)\n', (802, 837), True, 'import torch.nn as nn\n'), ((853, 889), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {'bias': 'enable_bias'}), '(120, 84, bias=enable_bias)\n', (862, 889), True, 'import torch.nn as nn\n'), ((905, 940), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(10)'], {'bias': 'enable_bias'}), '(84, 10, bias=enable_bias)\n', (914, 940), True, 'import torch.nn as nn\n'), ((1004, 1022), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (1016, 1022), True, 'import torch.nn.functional as F\n'), ((1061, 1079), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (1073, 1079), True, 'import torch.nn.functional as F\n'), ((1566, 1614), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(20)', '(5)'], {'padding': '(0)', 'bias': 'enable_bias'}), '(1, 20, 5, padding=0, bias=enable_bias)\n', (1575, 1614), True, 'import torch.nn as nn\n'), ((1632, 1670), 'torch.nn.Conv2d', 'nn.Conv2d', (['(20)', '(50)', '(5)'], {'bias': 'enable_bias'}), '(20, 50, 5, bias=enable_bias)\n', (1641, 1670), True, 'import torch.nn as nn\n'), ((1686, 1730), 'torch.nn.Linear', 'nn.Linear', (['(50 * 4 * 4)', '(500)'], {'bias': 'enable_bias'}), '(50 * 4 * 4, 500, bias=enable_bias)\n', (1695, 1730), True, 'import torch.nn as nn\n'), ((1746, 1782), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(10)'], {'bias': 'enable_bias'}), '(500, 10, bias=enable_bias)\n', (1755, 1782), True, 'import torch.nn as nn\n'), ((1846, 1864), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (1858, 1864), True, 'import torch.nn.functional as F\n'), ((1903, 1921), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (1915, 1921), True, 'import torch.nn.functional as F\n'), ((5073, 5095), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (5086, 5095), True, 'import torch.nn as nn\n'), ((6301, 6324), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (6314, 6324), True, 'import torch.nn.functional as F\n'), ((7711, 7734), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (7724, 7734), True, 'import torch.nn.functional as F\n'), ((6651, 6722), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(96)'], {'kernel_size': '(11)', 'stride': '(2)', 'padding': '(5)', 'bias': 'enable_bias'}), '(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias)\n', (6660, 6722), True, 'import torch.nn as nn\n'), ((6730, 6748), 
'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(96)'], {}), '(96)\n', (6744, 6748), True, 'import torch.nn as nn\n'), ((6756, 6777), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6763, 6777), True, 'import torch.nn as nn\n'), ((6786, 6858), 'torch.nn.Conv2d', 'nn.Conv2d', (['(96)', '(256)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': 'enable_bias'}), '(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias)\n', (6795, 6858), True, 'import torch.nn as nn\n'), ((6866, 6885), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (6880, 6885), True, 'import torch.nn as nn\n'), ((6893, 6914), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6900, 6914), True, 'import torch.nn as nn\n'), ((6923, 6996), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(384)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': 'enable_bias'}), '(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias)\n', (6932, 6996), True, 'import torch.nn as nn\n'), ((7004, 7023), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(384)'], {}), '(384)\n', (7018, 7023), True, 'import torch.nn as nn\n'), ((7031, 7052), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7038, 7052), True, 'import torch.nn as nn\n'), ((7061, 7134), 'torch.nn.Conv2d', 'nn.Conv2d', (['(384)', '(384)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': 'enable_bias'}), '(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias)\n', (7070, 7134), True, 'import torch.nn as nn\n'), ((7142, 7161), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(384)'], {}), '(384)\n', (7156, 7161), True, 'import torch.nn as nn\n'), ((7169, 7190), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7176, 7190), True, 'import torch.nn as nn\n'), ((7199, 7272), 'torch.nn.Conv2d', 'nn.Conv2d', (['(384)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': 'enable_bias'}), '(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias)\n', (7208, 7272), True, 'import torch.nn as nn\n'), ((7280, 7299), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (7294, 7299), True, 'import torch.nn as nn\n'), ((7307, 7328), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7314, 7328), True, 'import torch.nn as nn\n'), ((7371, 7395), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1024 * k)'], {}), '(256, 1024 * k)\n', (7380, 7395), True, 'import torch.nn as nn\n'), ((7403, 7427), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1024 * k)'], {}), '(1024 * k)\n', (7417, 7427), True, 'import torch.nn as nn\n'), ((7435, 7456), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7442, 7456), True, 'import torch.nn as nn\n'), ((7465, 7494), 'torch.nn.Linear', 'nn.Linear', (['(1024 * k)', '(1024 * k)'], {}), '(1024 * k, 1024 * k)\n', (7474, 7494), True, 'import torch.nn as nn\n'), ((7502, 7526), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1024 * k)'], {}), '(1024 * k)\n', (7516, 7526), True, 'import torch.nn as nn\n'), ((7534, 7555), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7541, 7555), True, 'import torch.nn as nn\n'), ((7564, 7596), 'torch.nn.Linear', 'nn.Linear', (['(1024 * k)', 'num_classes'], {}), '(1024 * k, num_classes)\n', (7573, 7596), True, 'import torch.nn as nn\n'), ((3711, 3748), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {'bias': 'enable_bias'}), '(512, 
512, bias=enable_bias)\n', (3720, 3748), True, 'import torch.nn as nn\n'), ((3793, 3806), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3800, 3806), True, 'import torch.nn as nn\n'), ((3816, 3835), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (3830, 3835), True, 'import torch.nn as nn\n'), ((3867, 3904), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {'bias': 'enable_bias'}), '(512, 512, bias=enable_bias)\n', (3876, 3904), True, 'import torch.nn as nn\n'), ((3914, 3927), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3921, 3927), True, 'import torch.nn as nn\n'), ((3937, 3956), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (3951, 3956), True, 'import torch.nn as nn\n'), ((3988, 4033), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'num_classes'], {'bias': 'enable_bias'}), '(512, num_classes, bias=enable_bias)\n', (3997, 4033), True, 'import torch.nn as nn\n'), ((4750, 4840), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'v'], {'kernel_size': 'kernel_size', 'padding': 'padding', 'bias': 'enable_bias'}), '(in_channels, v, kernel_size=kernel_size, padding=padding, bias=\n enable_bias)\n', (4759, 4840), True, 'import torch.nn as nn\n'), ((4109, 4146), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {'bias': 'enable_bias'}), '(512, 512, bias=enable_bias)\n', (4118, 4146), True, 'import torch.nn as nn\n'), ((4191, 4204), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (4198, 4204), True, 'import torch.nn as nn\n'), ((4214, 4233), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (4228, 4233), True, 'import torch.nn as nn\n'), ((4265, 4310), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'num_classes'], {'bias': 'enable_bias'}), '(512, num_classes, bias=enable_bias)\n', (4274, 4310), True, 'import torch.nn as nn\n'), ((4550, 4587), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (4562, 4587), True, 'import torch.nn as nn\n'), ((4909, 4926), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['v'], {}), '(v)\n', (4923, 4926), True, 'import torch.nn as nn\n'), ((4949, 4970), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4956, 4970), True, 'import torch.nn as nn\n'), ((5015, 5036), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5022, 5036), True, 'import torch.nn as nn\n')]
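A small smoke-test sketch for the networks defined above, assuming the file is importable as models (the module name is a guess); LeNet_5 expects 1x28x28 MNIST-style input, so a dummy batch of that shape is used.

import torch
from models import LeNet_5   # assumed module name for the file above

net = LeNet_5(enable_bias=True)
net.eval()
with torch.no_grad():
    dummy = torch.randn(4, 1, 28, 28)   # batch of 4 grayscale 28x28 images
    log_probs = net(dummy)              # log-softmax scores, shape (4, 10)
print(log_probs.shape)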
import os import tempfile import subprocess import getpass import shutil from textwrap import dedent def get_r_env(): env = {} executable = 'R' try: # get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) env.update(kernel_spec.env) executable = kernel_spec.argv[0] # patch LD_LIBRARY_PATH for conda env conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib') #r_lib_dir = os.path.join(conda_lib_dir, 'R/lib') env.update({ # 'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir 'LD_LIBRARY_PATH': conda_lib_dir }) except Exception: nbapp.log.warning('Error when trying to get R executable from kernel') # Detect various environment variables rsession requires to run # Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp cmd = [executable, '--slave', '--vanilla', '-e', 'cat(paste(R.home("home"),R.home("share"),R.home("include"),R.home("doc"),getRversion(),sep=":"))'] r_output = subprocess.check_output(cmd) R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \ r_output.decode().split(':') # TODO: # maybe set a few more env vars? # e.g. MAXENT, DISPLAY='' (to avoid issues with java) # e.g. would be nice if RStudio terminal starts with correct conda env? # -> either patch ~/Renviron / Renviron.site # -> user Rprofile.site (if conda env specific?) # -> use ~/.Rprofile ... if user specific? # make R kernel used configurable? # ... or rather use standard system R installation, and let user install stuff in home folder? env.update({ 'R_DOC_DIR': R_DOC_DIR, 'R_HOME': R_HOME, 'R_INCLUDE_DIR': R_INCLUDE_DIR, 'R_SHARE_DIR': R_SHARE_DIR, 'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME, 'RSTUDIO_DEFAULT_R_VERSION': version, }) return env def setup_shiny(): '''Manage a Shiny instance.''' def _get_shiny_cmd(port): # server.r_path ??? 
conf = dedent(""" run_as {user}; server {{ bookmark_state_dir {site_dir}/shiny-server-boomarks; listen {port}; location / {{ site_dir {site_dir}; log_dir {site_dir}/logs; directory_index on; }} }} """).format( user=getpass.getuser(), port=str(port), site_dir=os.getcwd() ) f = tempfile.NamedTemporaryFile(mode='w', delete=False) f.write(conf) f.close() return ['shiny-server', f.name] def _get_shiny_env(port): env = get_r_env() return env return { 'command': _get_shiny_cmd, 'environment': _get_shiny_env, 'launcher_entry': { 'title': 'Shiny', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg') } } def setup_rstudio(): def _get_rsession_env(port): env = get_r_env() # rserver needs USER to be set to something sensible, # otherwise it'll throw up an authentication page if not os.environ.get('USER', ''): env['USER'] = getpass.getuser() return env def _get_r_executable(): try: # get notebook app from notebook.notebookapp import NotebookApp nbapp = NotebookApp.instance() # get R executable: kernel_name = nbapp.kernel_manager.default_kernel_name if kernel_name: kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name) return kernel_spec.argv[0] except Exception: nbapp.log.warning('Error when trying to get R executable from kernel') return 'R' def _get_rsession_cmd(port): # Other paths rsession maybe in other_paths = [ # When rstudio-server deb is installed '/usr/lib/rstudio-server/bin/rserver', ] if shutil.which('rserver'): executable = 'rserver' else: for op in other_paths: if os.path.exists(op): executable = op break else: raise FileNotFoundError('Can not find rserver in PATH') cmd = [ executable, '--www-port=' + str(port), '--rsession-which-r=' + _get_r_executable(), ] env = get_r_env() if env.get('LD_LIBRARY_PATH'): cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH']) return cmd return { 'command': _get_rsession_cmd, 'environment': _get_rsession_env, 'launcher_entry': { 'title': 'RStudio', 'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'rstudio.svg') } }
[ "textwrap.dedent", "tempfile.NamedTemporaryFile", "os.path.abspath", "getpass.getuser", "os.getcwd", "subprocess.check_output", "os.path.exists", "shutil.which", "os.environ.get", "notebook.notebookapp.NotebookApp.instance", "os.path.join" ]
[((1286, 1314), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (1309, 1314), False, 'import subprocess\n'), ((259, 281), 'notebook.notebookapp.NotebookApp.instance', 'NotebookApp.instance', ([], {}), '()\n', (279, 281), False, 'from notebook.notebookapp import NotebookApp\n'), ((2793, 2844), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (2820, 2844), False, 'import tempfile\n'), ((4356, 4379), 'shutil.which', 'shutil.which', (['"""rserver"""'], {}), "('rserver')\n", (4368, 4379), False, 'import shutil\n'), ((614, 654), 'os.path.join', 'os.path.join', (["env['CONDA_PREFIX']", '"""lib"""'], {}), "(env['CONDA_PREFIX'], 'lib')\n", (626, 654), False, 'import os\n'), ((3486, 3512), 'os.environ.get', 'os.environ.get', (['"""USER"""', '""""""'], {}), "('USER', '')\n", (3500, 3512), False, 'import os\n'), ((3540, 3557), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (3555, 3557), False, 'import getpass\n'), ((3729, 3751), 'notebook.notebookapp.NotebookApp.instance', 'NotebookApp.instance', ([], {}), '()\n', (3749, 3751), False, 'from notebook.notebookapp import NotebookApp\n'), ((2302, 2674), 'textwrap.dedent', 'dedent', (['"""\n run_as {user};\n server {{\n bookmark_state_dir {site_dir}/shiny-server-boomarks;\n listen {port};\n location / {{\n site_dir {site_dir};\n log_dir {site_dir}/logs;\n directory_index on;\n }}\n }}\n """'], {}), '(\n """\n run_as {user};\n server {{\n bookmark_state_dir {site_dir}/shiny-server-boomarks;\n listen {port};\n location / {{\n site_dir {site_dir};\n log_dir {site_dir}/logs;\n directory_index on;\n }}\n }}\n """\n )\n', (2308, 2674), False, 'from textwrap import dedent\n'), ((2690, 2707), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (2705, 2707), False, 'import getpass\n'), ((2758, 2769), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2767, 2769), False, 'import os\n'), ((4484, 4502), 'os.path.exists', 'os.path.exists', (['op'], {}), '(op)\n', (4498, 4502), False, 'import os\n'), ((3201, 3226), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3216, 3226), False, 'import os\n'), ((5174, 5199), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5189, 5199), False, 'import os\n')]
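A hedged sketch of how the specification dict returned above could be exercised by hand; the module name rsession_proxy and the port are placeholders, and actually building the command requires R and RStudio Server's rserver binary on the machine.

import rsession_proxy   # assumed name for the module above

spec = rsession_proxy.setup_rstudio()
port = 8787                           # arbitrary example port
cmd = spec['command'](port)           # e.g. ['rserver', '--www-port=8787', ...]
env = spec['environment'](port)       # R_HOME, R_DOC_DIR, ... for the child process
print(cmd, env.get('R_HOME'))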
__author__ = 'alexanderstolz' import hib_sql_connection def getAllTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, "select * from umsatz;") def getOutgoingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, "select * from umsatz where betrag < 0;") def getIncomingTransfers(): connection = hib_sql_connection.connectToHibiscus() return hib_sql_connection.queryToHibiscus(connection, "select * from umsatz where betrag > 0;")
[ "hib_sql_connection.queryToHibiscus", "hib_sql_connection.connectToHibiscus" ]
[((100, 138), 'hib_sql_connection.connectToHibiscus', 'hib_sql_connection.connectToHibiscus', ([], {}), '()\n', (136, 138), False, 'import hib_sql_connection\n'), ((150, 221), 'hib_sql_connection.queryToHibiscus', 'hib_sql_connection.queryToHibiscus', (['connection', '"""select * from umsatz;"""'], {}), "(connection, 'select * from umsatz;')\n", (184, 221), False, 'import hib_sql_connection\n'), ((268, 306), 'hib_sql_connection.connectToHibiscus', 'hib_sql_connection.connectToHibiscus', ([], {}), '()\n', (304, 306), False, 'import hib_sql_connection\n'), ((318, 410), 'hib_sql_connection.queryToHibiscus', 'hib_sql_connection.queryToHibiscus', (['connection', '"""select * from umsatz where betrag < 0;"""'], {}), "(connection,\n 'select * from umsatz where betrag < 0;')\n", (352, 410), False, 'import hib_sql_connection\n'), ((453, 491), 'hib_sql_connection.connectToHibiscus', 'hib_sql_connection.connectToHibiscus', ([], {}), '()\n', (489, 491), False, 'import hib_sql_connection\n'), ((503, 595), 'hib_sql_connection.queryToHibiscus', 'hib_sql_connection.queryToHibiscus', (['connection', '"""select * from umsatz where betrag > 0;"""'], {}), "(connection,\n 'select * from umsatz where betrag > 0;')\n", (537, 595), False, 'import hib_sql_connection\n')]
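A usage sketch, assuming the file above is importable as transfers and that hib_sql_connection can reach a Hibiscus database; queryToHibiscus is assumed to return a list of result rows.

import transfers   # assumed module name for the file above

incoming = transfers.getIncomingTransfers()
outgoing = transfers.getOutgoingTransfers()
print("incoming rows:", len(incoming), "outgoing rows:", len(outgoing))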
import os,sys,talib,numpy,math,logging,time,datetime,numbers from collections import OrderedDict from baseindicator import BaseIndicator class EMA(BaseIndicator): def __init__(self,csdata, config = {}): config["period"] = config.get("period",30) config["metric"] = config.get("metric","closed") config["label"] = config.get("label","ema") config["label"] = "{}{}".format(config["label"],config["period"]) BaseIndicator.__init__(self,csdata,config) self.chartcolors = ["mediumslateblue"] self.data = None self.analysis = None self.get_analysis() def get_settings(self): return "{}".format(self.config["period"]) def get_charts(self): data = [] for i in range(0,len(self.csdata[ self.config["metric"] ])): if isinstance(self.data[i],numbers.Number) and self.data[i] > 0: ts = time.mktime(datetime.datetime.strptime(self.csdata["time"][i], "%Y-%m-%dT%H:%M:%SZ").timetuple()) data.append({ "x": ts, "y": self.data[i], }) return [{ "key": "{}:{}".format(self.label,self.config["period"]), "type": "line", "color": "#FFF5EE", "yAxis": 1, "values": data }] def get_ema(self): if self.csdata is not None: try: smetric = self.scaleup( self.csdata[self.config["metric"]]) data = talib.EMA( numpy.array(smetric), self.config["period"]) self.data = self.scaledown(data) # scaledown except Exception as ex: self.data = None raise ex return self.data def get_analysis(self ): if self.data is None: self.get_ema() ema = self.data[-1] ema1 = self.data[-2] slope = None for k in range(-1,-10,-1): if slope == None: slope = self.data[k-1] / self.data[k] else: slope = slope / ( self.data[k-1] / self.data[k] ) last_price = self.csdata["closed"][-1] closing_time = self.csdata["time"][-1] action = None if last_price < ema: action = "oversold" res = { "weight": 2, "time": closing_time, "indicator-data": { "ema": ema }, "analysis": OrderedDict() } res["analysis"]["name"] = "{}:{}".format(self.get_name(),self.get_settings()) res["analysis"]["signal"] = action res["analysis"]["ema"] = ema res["analysis"]["slope"] = slope res["analysis"]["order"] = ["ema"] self.analysis = res return res def format_view(self): newres = dict(self.analysis["analysis"]) newres["slope"] = "{:.4f}".format(newres["slope"]) newres["ema"] = "{:.8f}".format(newres["ema"]) return newres
[ "collections.OrderedDict", "datetime.datetime.strptime", "numpy.array", "baseindicator.BaseIndicator.__init__" ]
[((455, 499), 'baseindicator.BaseIndicator.__init__', 'BaseIndicator.__init__', (['self', 'csdata', 'config'], {}), '(self, csdata, config)\n', (477, 499), False, 'from baseindicator import BaseIndicator\n'), ((2563, 2576), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2574, 2576), False, 'from collections import OrderedDict\n'), ((1567, 1587), 'numpy.array', 'numpy.array', (['smetric'], {}), '(smetric)\n', (1578, 1587), False, 'import os, sys, talib, numpy, math, logging, time, datetime, numbers\n'), ((934, 1006), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["self.csdata['time'][i]", '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(self.csdata['time'][i], '%Y-%m-%dT%H:%M:%SZ')\n", (960, 1006), False, 'import os, sys, talib, numpy, math, logging, time, datetime, numbers\n')]
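The indicator above ultimately delegates to talib.EMA over the configured metric; a minimal standalone sketch with made-up closing prices, independent of the BaseIndicator scaffolding:

import numpy
import talib

closes = numpy.array([10.0, 10.2, 10.1, 10.4, 10.3, 10.6, 10.8, 10.7, 11.0, 11.2])
ema = talib.EMA(closes, timeperiod=5)   # the first timeperiod-1 entries are NaN
print(ema[-1])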
from http.server import HTTPServer,BaseHTTPRequestHandler import signal import sys class Server(BaseHTTPRequestHandler) : def _set_response(self): self.send_response(200) self.send_header('Content-type', 'text/html') self.end_headers() def do_POST(self): content_length = int(self.headers['Content-Length']) data = self.rfile.read(content_length).decode('utf-8') dataSplit = data.split(" ") peerId = str(dataSplit[0]) count = str(dataSplit[-1]) peerLogFile = "server-"+peerId+".log" def send_response(): self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(("ACK "+count).encode("utf-8")) try: with open(peerLogFile, "r") as f: lastEntry = f.readlines()[-1] lastId = str(lastEntry.split(" ")[-1]) if int(lastId) > int(count): send_response() return except (FileNotFoundError, IndexError): print("No server.log file available yet.") with open(peerLogFile, "a") as f: print(str(data)) f.write(str(data) + "\n") f.close() send_response() def stop_server(server): print("Stop server.") server.server_close() sys.exit(0) def run(server_class=HTTPServer, handler_class=Server): print("Start server on port 8000.") server_address = ('', 8000) httpd = server_class(server_address, handler_class) httpd.handle_request() try: httpd.serve_forever() except KeyboardInterrupt : stop_server(httpd) if __name__ == "__main__": print("Create server") run()
[ "sys.exit" ]
[((1244, 1255), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1252, 1255), False, 'import sys\n')]
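A client-side sketch for the POST handler above: the handler splits the body on spaces, takes the first token as the peer id and the last token as the counter, and replies with "ACK <count>". The peer id and count values here are made up.

from urllib import request

body = "42 some-payload 7".encode("utf-8")
with request.urlopen("http://localhost:8000", data=body) as resp:   # port 8000 as in run()
    print(resp.read().decode("utf-8"))   # expected: "ACK 7"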
from rest_framework import serializers from .models import Profile class ProfileSerializer(serializers.ModelSerializer): last_name = serializers.CharField(source='user.last_name') bio = serializers.CharField(allow_blank=True,required=False) #work_domain = serializers.CharField(max_length=50) image = serializers.SerializerMethodField() following = serializers.SerializerMethodField() class Meta: model = Profile fields = ('last_name','bio','image','following')#,'work_domain') read_only_fields = ('last_name',) def get_image(self,obj): if obj.image: return obj.image return 'https://image.flaticon.com/icons/svg/1738/1738691.svg' def get_following(self,instance): request = self.context.get('request',None) if request is None: return False if not request.user.is_authenticated: return False follower = request.user.profile followee = instance return follower.is_following(followee)
[ "rest_framework.serializers.CharField", "rest_framework.serializers.SerializerMethodField" ]
[((138, 184), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""user.last_name"""'}), "(source='user.last_name')\n", (159, 184), False, 'from rest_framework import serializers\n'), ((195, 250), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'allow_blank': '(True)', 'required': '(False)'}), '(allow_blank=True, required=False)\n', (216, 250), False, 'from rest_framework import serializers\n'), ((318, 353), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (351, 353), False, 'from rest_framework import serializers\n'), ((370, 405), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (403, 405), False, 'from rest_framework import serializers\n')]
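A serialization sketch as it might appear in a DRF view; the view class, URL wiring, and the import path of the serializer are illustrative, not taken from the project. Passing the request in the context is what drives the computed following field.

from rest_framework.views import APIView
from rest_framework.response import Response
from .serializers import ProfileSerializer   # assumed location of the module above

class ProfileDetail(APIView):                 # illustrative view, not from the project
    def get(self, request, *args, **kwargs):
        profile = request.user.profile        # assumes the authenticated user's related Profile
        serializer = ProfileSerializer(profile, context={'request': request})
        return Response(serializer.data)   # last_name, bio, image, following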
import os import Token from xml.dom import minidom class ProcessDoc: def __init__(self, path, lemma, stem): self.collection_dic = {} self.doc_info = [] self.lemma = lemma self.stem = stem self.path = path def run(self): for filename in os.listdir(self.path): doc_no, doclen, max_tf, doc_dic = self.load_file(os.path.join(self.path, filename)) for key, value in doc_dic.items(): # add token to dictionary if it does not exist yet if key not in self.collection_dic: self.collection_dic[key] = [1, value, [doc_no, value, doclen, max_tf]] # otherwise increase the document frequency and accumulate the term frequency else: self.collection_dic[key][0] += 1 self.collection_dic[key][1] += value self.collection_dic[key].append([doc_no, value, doclen, max_tf]) def load_file(self, url): # parse xml doc from the url mydoc = minidom.parse(url) # read doc NO doc = mydoc.getElementsByTagName('DOCNO')[0] doc_no = int(doc.firstChild.data) # read doc text file text = mydoc.getElementsByTagName('TEXT')[0] data = text.firstChild.data token = Token.Token() if self.lemma == 1: token.apply_lemma() elif self.stem == 1: token.apply_stemming() return token.tokenize(data, doc_no)
[ "Token.Token", "xml.dom.minidom.parse", "os.path.join", "os.listdir" ]
[((295, 316), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (305, 316), False, 'import os\n'), ((1036, 1054), 'xml.dom.minidom.parse', 'minidom.parse', (['url'], {}), '(url)\n', (1049, 1054), False, 'from xml.dom import minidom\n'), ((1309, 1322), 'Token.Token', 'Token.Token', ([], {}), '()\n', (1320, 1322), False, 'import Token\n'), ((379, 412), 'os.path.join', 'os.path.join', (['self.path', 'filename'], {}), '(self.path, filename)\n', (391, 412), False, 'import os\n')]
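A usage sketch, assuming a directory of the expected XML files (each with DOCNO and TEXT elements), the accompanying Token module on the path, and that the file above is saved as process_doc.py; the directory path is a placeholder.

from process_doc import ProcessDoc   # assumed module name for the file above

indexer = ProcessDoc("/path/to/xml/docs", lemma=1, stem=0)
indexer.run()
print(len(indexer.collection_dic), "distinct terms in the collection")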
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('projects', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='UpdateRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('requested_by', models.ManyToManyField(related_name=b'requested_availability_updates', to=settings.AUTH_USER_MODEL)), ('user', models.ForeignKey(related_name=b'update_requests', to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='Week', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('date', models.DateField()), ('allocation', models.IntegerField(default=0)), ('proposed', models.ManyToManyField(related_name=b'availability_weeks', to='projects.ProposedResource')), ('user', models.ForeignKey(related_name=b'availability_weeks', to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ('date',), }, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='week', unique_together=set([('user', 'date')]), ), ]
[ "django.db.migrations.swappable_dependency", "django.db.models.ManyToManyField", "django.db.models.ForeignKey", "django.db.models.AutoField", "django.db.models.IntegerField", "django.db.models.DateField" ]
[((248, 305), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (279, 305), False, 'from django.db import models, migrations\n'), ((443, 536), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (459, 536), False, 'from django.db import models, migrations\n'), ((568, 672), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': "b'requested_availability_updates'", 'to': 'settings.AUTH_USER_MODEL'}), "(related_name=b'requested_availability_updates', to=\n settings.AUTH_USER_MODEL)\n", (590, 672), False, 'from django.db import models, migrations\n'), ((695, 774), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'related_name': "b'update_requests'", 'to': 'settings.AUTH_USER_MODEL'}), "(related_name=b'update_requests', to=settings.AUTH_USER_MODEL)\n", (712, 774), False, 'from django.db import models, migrations\n'), ((976, 1069), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (992, 1069), False, 'from django.db import models, migrations\n'), ((1093, 1111), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1109, 1111), False, 'from django.db import models, migrations\n'), ((1145, 1175), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1164, 1175), False, 'from django.db import models, migrations\n'), ((1207, 1302), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': "b'availability_weeks'", 'to': '"""projects.ProposedResource"""'}), "(related_name=b'availability_weeks', to=\n 'projects.ProposedResource')\n", (1229, 1302), False, 'from django.db import models, migrations\n'), ((1325, 1412), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'related_name': "b'availability_weeks'", 'to': 'settings.AUTH_USER_MODEL'}), "(related_name=b'availability_weeks', to=settings.\n AUTH_USER_MODEL)\n", (1342, 1412), False, 'from django.db import models, migrations\n')]
# config.py import os import datetime import argparse result_path = "results/" result_path = os.path.join(result_path, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S/')) parser = argparse.ArgumentParser(description='Your project title goes here') # ======================== Data Setings ============================================ parser.add_argument('--dataset-test', type=str, default='CIFAR10', metavar='', help='name of training dataset') parser.add_argument('--dataset-train', type=str, default='CIFAR10', metavar='', help='name of training dataset') parser.add_argument('--split_test', type=float, default=None, metavar='', help='percentage of test dataset to split') parser.add_argument('--split_train', type=float, default=None, metavar='', help='percentage of train dataset to split') parser.add_argument('--dataroot', type=str, default='../../data', metavar='', help='path to the data') parser.add_argument('--save', type=str, default=result_path +'Save', metavar='', help='save the trained models here') parser.add_argument('--logs', type=str, default=result_path +'Logs', metavar='', help='save the training log files here') parser.add_argument('--resume', type=str, default=None, metavar='', help='full path of models to resume training') parser.add_argument('--nclasses', type=int, default=10, metavar='', help='number of classes for classification') parser.add_argument('--input-filename-test', type=str, default=None, metavar='', help='input test filename for filelist and folderlist') parser.add_argument('--label-filename-test', type=str, default=None, metavar='', help='label test filename for filelist and folderlist') parser.add_argument('--input-filename-train', type=str, default=None, metavar='', help='input train filename for filelist and folderlist') parser.add_argument('--label-filename-train', type=str, default=None, metavar='', help='label train filename for filelist and folderlist') parser.add_argument('--loader-input', type=str, default=None, metavar='', help='input loader') parser.add_argument('--loader-label', type=str, default=None, metavar='', help='label loader') # ======================== Network Model Setings =================================== parser.add_argument('--nblocks', type=int, default=10, metavar='', help='number of blocks in each layer') parser.add_argument('--nlayers', type=int, default=6, metavar='', help='number of layers') parser.add_argument('--nchannels', type=int, default=3, metavar='', help='number of input channels') parser.add_argument('--nfilters', type=int, default=64, metavar='', help='number of filters in each layer') parser.add_argument('--avgpool', type=int, default=1, metavar='', help='set to 7 for imagenet and 1 for cifar10') parser.add_argument('--level', type=float, default=0.1, metavar='', help='noise level for uniform noise') parser.add_argument('--resolution-high', type=int, default=32, metavar='', help='image resolution height') parser.add_argument('--resolution-wide', type=int, default=32, metavar='', help='image resolution width') parser.add_argument('--ndim', type=int, default=None, metavar='', help='number of feature dimensions') parser.add_argument('--nunits', type=int, default=None, metavar='', help='number of units in hidden layers') parser.add_argument('--dropout', type=float, default=None, metavar='', help='dropout parameter') parser.add_argument('--net-type', type=str, default='noiseresnet18', metavar='', help='type of network') parser.add_argument('--length-scale', type=float, default=None, metavar='', help='length scale') 
parser.add_argument('--tau', type=float, default=None, metavar='', help='Tau') # ======================== Training Settings ======================================= parser.add_argument('--cuda', type=bool, default=True, metavar='', help='run on gpu') parser.add_argument('--ngpu', type=int, default=1, metavar='', help='number of gpus to use') parser.add_argument('--batch-size', type=int, default=64, metavar='', help='batch size for training') parser.add_argument('--nepochs', type=int, default=500, metavar='', help='number of epochs to train') parser.add_argument('--niters', type=int, default=None, metavar='', help='number of iterations at test time') parser.add_argument('--epoch-number', type=int, default=None, metavar='', help='epoch number') parser.add_argument('--nthreads', type=int, default=20, metavar='', help='number of threads for data loading') parser.add_argument('--manual-seed', type=int, default=1, metavar='', help='manual seed for randomness') parser.add_argument('--port', type=int, default=8097, metavar='', help='port for visualizing training at http://localhost:port') # ======================== Hyperparameter Setings ================================== parser.add_argument('--optim-method', type=str, default='Adam', metavar='', help='the optimization routine ') parser.add_argument('--learning-rate', type=float, default=1e-3, metavar='', help='learning rate') parser.add_argument('--learning-rate-decay', type=float, default=None, metavar='', help='learning rate decay') parser.add_argument('--momentum', type=float, default=0.9, metavar='', help='momentum') parser.add_argument('--weight-decay', type=float, default=1e-4, metavar='', help='weight decay') parser.add_argument('--adam-beta1', type=float, default=0.9, metavar='', help='Beta 1 parameter for Adam') parser.add_argument('--adam-beta2', type=float, default=0.999, metavar='', help='Beta 2 parameter for Adam') args = parser.parse_args()
[ "datetime.datetime.now", "argparse.ArgumentParser" ]
[((186, 253), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Your project title goes here"""'}), "(description='Your project title goes here')\n", (209, 253), False, 'import argparse\n'), ((120, 143), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (141, 143), False, 'import datetime\n')]
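A sketch of how the parsed options are typically consumed elsewhere in the project, assuming the file above is config.py; note that importing it parses sys.argv at import time, so unrecognized command-line flags would make argparse exit. The train.py invocation shown in the comment is illustrative only.

from config import args   # argument parsing happens at import time

print(args.dataset_train, args.batch_size, args.learning_rate)
print(args.save, args.logs)   # timestamped result directories built above

# overriding defaults from the command line (illustrative invocation):
#   python train.py --batch-size 128 --nepochs 100 --net-type noiseresnet18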
from openprocurement.api.models import Organization as BaseOrganization from openprocurement.tender.cfaselectionua.models.submodels.contactpoint import ContactPoint from schematics.types import StringType from schematics.types.compound import ModelType from openprocurement.api.roles import RolesFromCsv from openprocurement.api.models import ListType class Organization(BaseOrganization): """An organization.""" contactPoint = ModelType(ContactPoint) class ProcuringEntity(Organization): """An organization.""" class Options: roles = RolesFromCsv('ProcuringEntity.csv', relative_to=__file__) kind = StringType(choices=['general', 'special', 'defense', 'other']) additionalContactPoints = ListType( ModelType(ContactPoint, required=True), required=False )
[ "schematics.types.StringType", "schematics.types.compound.ModelType", "openprocurement.api.roles.RolesFromCsv" ]
[((437, 460), 'schematics.types.compound.ModelType', 'ModelType', (['ContactPoint'], {}), '(ContactPoint)\n', (446, 460), False, 'from schematics.types.compound import ModelType\n'), ((632, 694), 'schematics.types.StringType', 'StringType', ([], {'choices': "['general', 'special', 'defense', 'other']"}), "(choices=['general', 'special', 'defense', 'other'])\n", (642, 694), False, 'from schematics.types import StringType\n'), ((563, 620), 'openprocurement.api.roles.RolesFromCsv', 'RolesFromCsv', (['"""ProcuringEntity.csv"""'], {'relative_to': '__file__'}), "('ProcuringEntity.csv', relative_to=__file__)\n", (575, 620), False, 'from openprocurement.api.roles import RolesFromCsv\n'), ((743, 781), 'schematics.types.compound.ModelType', 'ModelType', (['ContactPoint'], {'required': '(True)'}), '(ContactPoint, required=True)\n', (752, 781), False, 'from schematics.types.compound import ModelType\n')]
import os import glob files = open('dados.dll') data = files.read() files.close #desktop of user user_info = os.path.expanduser('~') location_default = os.path.expanduser('~\\Desktop') location = os.path.expanduser('~\\Desktop') desktop = os.path.expanduser(f'{location}').replace(f'{user_info}', f'{data}') os.chdir(location) help_git = ''' These are common Git commands used in various situations: start a working area (see also: git help tutorial) clone Clone a repository into a new directory init Create an empty Git repository or reinitialize an existing one work on the current change (see also: git help everyday) add Add file contents to the index mv Move or rename a file, a directory, or a symlink reset Reset current HEAD to the specified state rm Remove files from the working tree and from the index examine the history and state (see also: git help revisions) bisect Use binary search to find the commit that introduced a bug grep Print lines matching a pattern log Show commit logs show Show various types of objects status Show the working tree status grow, mark and tweak your common history branch List, create, or delete branches checkout Switch branches or restore working tree files commit Record changes to the repository diff Show changes between commits, commit and working tree, etc merge Join two or more development histories together rebase Reapply commits on top of another base tip tag Create, list, delete or verify a tag object signed with GPG collaborate (see also: git help workflows) fetch Download objects and refs from another repository pull Fetch from and integrate with another repository or a local branch push Update remote refs along with associated objects 'git help -a' and 'git help -g' list available subcommands and some concept guides. See 'git help <command>' or 'git help <concept>' to read about a specific subcommand or concept. ''' command_list = ''' List of Commands wget missing URL git help -g' list available subcommands cat list content in file cd browse directories ls listing files clear clean terminal ''' while True: command = input(f'{desktop}:# ') if command == 'ls': #listing files os.chdir(location) print(location) location = os.getcwd() desktop = os.path.expanduser(f'{location}').replace(f'{user_info}', f'{data}') for file in glob.glob("*"): print(file) elif(command[0] == 'c' and command[1] == 'd'): #browsing the files location = str(command).replace("cd", "").replace(" ","") if(command.count("..")): os.chdir('../') location = os.getcwd() desktop = os.path.expanduser(f'{location}').replace(f'{user_info}', f'{data}') else: command = command.replace("cd ", "") os.chdir(command) location = os.getcwd() desktop = os.path.expanduser(f'{location}').replace(f'{user_info}', f'{data}') elif command == 'clear': #clean in terminal os.system('cls' if os.name == 'nt' else 'clear') elif command == 'refresh': #restart of terminal os.system('cls' if os.name == 'nt' else 'clear') os.system('python refresh.py') exit() elif(command.count('cat ')): command = command.replace("cat ", "") cat = open(f'{command}', 'r') content = cat.readlines() #listing content of file print('\n') for line in content: print(line) cat.close() print('\n') elif command[0] == 'g' and command[1] == 'i' and command[2] == 't': os.system(command) elif command == '': pass elif command == 'help' or command == 'commands': print(command_list) elif(command.count('wget ')): print(os.getcwd()) elif(command == 'pwd'): print(location)
[ "os.getcwd", "os.system", "glob.glob", "os.path.expanduser", "os.chdir" ]
[((111, 134), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (129, 134), False, 'import os\n'), ((155, 187), 'os.path.expanduser', 'os.path.expanduser', (['"""~\\\\Desktop"""'], {}), "('~\\\\Desktop')\n", (173, 187), False, 'import os\n'), ((199, 231), 'os.path.expanduser', 'os.path.expanduser', (['"""~\\\\Desktop"""'], {}), "('~\\\\Desktop')\n", (217, 231), False, 'import os\n'), ((312, 330), 'os.chdir', 'os.chdir', (['location'], {}), '(location)\n', (320, 330), False, 'import os\n'), ((242, 275), 'os.path.expanduser', 'os.path.expanduser', (['f"""{location}"""'], {}), "(f'{location}')\n", (260, 275), False, 'import os\n'), ((2392, 2410), 'os.chdir', 'os.chdir', (['location'], {}), '(location)\n', (2400, 2410), False, 'import os\n'), ((2462, 2473), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2471, 2473), False, 'import os\n'), ((2602, 2616), 'glob.glob', 'glob.glob', (['"""*"""'], {}), "('*')\n", (2611, 2616), False, 'import glob\n'), ((2496, 2529), 'os.path.expanduser', 'os.path.expanduser', (['f"""{location}"""'], {}), "(f'{location}')\n", (2514, 2529), False, 'import os\n'), ((2891, 2906), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (2899, 2906), False, 'import os\n'), ((2934, 2945), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2943, 2945), False, 'import os\n'), ((3142, 3159), 'os.chdir', 'os.chdir', (['command'], {}), '(command)\n', (3150, 3159), False, 'import os\n'), ((3187, 3198), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3196, 3198), False, 'import os\n'), ((3379, 3427), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (3388, 3427), False, 'import os\n'), ((3518, 3566), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (3527, 3566), False, 'import os\n'), ((3579, 3609), 'os.system', 'os.system', (['"""python refresh.py"""'], {}), "('python refresh.py')\n", (3588, 3609), False, 'import os\n'), ((2972, 3005), 'os.path.expanduser', 'os.path.expanduser', (['f"""{location}"""'], {}), "(f'{location}')\n", (2990, 3005), False, 'import os\n'), ((3225, 3258), 'os.path.expanduser', 'os.path.expanduser', (['f"""{location}"""'], {}), "(f'{location}')\n", (3243, 3258), False, 'import os\n'), ((4093, 4111), 'os.system', 'os.system', (['command'], {}), '(command)\n', (4102, 4111), False, 'import os\n'), ((4302, 4313), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4311, 4313), False, 'import os\n')]
# %%
import os
import sys
# os.chdir("../../..")
os.environ['DJANGO_SETTINGS_MODULE'] = 'MAKDataHub.settings'
import django
django.setup()

# %%
import math
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from MAKDataHub.services import Services

profile_service = Services.profile_service()
storage_service = Services.storage_service()
last_run = profile_service.get_last_profile_creation_run()

## New database
full_df: pd.DataFrame = pickle.load(last_run.unlock_data.open('rb'))
# full_df = full_df.loc[full_df.DeviceId != 3].reset_index(drop = True)

## Old database
# unlock_data_path = storage_service.download_file(last_run.unlock_data_uri)
# full_df: pd.DataFrame = pickle.load(open(unlock_data_path, 'rb'))
# full_df = full_df.loc[full_df.DeviceId != '1439cbc3ad71ac06'].reset_index(drop = True)

# %%
df = full_df.iloc[:, list(range(36)) + list(range(72, 108)) + list(range(108, 144)) + list(range(180, 216)) + [216]].reset_index(drop = True)
df = df.loc[(df.DeviceId != 4) & (df.DeviceId != 7), :].reset_index(drop = True)
X, y = df.iloc[:, 0:-1], df.iloc[:, -1]

#%%
full_df.shape

# %%
display(full_df.iloc[:, list(range(36)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(36, 72)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(72, 108)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(108, 144)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(144, 180)) + [216]].groupby('DeviceId').agg([np.min, np.max]))
display(full_df.iloc[:, list(range(180, 216)) + [216]].groupby('DeviceId').agg([np.min, np.max]))

# %%
display(full_df.iloc[:, list(range(36)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(36, 72)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(72, 108)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(108, 144)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(144, 180)) + [216]].groupby('DeviceId').agg([np.mean]))
display(full_df.iloc[:, list(range(180, 216)) + [216]].groupby('DeviceId').agg([np.mean]))

# %%
sns.boxplot(df.DeviceId, df.AccMgn_mean)

# %%
sns.boxplot(df.DeviceId, df.AccMgn_median)

# %%
sns.boxplot(df.DeviceId, df.GyrMgn_amax)

# %%
sns.pairplot(df.loc[df.DeviceId != 3, :], hue="DeviceId", vars=["AccMgn_mean", "AccMgn_median"], markers='.')

# %%
test = df.loc[df.DeviceId != '3', :]
sns.swarmplot(data = test, x="DeviceId", y="RotMgn_median")

# %%
test = full_df.loc[:, :]
sns.boxplot(data = test, x="DeviceId", y="GrvMgn_amax")

# %%
print('OneClassSVM')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = y[y == device_id]
    X_device = X.loc[y == device_id, :]
    X_non_device = X.loc[y != device_id, :]
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X_device, y_device, test_size=0.2)
    from sklearn.svm import OneClassSVM
    estimator = OneClassSVM(random_state = 12369)
    estimator.fit_predict(X_train)
    tp = np.mean(estimator.predict(X_test) == 1)
    fn = np.mean(estimator.predict(X_test) == -1)
    tn = np.mean(estimator.predict(X_non_device) == 1)
    fp = np.mean(estimator.predict(X_non_device) == 1)
    accuracy = (tp + tn) / (tp + tn + fn + fp)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    fscore = 2 * recall * precision / (recall + precision)
    accuracies.append(accuracy if not np.isnan(accuracy) else 0)
    precisions.append(precision if not np.isnan(precision) else 0)
    recalls.append(recall if not np.isnan(recall) else 0)
    fscores.append(fscore if not np.isnan(fscore) else 0)
    print(f'{device_id} - accuracy: {round(accuracy, 2)}, precision: {round(precision, 2)}, recall: {round(recall, 2)}')
    # print(f'{device_id} - Class acc: {round(np.mean(estimator.predict(X_test) == 1), 2)}, non-class acc: {round(np.mean(estimator.predict(X_non_device) == -1), 2)}')
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('IsolationForest')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = y[y == device_id]
    X_device = X.loc[y == device_id, :]
    X_non_device = X.loc[y != device_id, :]
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X_device, y_device, test_size=0.2)
    from sklearn.ensemble import IsolationForest
    estimator = IsolationForest(n_estimators = 10)
    estimator.fit(X_train)
    tp = np.mean(estimator.predict(X_test) == 1)
    fn = np.mean(estimator.predict(X_test) == -1)
    tn = np.mean(estimator.predict(X_non_device) == 1)
    fp = np.mean(estimator.predict(X_non_device) == 1)
    accuracy = (tp + tn) / (tp + tn + fn + fp)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    fscore = 2 * recall * precision / (recall + precision)
    accuracies.append(accuracy if not np.isnan(accuracy) else 0)
    precisions.append(precision if not np.isnan(precision) else 0)
    recalls.append(recall if not np.isnan(recall) else 0)
    fscores.append(fscore if not np.isnan(fscore) else 0)
    print(f'{device_id} - accuracy: {round(accuracy, 2)}, precision: {round(precision, 2)}, recall: {round(recall, 2)}')
    # print(f'{device_id} - Class acc: {round(np.mean(estimator.predict(X_device) == 1), 2)}, non-class acc: {round(np.mean(estimator.predict(X_non_device) == -1), 2)}')
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('LOF')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = y[y == device_id]
    X_device = X.loc[y == device_id, :]
    X_non_device = X.loc[y != device_id, :]
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X_device, y_device, test_size=0.2)
    from sklearn.neighbors import LocalOutlierFactor
    estimator = LocalOutlierFactor(n_neighbors = 10, novelty = True, contamination = 'auto')
    estimator.fit(X_train)
    tp = np.mean(estimator.predict(X_test) == 1)
    fn = np.mean(estimator.predict(X_test) == -1)
    tn = np.mean(estimator.predict(X_non_device) == 1)
    fp = np.mean(estimator.predict(X_non_device) == 1)
    accuracy = (tp + tn) / (tp + tn + fn + fp)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    fscore = 2 * recall * precision / (recall + precision)
    accuracies.append(accuracy if not np.isnan(accuracy) else 0)
    precisions.append(precision if not np.isnan(precision) else 0)
    recalls.append(recall if not np.isnan(recall) else 0)
    fscores.append(fscore if not np.isnan(fscore) else 0)
    print(f'{device_id} - accuracy: {round(accuracy, 2)}, precision: {round(precision, 2)}, recall: {round(recall, 2)}')
    # print(f'{device_id} - Class acc: {round(np.mean(estimator.predict(X_device) == 1), 2)}, non-class acc: {round(np.mean(estimator.predict(X_non_device) == -1), 2)}')
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('LinearSVC')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.svm import LinearSVC
    estimator = LinearSVC(random_state = 12369)
    estimator.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(classification_report(y_test, estimator.predict(X_test)))
    report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('KNeighborsClassifier')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.neighbors import KNeighborsClassifier
    estimator = KNeighborsClassifier()
    estimator.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(classification_report(y_test, estimator.predict(X_test)))
    report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('GaussianNB')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.naive_bayes import GaussianNB
    estimator = GaussianNB()
    estimator.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(classification_report(y_test, estimator.predict(X_test)))
    report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(random_state = 12369)
    estimator.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(classification_report(y_test, estimator.predict(X_test)))
    report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier - global model')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 12369)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(random_state = 12369)
estimator.fit(X_train, y_train)
from sklearn.metrics import classification_report
print(classification_report(y_test, estimator.predict(X_test)))

# %%
print('RandomForestClassifier - standardized')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.preprocessing import StandardScaler
    X_std = StandardScaler().fit_transform(X)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X_std, y_device, test_size=0.2, random_state = 12369)
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(random_state = 12369)
    estimator.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(classification_report(y_test, estimator.predict(X_test)))
    report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + RFECV')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 12369)
from yellowbrick.model_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
selector = RFECV(estimator, cv = 5, scoring='f1_weighted', step = 0.05)
selector.fit(X_train, y_train)
selector.show()
from sklearn.metrics import classification_report
print(classification_report(y_test, selector.predict(X_test)))

# %%
print('RandomForestClassifier + RFE20')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.feature_selection import RFE
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
    selector.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, selector.predict(X_test)))
    report = classification_report(y_test, selector.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + SelectFromModel')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.feature_selection import SelectFromModel
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    selector = SelectFromModel(estimator, max_features = 20)
    selector.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, selector.estimator_.predict(X_test)))
    report = classification_report(y_test, selector.estimator_.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + PCA')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.decomposition import PCA
    pca = PCA(n_components=20).fit(X_train)
    X_train = pca.transform(X_train)
    X_test = pca.transform(X_test)
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    estimator.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, estimator.predict(X_test)))
    report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + SelectKBest (f_classif)')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.feature_selection import SelectKBest, f_classif
    selector = SelectKBest(score_func = f_classif, k=20).fit(X_train, y_train)
    X_train = selector.transform(X_train)
    X_test = selector.transform(X_test)
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    estimator.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, estimator.predict(X_test)))
    report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + SelectKBest (mutual_info_classif)')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.feature_selection import SelectKBest, mutual_info_classif
    selector = SelectKBest(score_func = mutual_info_classif, k=20).fit(X_train, y_train)
    X_train = selector.transform(X_train)
    X_test = selector.transform(X_test)
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    estimator.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, estimator.predict(X_test)))
    report = classification_report(y_test, estimator.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + RandomUnderSampler')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from imblearn.under_sampling import RandomUnderSampler
    X_oversampled, y_oversampled = RandomUnderSampler().fit_resample(X_train, y_train)
    from sklearn.feature_selection import RFE
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
    selector.fit(X_oversampled, y_oversampled)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, selector.predict(X_test)))
    report = classification_report(y_test, selector.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + RandomOverSampler')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from imblearn.over_sampling import RandomOverSampler
    X_oversampled, y_oversampled = RandomOverSampler().fit_resample(X_train, y_train)
    from sklearn.feature_selection import RFE
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
    selector.fit(X_oversampled, y_oversampled)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, selector.predict(X_test)))
    report = classification_report(y_test, selector.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + SMOTE')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from imblearn.over_sampling import SMOTE
    X_oversampled, y_oversampled = SMOTE().fit_resample(X_train, y_train)
    from sklearn.feature_selection import RFE
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
    selector.fit(X_oversampled, y_oversampled)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, selector.predict(X_test)))
    report = classification_report(y_test, selector.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + SMOTEENN')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from imblearn.combine import SMOTEENN
    X_oversampled, y_oversampled = SMOTEENN().fit_resample(X_train, y_train)
    from sklearn.feature_selection import RFE
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
    selector.fit(X_oversampled, y_oversampled)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, selector.predict(X_test)))
    report = classification_report(y_test, selector.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('RandomForestClassifier + SMOTETomek')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from imblearn.combine import SMOTETomek
    X_oversampled, y_oversampled = SMOTETomek().fit_resample(X_train, y_train)
    from sklearn.feature_selection import RFE
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(n_estimators = 10, random_state = 12369)
    selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
    selector.fit(X_oversampled, y_oversampled)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, selector.predict(X_test)))
    report = classification_report(y_test, selector.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('BalancedRandomForestClassifier')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from sklearn.feature_selection import RFE
    from imblearn.ensemble import BalancedRandomForestClassifier
    estimator = BalancedRandomForestClassifier(n_estimators = 10, random_state = 12369)
    selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
    selector.fit(X_train, y_train)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, selector.predict(X_test)))
    report = classification_report(y_test, selector.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
print('Hyperparameter tuning')
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from imblearn.combine import SMOTETomek
    X_oversampled, y_oversampled = SMOTETomek().fit_resample(X_train, y_train)
    from sklearn.feature_selection import RFE
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(random_state = 12369, \
        n_estimators = 50, min_samples_leaf = 1, \
        min_samples_split = 2, \
        bootstrap = False, \
        max_features = 'sqrt', \
        max_depth = 20)
    selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
    # from sklearn.model_selection import GridSearchCV
    # param_grid = {
    #     'estimator__n_estimators': [10, 50, 100, 200, 500],
    #     'estimator__max_features': ['auto', 'sqrt', 'log2'],
    #     'estimator__max_depth': [4, 5, 6, 7, 8],
    #     'estimator__criterion': ['gini', 'entropy']
    # }
    from sklearn.model_selection import RandomizedSearchCV
    param_grid = {
        'estimator__n_estimators': [10, 20, 50, 100],
        'estimator__max_features': ['auto', 'sqrt', 'log2'],
        'estimator__max_depth': [int(x) for x in np.linspace(2, 20, num = 2)] + [None],
        'estimator__min_samples_split': [2, 3, 4, 5],
        'estimator__min_samples_leaf': [1, 2, 3],
        'estimator__bootstrap': [True, False]
    }
    grid = RandomizedSearchCV(estimator = selector, \
        param_distributions = param_grid, \
        n_iter = 100, \
        cv = 3, \
        verbose = 2, \
        random_state = 42, \
        n_jobs = -1)
    grid.fit(X_oversampled, y_oversampled)
    print(grid.best_params_)

# %%
print('RandomForestClassifier + SMOTETomek + parameters')
accuracies = []
precisions = []
recalls = []
fscores = []
for device_id in y.unique():
    y_device = np.where(y == device_id, 1, 0)
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y_device, test_size=0.2, random_state = 12369)
    from imblearn.combine import SMOTETomek
    X_oversampled, y_oversampled = SMOTETomek().fit_resample(X_train, y_train)
    from sklearn.feature_selection import RFE
    from sklearn.ensemble import RandomForestClassifier
    estimator = RandomForestClassifier(random_state = 12369, \
        n_estimators = 50, min_samples_leaf = 1, \
        min_samples_split = 2, \
        bootstrap = False, \
        max_features = 'sqrt', \
        max_depth = 20)
    selector = RFE(estimator, n_features_to_select = 20, step = 0.05)
    selector.fit(X_oversampled, y_oversampled)
    from sklearn.metrics import classification_report
    print(f'Device {device_id}:')
    print(classification_report(y_test, selector.predict(X_test)))
    report = classification_report(y_test, selector.predict(X_test), output_dict=True)
    accuracies.append(report['accuracy'])
    precisions.append(report['1']['precision'])
    recalls.append(report['1']['recall'])
    fscores.append(report['1']['f1-score'])
print(f'Accuracy: {round(np.mean(accuracies), 2)}, precision: {round(np.mean(precisions), 2)}, recall: {round(np.mean(recalls), 2)}, fscore: {round(np.mean(fscores), 2)}')

# %%
[ "django.setup", "sklearn.preprocessing.StandardScaler", "sklearn.model_selection.train_test_split", "sklearn.neighbors.LocalOutlierFactor", "sklearn.feature_selection.RFE", "numpy.isnan", "sklearn.feature_selection.SelectFromModel", "numpy.mean", "seaborn.pairplot", "sklearn.model_selection.RandomizedSearchCV", "seaborn.swarmplot", "MAKDataHub.services.Services.storage_service", "MAKDataHub.services.Services.profile_service", "numpy.linspace", "sklearn.svm.LinearSVC", "sklearn.svm.OneClassSVM", "sklearn.ensemble.RandomForestClassifier", "imblearn.under_sampling.RandomUnderSampler", "imblearn.combine.SMOTETomek", "imblearn.ensemble.BalancedRandomForestClassifier", "seaborn.boxplot", "imblearn.over_sampling.SMOTE", "sklearn.naive_bayes.GaussianNB", "sklearn.ensemble.IsolationForest", "yellowbrick.model_selection.RFECV", "imblearn.combine.SMOTEENN", "imblearn.over_sampling.RandomOverSampler", "sklearn.neighbors.KNeighborsClassifier", "numpy.where", "sklearn.decomposition.PCA", "sklearn.feature_selection.SelectKBest" ]
[((124, 138), 'django.setup', 'django.setup', ([], {}), '()\n', (136, 138), False, 'import django\n'), ((325, 351), 'MAKDataHub.services.Services.profile_service', 'Services.profile_service', ([], {}), '()\n', (349, 351), False, 'from MAKDataHub.services import Services\n'), ((370, 396), 'MAKDataHub.services.Services.storage_service', 'Services.storage_service', ([], {}), '()\n', (394, 396), False, 'from MAKDataHub.services import Services\n'), ((2287, 2327), 'seaborn.boxplot', 'sns.boxplot', (['df.DeviceId', 'df.AccMgn_mean'], {}), '(df.DeviceId, df.AccMgn_mean)\n', (2298, 2327), True, 'import seaborn as sns\n'), ((2334, 2376), 'seaborn.boxplot', 'sns.boxplot', (['df.DeviceId', 'df.AccMgn_median'], {}), '(df.DeviceId, df.AccMgn_median)\n', (2345, 2376), True, 'import seaborn as sns\n'), ((2383, 2423), 'seaborn.boxplot', 'sns.boxplot', (['df.DeviceId', 'df.GyrMgn_amax'], {}), '(df.DeviceId, df.GyrMgn_amax)\n', (2394, 2423), True, 'import seaborn as sns\n'), ((2430, 2544), 'seaborn.pairplot', 'sns.pairplot', (['df.loc[df.DeviceId != 3, :]'], {'hue': '"""DeviceId"""', 'vars': "['AccMgn_mean', 'AccMgn_median']", 'markers': '"""."""'}), "(df.loc[df.DeviceId != 3, :], hue='DeviceId', vars=[\n 'AccMgn_mean', 'AccMgn_median'], markers='.')\n", (2442, 2544), True, 'import seaborn as sns\n'), ((2583, 2640), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'data': 'test', 'x': '"""DeviceId"""', 'y': '"""RotMgn_median"""'}), "(data=test, x='DeviceId', y='RotMgn_median')\n", (2596, 2640), True, 'import seaborn as sns\n'), ((2674, 2727), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'test', 'x': '"""DeviceId"""', 'y': '"""GrvMgn_amax"""'}), "(data=test, x='DeviceId', y='GrvMgn_amax')\n", (2685, 2727), True, 'import seaborn as sns\n'), ((11807, 11864), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y, test_size=0.2, random_state=12369)\n', (11823, 11864), False, 'from sklearn.model_selection import train_test_split\n'), ((11933, 11975), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (11955, 11975), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((13439, 13496), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y, test_size=0.2, random_state=12369)\n', (13455, 13496), False, 'from sklearn.model_selection import train_test_split\n'), ((13611, 13670), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (13633, 13670), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((13686, 13742), 'yellowbrick.model_selection.RFECV', 'RFECV', (['estimator'], {'cv': '(5)', 'scoring': '"""f1_weighted"""', 'step': '(0.05)'}), "(estimator, cv=5, scoring='f1_weighted', step=0.05)\n", (13691, 13742), False, 'from yellowbrick.model_selection import RFECV\n'), ((3058, 3109), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_device', 'y_device'], {'test_size': '(0.2)'}), '(X_device, y_device, test_size=0.2)\n', (3074, 3109), False, 'from sklearn.model_selection import train_test_split\n'), ((3168, 3199), 'sklearn.svm.OneClassSVM', 'OneClassSVM', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (3179, 3199), False, 'from sklearn.svm import OneClassSVM\n'), ((4658, 4709), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['X_device', 'y_device'], {'test_size': '(0.2)'}), '(X_device, y_device, test_size=0.2)\n', (4674, 4709), False, 'from sklearn.model_selection import train_test_split\n'), ((4777, 4809), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (4792, 4809), False, 'from sklearn.ensemble import IsolationForest\n'), ((6250, 6301), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_device', 'y_device'], {'test_size': '(0.2)'}), '(X_device, y_device, test_size=0.2)\n', (6266, 6301), False, 'from sklearn.model_selection import train_test_split\n'), ((6373, 6443), 'sklearn.neighbors.LocalOutlierFactor', 'LocalOutlierFactor', ([], {'n_neighbors': '(10)', 'novelty': '(True)', 'contamination': '"""auto"""'}), "(n_neighbors=10, novelty=True, contamination='auto')\n", (6391, 6443), False, 'from sklearn.neighbors import LocalOutlierFactor\n'), ((7695, 7725), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (7703, 7725), True, 'import numpy as np\n'), ((7823, 7887), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (7839, 7887), False, 'from sklearn.model_selection import train_test_split\n'), ((7946, 7975), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (7955, 7975), False, 'from sklearn.svm import LinearSVC\n'), ((8714, 8744), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (8722, 8744), True, 'import numpy as np\n'), ((8842, 8906), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (8858, 8906), False, 'from sklearn.model_selection import train_test_split\n'), ((8982, 9004), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (9002, 9004), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((9735, 9765), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (9743, 9765), True, 'import numpy as np\n'), ((9863, 9927), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (9879, 9927), False, 'from sklearn.model_selection import train_test_split\n'), ((9995, 10007), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (10005, 10007), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((10750, 10780), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (10758, 10780), True, 'import numpy as np\n'), ((10878, 10942), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (10894, 10942), False, 'from sklearn.model_selection import train_test_split\n'), ((11019, 11061), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (11041, 11061), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((12281, 12311), 'numpy.where', 'np.where', (['(y == 
device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (12289, 12311), True, 'import numpy as np\n'), ((12513, 12581), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_std', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X_std, y_device, test_size=0.2, random_state=12369)\n', (12529, 12581), False, 'from sklearn.model_selection import train_test_split\n'), ((12658, 12700), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)'}), '(random_state=12369)\n', (12680, 12700), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14058, 14088), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (14066, 14088), True, 'import numpy as np\n'), ((14186, 14250), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (14202, 14250), False, 'from sklearn.model_selection import train_test_split\n'), ((14377, 14436), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (14399, 14436), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14456, 14506), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (14459, 14506), False, 'from sklearn.feature_selection import RFE\n'), ((15302, 15332), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (15310, 15332), True, 'import numpy as np\n'), ((15430, 15494), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (15446, 15494), False, 'from sklearn.model_selection import train_test_split\n'), ((15629, 15688), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (15651, 15688), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((15708, 15751), 'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', (['estimator'], {'max_features': '(20)'}), '(estimator, max_features=20)\n', (15723, 15751), False, 'from sklearn.feature_selection import SelectFromModel\n'), ((16555, 16585), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (16563, 16585), True, 'import numpy as np\n'), ((16683, 16747), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (16699, 16747), False, 'from sklearn.model_selection import train_test_split\n'), ((16987, 17046), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (17009, 17046), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((17853, 17883), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (17861, 17883), True, 'import numpy as np\n'), ((17981, 18045), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], 
{'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (17997, 18045), False, 'from sklearn.model_selection import train_test_split\n'), ((18353, 18412), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (18375, 18412), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((19225, 19255), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (19233, 19255), True, 'import numpy as np\n'), ((19353, 19417), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (19369, 19417), False, 'from sklearn.model_selection import train_test_split\n'), ((19745, 19804), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (19767, 19804), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((20602, 20632), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (20610, 20632), True, 'import numpy as np\n'), ((20730, 20794), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (20746, 20794), False, 'from sklearn.model_selection import train_test_split\n'), ((21068, 21127), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (21090, 21127), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((21147, 21197), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (21150, 21197), False, 'from sklearn.feature_selection import RFE\n'), ((22007, 22037), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (22015, 22037), True, 'import numpy as np\n'), ((22135, 22199), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (22151, 22199), False, 'from sklearn.model_selection import train_test_split\n'), ((22470, 22529), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (22492, 22529), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((22549, 22599), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (22552, 22599), False, 'from sklearn.feature_selection import RFE\n'), ((23397, 23427), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (23405, 23427), True, 'import numpy as np\n'), ((23525, 23589), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (23541, 23589), False, 'from sklearn.model_selection import 
train_test_split\n'), ((23836, 23895), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (23858, 23895), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((23915, 23965), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (23918, 23965), False, 'from sklearn.feature_selection import RFE\n'), ((24766, 24796), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (24774, 24796), True, 'import numpy as np\n'), ((24894, 24958), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (24910, 24958), False, 'from sklearn.model_selection import train_test_split\n'), ((25205, 25264), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (25227, 25264), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((25284, 25334), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (25287, 25334), False, 'from sklearn.feature_selection import RFE\n'), ((26136, 26166), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (26144, 26166), True, 'import numpy as np\n'), ((26264, 26328), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (26280, 26328), False, 'from sklearn.model_selection import train_test_split\n'), ((26579, 26638), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (26601, 26638), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((26658, 26708), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (26661, 26708), False, 'from sklearn.feature_selection import RFE\n'), ((27505, 27535), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (27513, 27535), True, 'import numpy as np\n'), ((27633, 27697), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (27649, 27697), False, 'from sklearn.model_selection import train_test_split\n'), ((27833, 27900), 'imblearn.ensemble.BalancedRandomForestClassifier', 'BalancedRandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(12369)'}), '(n_estimators=10, random_state=12369)\n', (27863, 27900), False, 'from imblearn.ensemble import BalancedRandomForestClassifier\n'), ((27920, 27970), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (27923, 27970), False, 'from sklearn.feature_selection import RFE\n'), ((28688, 28718), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], 
{}), '(y == device_id, 1, 0)\n', (28696, 28718), True, 'import numpy as np\n'), ((28816, 28880), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (28832, 28880), False, 'from sklearn.model_selection import train_test_split\n'), ((29131, 29292), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)', 'n_estimators': '(50)', 'min_samples_leaf': '(1)', 'min_samples_split': '(2)', 'bootstrap': '(False)', 'max_features': '"""sqrt"""', 'max_depth': '(20)'}), "(random_state=12369, n_estimators=50,\n min_samples_leaf=1, min_samples_split=2, bootstrap=False, max_features=\n 'sqrt', max_depth=20)\n", (29153, 29292), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((29371, 29421), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (29374, 29421), False, 'from sklearn.feature_selection import RFE\n'), ((30190, 30321), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'selector', 'param_distributions': 'param_grid', 'n_iter': '(100)', 'cv': '(3)', 'verbose': '(2)', 'random_state': '(42)', 'n_jobs': '(-1)'}), '(estimator=selector, param_distributions=param_grid,\n n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)\n', (30208, 30321), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((30635, 30665), 'numpy.where', 'np.where', (['(y == device_id)', '(1)', '(0)'], {}), '(y == device_id, 1, 0)\n', (30643, 30665), True, 'import numpy as np\n'), ((30763, 30827), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_device'], {'test_size': '(0.2)', 'random_state': '(12369)'}), '(X, y_device, test_size=0.2, random_state=12369)\n', (30779, 30827), False, 'from sklearn.model_selection import train_test_split\n'), ((31078, 31239), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(12369)', 'n_estimators': '(50)', 'min_samples_leaf': '(1)', 'min_samples_split': '(2)', 'bootstrap': '(False)', 'max_features': '"""sqrt"""', 'max_depth': '(20)'}), "(random_state=12369, n_estimators=50,\n min_samples_leaf=1, min_samples_split=2, bootstrap=False, max_features=\n 'sqrt', max_depth=20)\n", (31100, 31239), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((31318, 31368), 'sklearn.feature_selection.RFE', 'RFE', (['estimator'], {'n_features_to_select': '(20)', 'step': '(0.05)'}), '(estimator, n_features_to_select=20, step=0.05)\n', (31321, 31368), False, 'from sklearn.feature_selection import RFE\n'), ((12378, 12394), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12392, 12394), False, 'from sklearn.preprocessing import StandardScaler\n'), ((16807, 16827), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(20)'}), '(n_components=20)\n', (16810, 16827), False, 'from sklearn.decomposition import PCA\n'), ((18133, 18172), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'f_classif', 'k': '(20)'}), '(score_func=f_classif, k=20)\n', (18144, 18172), False, 'from sklearn.feature_selection import SelectKBest, mutual_info_classif\n'), ((19515, 19564), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'mutual_info_classif', 'k': '(20)'}), '(score_func=mutual_info_classif, k=20)\n', (19526, 19564), 
False, 'from sklearn.feature_selection import SelectKBest, mutual_info_classif\n'), ((20892, 20912), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {}), '()\n', (20910, 20912), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((22295, 22314), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {}), '()\n', (22312, 22314), False, 'from imblearn.over_sampling import RandomOverSampler\n'), ((23673, 23680), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {}), '()\n', (23678, 23680), False, 'from imblearn.over_sampling import SMOTE\n'), ((25039, 25049), 'imblearn.combine.SMOTEENN', 'SMOTEENN', ([], {}), '()\n', (25047, 25049), False, 'from imblearn.combine import SMOTEENN\n'), ((26411, 26423), 'imblearn.combine.SMOTETomek', 'SMOTETomek', ([], {}), '()\n', (26421, 26423), False, 'from imblearn.combine import SMOTETomek\n'), ((28963, 28975), 'imblearn.combine.SMOTETomek', 'SMOTETomek', ([], {}), '()\n', (28973, 28975), False, 'from imblearn.combine import SMOTETomek\n'), ((30910, 30922), 'imblearn.combine.SMOTETomek', 'SMOTETomek', ([], {}), '()\n', (30920, 30922), False, 'from imblearn.combine import SMOTETomek\n'), ((3652, 3670), 'numpy.isnan', 'np.isnan', (['accuracy'], {}), '(accuracy)\n', (3660, 3670), True, 'import numpy as np\n'), ((3718, 3737), 'numpy.isnan', 'np.isnan', (['precision'], {}), '(precision)\n', (3726, 3737), True, 'import numpy as np\n'), ((3779, 3795), 'numpy.isnan', 'np.isnan', (['recall'], {}), '(recall)\n', (3787, 3795), True, 'import numpy as np\n'), ((3837, 3853), 'numpy.isnan', 'np.isnan', (['fscore'], {}), '(fscore)\n', (3845, 3853), True, 'import numpy as np\n'), ((4178, 4197), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (4185, 4197), True, 'import numpy as np\n'), ((4222, 4241), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (4229, 4241), True, 'import numpy as np\n'), ((4263, 4279), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (4270, 4279), True, 'import numpy as np\n'), ((4301, 4317), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (4308, 4317), True, 'import numpy as np\n'), ((5254, 5272), 'numpy.isnan', 'np.isnan', (['accuracy'], {}), '(accuracy)\n', (5262, 5272), True, 'import numpy as np\n'), ((5320, 5339), 'numpy.isnan', 'np.isnan', (['precision'], {}), '(precision)\n', (5328, 5339), True, 'import numpy as np\n'), ((5381, 5397), 'numpy.isnan', 'np.isnan', (['recall'], {}), '(recall)\n', (5389, 5397), True, 'import numpy as np\n'), ((5439, 5455), 'numpy.isnan', 'np.isnan', (['fscore'], {}), '(fscore)\n', (5447, 5455), True, 'import numpy as np\n'), ((5782, 5801), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (5789, 5801), True, 'import numpy as np\n'), ((5826, 5845), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (5833, 5845), True, 'import numpy as np\n'), ((5867, 5883), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (5874, 5883), True, 'import numpy as np\n'), ((5905, 5921), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (5912, 5921), True, 'import numpy as np\n'), ((6892, 6910), 'numpy.isnan', 'np.isnan', (['accuracy'], {}), '(accuracy)\n', (6900, 6910), True, 'import numpy as np\n'), ((6958, 6977), 'numpy.isnan', 'np.isnan', (['precision'], {}), '(precision)\n', (6966, 6977), True, 'import numpy as np\n'), ((7019, 7035), 'numpy.isnan', 'np.isnan', (['recall'], {}), '(recall)\n', (7027, 7035), True, 'import numpy as np\n'), ((7077, 7093), 'numpy.isnan', 'np.isnan', 
(['fscore'], {}), '(fscore)\n', (7085, 7093), True, 'import numpy as np\n'), ((7420, 7439), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (7427, 7439), True, 'import numpy as np\n'), ((7464, 7483), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (7471, 7483), True, 'import numpy as np\n'), ((7505, 7521), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (7512, 7521), True, 'import numpy as np\n'), ((7543, 7559), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (7550, 7559), True, 'import numpy as np\n'), ((8428, 8447), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (8435, 8447), True, 'import numpy as np\n'), ((8472, 8491), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (8479, 8491), True, 'import numpy as np\n'), ((8513, 8529), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (8520, 8529), True, 'import numpy as np\n'), ((8551, 8567), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (8558, 8567), True, 'import numpy as np\n'), ((9459, 9478), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (9466, 9478), True, 'import numpy as np\n'), ((9503, 9522), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (9510, 9522), True, 'import numpy as np\n'), ((9544, 9560), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (9551, 9560), True, 'import numpy as np\n'), ((9582, 9598), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (9589, 9598), True, 'import numpy as np\n'), ((10462, 10481), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (10469, 10481), True, 'import numpy as np\n'), ((10506, 10525), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (10513, 10525), True, 'import numpy as np\n'), ((10547, 10563), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (10554, 10563), True, 'import numpy as np\n'), ((10585, 10601), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (10592, 10601), True, 'import numpy as np\n'), ((11518, 11537), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (11525, 11537), True, 'import numpy as np\n'), ((11562, 11581), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (11569, 11581), True, 'import numpy as np\n'), ((11603, 11619), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (11610, 11619), True, 'import numpy as np\n'), ((11641, 11657), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (11648, 11657), True, 'import numpy as np\n'), ((13157, 13176), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (13164, 13176), True, 'import numpy as np\n'), ((13201, 13220), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (13208, 13220), True, 'import numpy as np\n'), ((13242, 13258), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (13249, 13258), True, 'import numpy as np\n'), ((13280, 13296), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (13287, 13296), True, 'import numpy as np\n'), ((14996, 15015), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (15003, 15015), True, 'import numpy as np\n'), ((15040, 15059), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (15047, 15059), True, 'import numpy as np\n'), ((15081, 15097), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (15088, 15097), True, 'import numpy as np\n'), ((15119, 15135), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (15126, 15135), True, 'import numpy as 
np\n'), ((16261, 16280), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (16268, 16280), True, 'import numpy as np\n'), ((16305, 16324), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (16312, 16324), True, 'import numpy as np\n'), ((16346, 16362), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (16353, 16362), True, 'import numpy as np\n'), ((16384, 16400), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (16391, 16400), True, 'import numpy as np\n'), ((17539, 17558), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (17546, 17558), True, 'import numpy as np\n'), ((17583, 17602), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (17590, 17602), True, 'import numpy as np\n'), ((17624, 17640), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (17631, 17640), True, 'import numpy as np\n'), ((17662, 17678), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (17669, 17678), True, 'import numpy as np\n'), ((18901, 18920), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (18908, 18920), True, 'import numpy as np\n'), ((18945, 18964), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (18952, 18964), True, 'import numpy as np\n'), ((18986, 19002), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (18993, 19002), True, 'import numpy as np\n'), ((19024, 19040), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (19031, 19040), True, 'import numpy as np\n'), ((20293, 20312), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (20300, 20312), True, 'import numpy as np\n'), ((20337, 20356), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (20344, 20356), True, 'import numpy as np\n'), ((20378, 20394), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (20385, 20394), True, 'import numpy as np\n'), ((20416, 20432), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (20423, 20432), True, 'import numpy as np\n'), ((21699, 21718), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (21706, 21718), True, 'import numpy as np\n'), ((21743, 21762), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (21750, 21762), True, 'import numpy as np\n'), ((21784, 21800), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (21791, 21800), True, 'import numpy as np\n'), ((21822, 21838), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (21829, 21838), True, 'import numpy as np\n'), ((23101, 23120), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (23108, 23120), True, 'import numpy as np\n'), ((23145, 23164), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (23152, 23164), True, 'import numpy as np\n'), ((23186, 23202), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (23193, 23202), True, 'import numpy as np\n'), ((23224, 23240), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (23231, 23240), True, 'import numpy as np\n'), ((24467, 24486), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (24474, 24486), True, 'import numpy as np\n'), ((24511, 24530), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (24518, 24530), True, 'import numpy as np\n'), ((24552, 24568), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (24559, 24568), True, 'import numpy as np\n'), ((24590, 24606), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (24597, 24606), True, 'import numpy as np\n'), ((25836, 25855), 
'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (25843, 25855), True, 'import numpy as np\n'), ((25880, 25899), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (25887, 25899), True, 'import numpy as np\n'), ((25921, 25937), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (25928, 25937), True, 'import numpy as np\n'), ((25959, 25975), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (25966, 25975), True, 'import numpy as np\n'), ((27210, 27229), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (27217, 27229), True, 'import numpy as np\n'), ((27254, 27273), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (27261, 27273), True, 'import numpy as np\n'), ((27295, 27311), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (27302, 27311), True, 'import numpy as np\n'), ((27333, 27349), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (27340, 27349), True, 'import numpy as np\n'), ((28460, 28479), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (28467, 28479), True, 'import numpy as np\n'), ((28504, 28523), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (28511, 28523), True, 'import numpy as np\n'), ((28545, 28561), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (28552, 28561), True, 'import numpy as np\n'), ((28583, 28599), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (28590, 28599), True, 'import numpy as np\n'), ((31870, 31889), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (31877, 31889), True, 'import numpy as np\n'), ((31914, 31933), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (31921, 31933), True, 'import numpy as np\n'), ((31955, 31971), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (31962, 31971), True, 'import numpy as np\n'), ((31993, 32009), 'numpy.mean', 'np.mean', (['fscores'], {}), '(fscores)\n', (32000, 32009), True, 'import numpy as np\n'), ((29984, 30009), 'numpy.linspace', 'np.linspace', (['(2)', '(20)'], {'num': '(2)'}), '(2, 20, num=2)\n', (29995, 30009), True, 'import numpy as np\n')]
import sys from pdb import Pdb, getsourcelines from .utils import check_frame from bytefall._modules import sys as py_sys from bytefall._c_api import convert_to_builtin_frame from bytefall.config import EnvConfig __all__ = ['PdbWrapper'] class PdbWrapper(object): @staticmethod @check_frame def set_trace(frame, *args, **kwargs): return pdb_wrapper(frame)() def pdb_wrapper(this_frame): DEBUG_INTERNAL = EnvConfig().get('DEBUG_INTERNAL') _pdb = Pdb() if DEBUG_INTERNAL else _Pdb() def wrapper(): if DEBUG_INTERNAL: _pdb.set_trace(sys._getframe(3)) else: # Frame to be stepped in is not retrieved by `sys._getframe()`, # so that we don't need to pass its `f_back` into `set_trace()` _pdb.set_trace(this_frame) return wrapper class _Pdb(Pdb): def do_longlist(self, arg): filename = self.curframe.f_code.co_filename breaklist = self.get_file_breaks(filename) try: # Here we need to convert `self.curframe` to builtin frame # for `getsourcelines`, in which `inspect.findsource()` # requires a builtin frame to work. converted = convert_to_builtin_frame(self.curframe) lines, lineno = getsourcelines(converted) except OSError as err: self.error(err) return self._print_lines(lines, lineno, breaklist, self.curframe) do_ll = do_longlist def set_continue(self): self._set_stopinfo(self.botframe, None, -1) if not self.breaks: # Here we need to replace the implementation of `sys.settrace()` # and `sys._getframe()`. py_sys.settrace(None) # In the original implementation, here it calls # `sys._getframe().f_back` to get the caller of this method. # However, we cannot get caller `pyframe.Frame` that calling # `py_sys._getframe()`, but it does not affect the result. # Because the current running frame in vm is what we want here. frame = py_sys._getframe() while frame and frame is not self.botframe: del frame.f_trace frame = frame.f_back def set_trace(self, frame=None): self.reset() while frame: frame.f_trace = self.trace_dispatch self.botframe = frame frame = frame.f_back self.set_step() py_sys.settrace(self.trace_dispatch)
[ "pdb.Pdb", "bytefall._c_api.convert_to_builtin_frame", "bytefall.config.EnvConfig", "sys._getframe", "pdb.getsourcelines", "bytefall._modules.sys._getframe", "bytefall._modules.sys.settrace" ]
[((480, 485), 'pdb.Pdb', 'Pdb', ([], {}), '()\n', (483, 485), False, 'from pdb import Pdb, getsourcelines\n'), ((2479, 2515), 'bytefall._modules.sys.settrace', 'py_sys.settrace', (['self.trace_dispatch'], {}), '(self.trace_dispatch)\n', (2494, 2515), True, 'from bytefall._modules import sys as py_sys\n'), ((435, 446), 'bytefall.config.EnvConfig', 'EnvConfig', ([], {}), '()\n', (444, 446), False, 'from bytefall.config import EnvConfig\n'), ((1211, 1250), 'bytefall._c_api.convert_to_builtin_frame', 'convert_to_builtin_frame', (['self.curframe'], {}), '(self.curframe)\n', (1235, 1250), False, 'from bytefall._c_api import convert_to_builtin_frame\n'), ((1279, 1304), 'pdb.getsourcelines', 'getsourcelines', (['converted'], {}), '(converted)\n', (1293, 1304), False, 'from pdb import Pdb, getsourcelines\n'), ((1709, 1730), 'bytefall._modules.sys.settrace', 'py_sys.settrace', (['None'], {}), '(None)\n', (1724, 1730), True, 'from bytefall._modules import sys as py_sys\n'), ((2105, 2123), 'bytefall._modules.sys._getframe', 'py_sys._getframe', ([], {}), '()\n', (2121, 2123), True, 'from bytefall._modules import sys as py_sys\n'), ((590, 606), 'sys._getframe', 'sys._getframe', (['(3)'], {}), '(3)\n', (603, 606), False, 'import sys\n')]
import pylab as plt import numpy as np from math import * N=100 t0 = 0.0 t1 = 2.0 t = np.linspace(t0,t1,N) dt = (t1-t0)/N one = np.ones((N)) xp = np.zeros((N)) yp = np.zeros((N)) th = np.zeros((N)) x = t*t y = t plt.figure() plt.plot(x,y,'g-') plt.legend(['Path'],loc='best') plt.title('Quadratic Path') plt.show() doty=one dotx=2*t ddoty=0 ddotx=2*one r = 1.0 L = 4.0 v = np.sqrt(dotx*dotx + doty*doty) kappa = (dotx*ddoty - doty*ddotx)/(v*v*v) dotphi1 = (v/r)*(kappa*L +1) dotphi2 = (v/r)*(-kappa*L+1) plt.plot(t,dotphi1,'b-', t,dotphi2,'g-') plt.title('Wheel Speeds') plt.legend(['Right', 'Left'],loc='best') plt.show() xp[0] = 0.0 yp[0] = 0.0 th[0] = 1.5707963267949 for i in range(N-1): xp[i+1] = xp[i] + (r*dt/2.0) * (dotphi1[i]+dotphi2[i]) * cos(th[i]) yp[i+1] = yp[i] + (r*dt/2.0)*(dotphi1[i]+dotphi2[i])* sin(th[i]) th[i+1] = th[i] + (r*dt/(2.0*L))*(dotphi1[i]-dotphi2[i]) plt.figure() plt.plot(x,y,'g-', xp, yp, 'bx') plt.legend(['Original Path', 'Robot Path'],loc='best') plt.title('Path') plt.show()
[ "pylab.title", "pylab.show", "numpy.zeros", "numpy.ones", "pylab.figure", "numpy.linspace", "pylab.legend", "pylab.plot", "numpy.sqrt" ]
[((87, 109), 'numpy.linspace', 'np.linspace', (['t0', 't1', 'N'], {}), '(t0, t1, N)\n', (98, 109), True, 'import numpy as np\n'), ((129, 139), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (136, 139), True, 'import numpy as np\n'), ((147, 158), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (155, 158), True, 'import numpy as np\n'), ((166, 177), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (174, 177), True, 'import numpy as np\n'), ((185, 196), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (193, 196), True, 'import numpy as np\n'), ((215, 227), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (225, 227), True, 'import pylab as plt\n'), ((228, 248), 'pylab.plot', 'plt.plot', (['x', 'y', '"""g-"""'], {}), "(x, y, 'g-')\n", (236, 248), True, 'import pylab as plt\n'), ((247, 279), 'pylab.legend', 'plt.legend', (["['Path']"], {'loc': '"""best"""'}), "(['Path'], loc='best')\n", (257, 279), True, 'import pylab as plt\n'), ((279, 306), 'pylab.title', 'plt.title', (['"""Quadratic Path"""'], {}), "('Quadratic Path')\n", (288, 306), True, 'import pylab as plt\n'), ((307, 317), 'pylab.show', 'plt.show', ([], {}), '()\n', (315, 317), True, 'import pylab as plt\n'), ((378, 412), 'numpy.sqrt', 'np.sqrt', (['(dotx * dotx + doty * doty)'], {}), '(dotx * dotx + doty * doty)\n', (385, 412), True, 'import numpy as np\n'), ((510, 554), 'pylab.plot', 'plt.plot', (['t', 'dotphi1', '"""b-"""', 't', 'dotphi2', '"""g-"""'], {}), "(t, dotphi1, 'b-', t, dotphi2, 'g-')\n", (518, 554), True, 'import pylab as plt\n'), ((551, 576), 'pylab.title', 'plt.title', (['"""Wheel Speeds"""'], {}), "('Wheel Speeds')\n", (560, 576), True, 'import pylab as plt\n'), ((577, 618), 'pylab.legend', 'plt.legend', (["['Right', 'Left']"], {'loc': '"""best"""'}), "(['Right', 'Left'], loc='best')\n", (587, 618), True, 'import pylab as plt\n'), ((618, 628), 'pylab.show', 'plt.show', ([], {}), '()\n', (626, 628), True, 'import pylab as plt\n'), ((903, 915), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (913, 915), True, 'import pylab as plt\n'), ((916, 950), 'pylab.plot', 'plt.plot', (['x', 'y', '"""g-"""', 'xp', 'yp', '"""bx"""'], {}), "(x, y, 'g-', xp, yp, 'bx')\n", (924, 950), True, 'import pylab as plt\n'), ((949, 1004), 'pylab.legend', 'plt.legend', (["['Original Path', 'Robot Path']"], {'loc': '"""best"""'}), "(['Original Path', 'Robot Path'], loc='best')\n", (959, 1004), True, 'import pylab as plt\n'), ((1004, 1021), 'pylab.title', 'plt.title', (['"""Path"""'], {}), "('Path')\n", (1013, 1021), True, 'import pylab as plt\n'), ((1022, 1032), 'pylab.show', 'plt.show', ([], {}), '()\n', (1030, 1032), True, 'import pylab as plt\n')]
#!/usr/bin/env python # gatherUpper.py import numpy from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() LENGTH = 3 x = None x_local = numpy.linspace(rank*LENGTH,(rank+1)*LENGTH, LENGTH) print(x_local) if rank == 0: x = numpy.zeros(size*LENGTH) print (x) comm.Gather(x_local, x, root=0) #you should notice that only the root process has a value for x that #is not "None" print ("process", rank, "x:", x) print ("process", rank, "x_local:", x_local)
[ "numpy.zeros", "numpy.linspace" ]
[((173, 231), 'numpy.linspace', 'numpy.linspace', (['(rank * LENGTH)', '((rank + 1) * LENGTH)', 'LENGTH'], {}), '(rank * LENGTH, (rank + 1) * LENGTH, LENGTH)\n', (187, 231), False, 'import numpy\n'), ((262, 288), 'numpy.zeros', 'numpy.zeros', (['(size * LENGTH)'], {}), '(size * LENGTH)\n', (273, 288), False, 'import numpy\n')]
"""Run simulations for SDC model. Parameters ---------- N_JOBS Number of cores used for parallelization. RANDOM_SEED Seed for the random numbers generator. SPACE Types of social space. Available values: 'uniform', 'lognormal', 'clusters_normal'. N Sizes of networks, NDIM Number of dimensions of simulated social spaces. DATA_REP Number of independent realizations of social spaces. SDA_PARAMS k Expected average degree. alpha Homophily level. directed Directed/undirected networks. p_rewire Probability of random rewiring. SDA_REP Number of independent realizations of adjacency matrices. SIM_PARAMS degseq_type Degree sequence type. One of: 'poisson', 'negbinom', 'powerlaw'. degseq_sort Should degree sequence be sorted by expected node degrees. """ import os import gc import numpy as np import pandas as pd from sklearn.externals.joblib import Memory import _ # Globals ROOT = os.path.dirname(os.path.realpath(__file__)) HERE = ROOT DATAPATH = os.path.join(HERE, 'raw-data') # Persistence MEMORY = Memory(location='.cache', verbose=1) N_JOBS = 4 # Data generation params RANDOM_SEED = 101 SPACE = ('uniform', 'lognormal', 'clusters_normal') N = (1000, 2000, 4000, 8000) NDIM = (1, 2, 4, 8, 16) CENTERS = (4,) DATA_REP = 2 # SDA params SDA_PARAMS = { 'k': (30,), 'alpha': (2, 4, 8, np.inf), 'directed': (False,), 'p_rewire': (.01,) } SDA_REP = 3 SIM_PARAMS = { 'degseq_type': ('poisson', 'negbinom', 'powerlaw'), 'sort': (True, False) } @MEMORY.cache(ignore=['n_jobs']) def simulate_cm(space, dparams, drep, sdaparams, sdarep, simparams, n_jobs): return _.simulate(space, dparams, drep, sdaparams, sdarep, simparams, n_jobs, simfunc=_.run_sdac) # Run simulations if RANDOM_SEED is not None: np.random.seed(RANDOM_SEED) sim = lambda s: simulate_cm( space=s, dparams=(N, NDIM, CENTERS), drep=DATA_REP, sdaparams=SDA_PARAMS, sdarep=SDA_REP, simparams=SIM_PARAMS, n_jobs=N_JOBS ) df = None # main data frame gdf = None # graph data frame for s in SPACE: sim(s) gc.collect() for s in SPACE: print(f"\rloading and processing '{s}' space' ...", end="") _df = sim(s) _df.drop(columns=['A', 'labels'], inplace=True) if df is None: df = _df else: df = pd.concat((df, _df), ignore_index=True) # Save data ------------------------------------------------------------------- # Standard data get saved as feather file, so it can be easily # shared with R for data analysis and visualization. # Adjacency matrices data is saved as a separate pickle file. # It will be used for graph visualizations. os.makedirs(DATAPATH, exist_ok=True) # Save main data as a feather file df.to_feather(os.path.join(DATAPATH, 'sda-data-cm.feather')) # Save graph data as a pickle file # joblib.dump(gdf, os.path.join(DATAPATH, 'sda-graphs-cm.pkl'))
[ "numpy.random.seed", "os.makedirs", "_.simulate", "os.path.realpath", "sklearn.externals.joblib.Memory", "gc.collect", "os.path.join", "pandas.concat" ]
[((1067, 1097), 'os.path.join', 'os.path.join', (['HERE', '"""raw-data"""'], {}), "(HERE, 'raw-data')\n", (1079, 1097), False, 'import os\n'), ((1122, 1158), 'sklearn.externals.joblib.Memory', 'Memory', ([], {'location': '""".cache"""', 'verbose': '(1)'}), "(location='.cache', verbose=1)\n", (1128, 1158), False, 'from sklearn.externals.joblib import Memory\n'), ((2185, 2197), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2195, 2197), False, 'import gc\n'), ((2750, 2786), 'os.makedirs', 'os.makedirs', (['DATAPATH'], {'exist_ok': '(True)'}), '(DATAPATH, exist_ok=True)\n', (2761, 2786), False, 'import os\n'), ((1016, 1042), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1032, 1042), False, 'import os\n'), ((1709, 1803), '_.simulate', '_.simulate', (['space', 'dparams', 'drep', 'sdaparams', 'sdarep', 'simparams', 'n_jobs'], {'simfunc': '_.run_sdac'}), '(space, dparams, drep, sdaparams, sdarep, simparams, n_jobs,\n simfunc=_.run_sdac)\n', (1719, 1803), False, 'import _\n'), ((1874, 1901), 'numpy.random.seed', 'np.random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (1888, 1901), True, 'import numpy as np\n'), ((2837, 2882), 'os.path.join', 'os.path.join', (['DATAPATH', '"""sda-data-cm.feather"""'], {}), "(DATAPATH, 'sda-data-cm.feather')\n", (2849, 2882), False, 'import os\n'), ((2406, 2445), 'pandas.concat', 'pd.concat', (['(df, _df)'], {'ignore_index': '(True)'}), '((df, _df), ignore_index=True)\n', (2415, 2445), True, 'import pandas as pd\n')]
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import build_from_cfg from mmdet.core.bbox.iou_calculators.builder import IOU_CALCULATORS ROTATED_IOU_CALCULATORS = IOU_CALCULATORS def build_iou_calculator(cfg, default_args=None): """Builder of IoU calculator.""" return build_from_cfg(cfg, ROTATED_IOU_CALCULATORS, default_args)
[ "mmcv.utils.build_from_cfg" ]
[((297, 355), 'mmcv.utils.build_from_cfg', 'build_from_cfg', (['cfg', 'ROTATED_IOU_CALCULATORS', 'default_args'], {}), '(cfg, ROTATED_IOU_CALCULATORS, default_args)\n', (311, 355), False, 'from mmcv.utils import build_from_cfg\n')]
""" The tool to check the availability or syntax of domain, IP or URL. :: ██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗ ██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝ ██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗ ██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝ ██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝ Tests of URL 2 Network Location converter. Author: <NAME>, @funilrys, contactTATAfunilrysTODTODcom Special thanks: https://pyfunceble.github.io/special-thanks.html Contributors: https://pyfunceble.github.io/contributors.html Project link: https://github.com/funilrys/PyFunceble Project documentation: https://pyfunceble.readthedocs.io/en/dev/ Project homepage: https://pyfunceble.github.io/ License: :: Copyright 2017, 2018, 2019, 2020, 2021 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import unittest.mock from PyFunceble.converter.url2netloc import Url2Netloc class TestUrl2Netloc(unittest.TestCase): """ Tests our internal URL converter. """ def setUp(self) -> None: """ Setups everything needed for the tests. """ self.converter = Url2Netloc() def tearDown(self) -> None: """ Destroys everything previously created for the tests. """ del self.converter def test_set_data_to_convert_no_string(self) -> None: """ Tests the method which let us set the data to work with for the case that a non-string value is given. """ given = ["Hello", "World"] self.assertRaises(TypeError, lambda: self.converter.set_data_to_convert(given)) def test_set_data_to_convert_empty_string(self) -> None: """ Tests the method which let us set the data to work with for the case that an empty-string value is given. """ given = "" self.assertRaises(ValueError, lambda: self.converter.set_data_to_convert(given)) def test_get_converted_nothing_to_decode(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that no conversion is needed. """ given = "example.org" expected = "example.org" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_full_url(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that a full URL is given. """ given = "https://example.org/hello/world/this/is/a/test" expected = "example.org" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_full_url_with_port(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that a full URL (with explicit port) is given. 
""" given = "https://example.org:8080/hello/world/this/is/a/test" expected = "example.org:8080" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_full_url_with_params(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that a full URL (with params) is given. """ given = "https://example.org/?is_admin=true" expected = "example.org" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_url_without_scheme(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that no scheme is given. """ given = "example.org/hello/world/this/is/a/test" expected = "example.org" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_url_without_scheme_and_with_params(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that no scheme (but with params) is given. """ given = "example.org/?is_admin=true" expected = "example.org" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_url_without_protocol(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that no protocol is given. """ given = "://example.org/hello/world/this/is/a/test" expected = "example.org" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_url_without_protocol_and_with_params(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that no protocol (but params) is given. """ given = "://example.org/?is_admin=true" expected = "example.org" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_url_without_protocol_and_path(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that no protocol and path is given. """ given = "://example.org/" expected = "example.org" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_url_startswith_2_slashes(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that the given url starts with 2 slashes. """ given = "//example.org/hello/world/this/is/a/test" expected = "example.org" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) def test_get_converted_url_startswith_1_slash(self) -> None: """ Tests the method which let us extracts the netloc from a given URL for the case that the given url starts with 1 slash. """ given = "/example.org/hello/world/this/is/a/test" expected = "" self.converter.data_to_convert = given actual = self.converter.get_converted() self.assertEqual(expected, actual) if __name__ == "__main__": unittest.main()
[ "unittest.main", "PyFunceble.converter.url2netloc.Url2Netloc" ]
[((7749, 7764), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7762, 7764), False, 'import unittest\n'), ((1978, 1990), 'PyFunceble.converter.url2netloc.Url2Netloc', 'Url2Netloc', ([], {}), '()\n', (1988, 1990), False, 'from PyFunceble.converter.url2netloc import Url2Netloc\n')]
import requests
import json
import time
import neopixel
import board

#Set Colours
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
ORANGE = (100, 64, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
OFF = (0, 0, 0)

#Set NeoPixel Details - Pin/Number Pixels/Brightness etc
pixels = neopixel.NeoPixel(board.D18, 144, brightness=0.03, auto_write=False)

#Start up Lights
n = 1
t_end = time.time() + 22.32 * 1
while time.time() < t_end:
    n = n + 1
    if n >= 144:
        n = 1
    pixels[n] = (RED)
    pixels[n-1] = (YELLOW)
    pixels.show()
    time.sleep(0.1)

pixels.fill((0, 0, 0))
pixels.show()

print("Getting Conditions and Forecast")

def getconditions():
    # Get Data from Weather Flow for Station Location
    try:
        response = requests.get('https://swd.weatherflow.com/swd/rest/better_forecast?api_key=db55228a-b708-4325-9166-7f2d04c61baa&station_id=50216&units_temp=c&units_wind=mph&units_pressure=mb&units_precip=mm&units_distance=mi').text
    except requests.exceptions.RequestException:
        # wait a minute, then retry the request
        time.sleep(60)
        return getconditions()
    data = json.loads(response)
    text = data['current_conditions']['conditions']
    icon = data['current_conditions']['icon']
    baro = int(data['current_conditions']['sea_level_pressure'])
    trend = data['current_conditions']['pressure_trend']
    print(text)
    print(icon)
    print(baro)
    print(trend)
    return trend, baro, icon

def barometer():
    conditions = getconditions()
    baro = conditions[1]
    # Pressure top 1050 minus number of pixels to set top pixel
    pixel = 906
    pixelon = int(baro - pixel)
    pixels[pixelon] = (RED)

def trendpixel():
    conditions = getconditions()
    trend = conditions[0]
    if trend == 'steady':
        pixels[14] = (GREEN)
    else:
        pixels[14] = (OFF)
    if trend == 'rising':
        pixels[16] = (BLUE)
    else:
        pixels[16] = (OFF)
    if trend == 'falling':
        pixels[12] = (RED)
    else:
        pixels[12] = (OFF)

def icon():
    conditions = getconditions()
    icon = str(conditions[2])
    print("Icon")
    print(icon)
    if icon == 'clear-day':
        pixels[36] = (YELLOW)
    else:
        pixels[36] = (OFF)
    # day and night variants of the same condition share one pixel
    if icon in ('partly-cloudy-day', 'partly-cloudy-night'):
        pixels[34] = (BLUE)
    else:
        pixels[34] = (OFF)
    if icon == 'cloudy':
        pixels[32] = (BLUE)
    else:
        pixels[32] = (OFF)
    if icon in ('possibly-rainy-day', 'possibly-rainy-night'):
        pixels[30] = (BLUE)
    else:
        pixels[30] = (OFF)
    if icon == 'clear-night':
        pixels[22] = (BLUE)
    else:
        pixels[22] = (OFF)

while True:
    getconditions()
    barometer()
    trendpixel()
    icon()
    pixels.show()
    time.sleep(60)
[ "json.loads", "time.sleep", "time.time", "requests.get", "neopixel.NeoPixel" ]
[((316, 384), 'neopixel.NeoPixel', 'neopixel.NeoPixel', (['board.D18', '(144)'], {'brightness': '(0.03)', 'auto_write': '(False)'}), '(board.D18, 144, brightness=0.03, auto_write=False)\n', (333, 384), False, 'import neopixel\n'), ((420, 431), 'time.time', 'time.time', ([], {}), '()\n', (429, 431), False, 'import time\n'), ((450, 461), 'time.time', 'time.time', ([], {}), '()\n', (459, 461), False, 'import time\n'), ((587, 602), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (597, 602), False, 'import time\n'), ((1101, 1121), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (1111, 1121), False, 'import json\n'), ((2813, 2827), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (2823, 2827), False, 'import time\n'), ((795, 1015), 'requests.get', 'requests.get', (['"""https://swd.weatherflow.com/swd/rest/better_forecast?api_key=db55228a-b708-4325-9166-7f2d04c61baa&station_id=50216&units_temp=c&units_wind=mph&units_pressure=mb&units_precip=mm&units_distance=mi"""'], {}), "(\n 'https://swd.weatherflow.com/swd/rest/better_forecast?api_key=db55228a-b708-4325-9166-7f2d04c61baa&station_id=50216&units_temp=c&units_wind=mph&units_pressure=mb&units_precip=mm&units_distance=mi'\n )\n", (807, 1015), False, 'import requests\n'), ((1074, 1088), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (1084, 1088), False, 'import time\n')]
import cPickle as pickle import datetime from django.shortcuts import render_to_response, get_object_or_404 from django.template import RequestContext from django.http import Http404, HttpResponse from django.core import urlresolvers from unipath import FSPath as Path from projects.models import Project def document(request, project, url): try: project = Project.objects.get(slug=project) except Project.DoesNotExist: raise Http404 docroot = Path(project.get_pickle_path()) # First look for <bits>/index.fpickle, then for <bits>.fpickle bits = url.strip('/').split('/') + ['index.fpickle'] doc = docroot.child(*bits) if not doc.exists(): bits = bits[:-2] + ['%s.fpickle' % bits[-2]] doc = docroot.child(*bits) if not doc.exists(): raise Http404("'%s' does not exist" % doc) bits[-1] = bits[-1].replace('.fpickle', '') template_names = [ 'docs/%s.html' % '-'.join([b for b in bits if b]), 'docs/doc.html' ] return render_to_response(template_names, RequestContext(request, { 'doc': pickle.load(open(doc, 'rb')), 'env': pickle.load(open(docroot.child('globalcontext.pickle'), 'rb')), 'update_date': datetime.datetime.fromtimestamp(docroot.child('last_build').mtime()), 'home': project.get_absolute_url(), 'redirect_from': request.GET.get('from', None), })) def update(request, slug): try: project = Project.objects.get(slug=slug) except Project.DoesNotExist: raise Http404 project.update() return HttpResponse('done')
[ "django.http.Http404", "projects.models.Project.objects.get", "django.http.HttpResponse" ]
[((1601, 1621), 'django.http.HttpResponse', 'HttpResponse', (['"""done"""'], {}), "('done')\n", (1613, 1621), False, 'from django.http import Http404, HttpResponse\n'), ((370, 403), 'projects.models.Project.objects.get', 'Project.objects.get', ([], {'slug': 'project'}), '(slug=project)\n', (389, 403), False, 'from projects.models import Project\n'), ((1478, 1508), 'projects.models.Project.objects.get', 'Project.objects.get', ([], {'slug': 'slug'}), '(slug=slug)\n', (1497, 1508), False, 'from projects.models import Project\n'), ((822, 858), 'django.http.Http404', 'Http404', (['("\'%s\' does not exist" % doc)'], {}), '("\'%s\' does not exist" % doc)\n', (829, 858), False, 'from django.http import Http404, HttpResponse\n')]
# ***************************************************************** # Copyright 2013 MIT Lincoln Laboratory # Project: SPAR # Authors: SY # Description: Section class # # # Modifications: # Date Name Modification # ---- ---- ------------ # 19 Sep 2013 SY Original version # ***************************************************************** # SPAR imports: import spar_python.report_generation.ta1.ta1_section as section import spar_python.report_generation.common.regression as regression import spar_python.report_generation.common.latex_classes as latex_classes import spar_python.report_generation.ta1.ta1_schema as t1s import spar_python.report_generation.ta1.ta1_analysis_input as t1ai class Ta1LatencySection(section.Ta1Section): """The latency section of the TA1 report""" def _store_query_latency_table(self): """Stores the LaTeX string representing the query latency table on the output object.""" constraint_list = self._config.get_constraint_list( require_correct=True) categories = self._config.results_db.get_unique_query_values( simple_fields=[(t1s.DBF_TABLENAME, t1s.DBF_NUMRECORDS), (t1s.DBF_TABLENAME, t1s.DBF_RECORDSIZE), (t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS), (t1s.DBF_TABLENAME, t1s.DBF_CAT)], constraint_list=constraint_list) # create the latency table: latency_table = latex_classes.LatexTable( "Query Latency vs. Number of Records Returned Best Fit Functions", "lat_main", ["DBNR", "DBRS", "Select", "Query Type", "Best-Fit Func", "R-Squared"]) # compute correctness for every query category: for (dbnr, dbrs, selection_cols, query_cat) in categories: inp = t1ai.Input() inp[t1s.DBF_CAT] = query_cat inp[t1s.DBF_NUMRECORDS] = dbnr inp[t1s.DBF_RECORDSIZE] = dbrs inp[t1s.DBP_SELECTIONCOLS] = selection_cols this_constraint_list = constraint_list + inp.get_constraint_list() [x_values, y_values] = self._config.results_db.get_query_values( simple_fields=[ (t1s.DBP_TABLENAME, t1s.DBP_NUMNEWRETURNEDRECORDS), (t1s.DBP_TABLENAME, t1s.DBP_QUERYLATENCY)], constraint_list=this_constraint_list) try: inputs = [x_values] outputs = y_values function = regression.regress( function_to_regress=self._config.ql_all_ftr, outputs=outputs, inputs=inputs) function_string = function.string rsquared = function.get_rsquared(inputs, outputs) except regression.BadRegressionInputError: function_string = "-" rsquared = "-" latency_table.add_content( [inp.test_db.get_db_num_records_str(), inp.test_db.get_db_record_size_str(), selection_cols, query_cat, function_string, rsquared]) self._outp["query_latency_table"] = latency_table.get_string() def _populate_output(self): """Populates the output object which is passed to the Jinja tempalte in get_string.""" self._store_query_latency_table()
[ "spar_python.report_generation.ta1.ta1_analysis_input.Input", "spar_python.report_generation.common.latex_classes.LatexTable", "spar_python.report_generation.common.regression.regress" ]
[((1587, 1775), 'spar_python.report_generation.common.latex_classes.LatexTable', 'latex_classes.LatexTable', (['"""Query Latency vs. Number of Records Returned Best Fit Functions"""', '"""lat_main"""', "['DBNR', 'DBRS', 'Select', 'Query Type', 'Best-Fit Func', 'R-Squared']"], {}), "(\n 'Query Latency vs. Number of Records Returned Best Fit Functions',\n 'lat_main', ['DBNR', 'DBRS', 'Select', 'Query Type', 'Best-Fit Func',\n 'R-Squared'])\n", (1611, 1775), True, 'import spar_python.report_generation.common.latex_classes as latex_classes\n'), ((1954, 1966), 'spar_python.report_generation.ta1.ta1_analysis_input.Input', 't1ai.Input', ([], {}), '()\n', (1964, 1966), True, 'import spar_python.report_generation.ta1.ta1_analysis_input as t1ai\n'), ((2643, 2743), 'spar_python.report_generation.common.regression.regress', 'regression.regress', ([], {'function_to_regress': 'self._config.ql_all_ftr', 'outputs': 'outputs', 'inputs': 'inputs'}), '(function_to_regress=self._config.ql_all_ftr, outputs=\n outputs, inputs=inputs)\n', (2661, 2743), True, 'import spar_python.report_generation.common.regression as regression\n')]
import numpy as np
import scipy.signal as sp
import scipy.spatial.distance as sp_dist
import librosa

class MedianNMF:
    y, sr = None,None
    n_components = None

    def __init__(self,y,sr,n_components = 5):
        self.y, self.sr = y,sr
        self.n_components = n_components

    def decompose(self):
        #extract the percussive part of the signal
        hpss_y = self.hpss()
        #Perform Short-time Fourier transform
        D = librosa.stft(hpss_y)
        # Separate the magnitude and phase
        S, phase = librosa.magphase(D)
        #NMF decompose to components
        components, activations = self.decomposeNMF(hpss_y, S, self.n_components)
        #reconstruct and return
        return [self.reconstructComponent(
            components[:, i], activations[i], phase) for i in range(0,len(activations))]

    def hpss(self, margin=4.0):
        #extract percussive components through median filtering
        return librosa.effects.percussive(self.y, margin=margin)

    def decomposeNMF(self, y, magnitude, n_components):
        # Decompose by nmf
        return librosa.decompose.decompose(magnitude, n_components, sort=True)

    def reconstructFull(self, components, activations, phase):
        #reconstruct all components into one signal
        D_k = components.dot(activations)
        y_k = librosa.istft(D_k * phase)
        return y_k

    def reconstructComponent(self, components, activation, phase):
        D_k = np.multiply.outer(components, activation)
        y_k = librosa.istft(D_k * phase)
        #filter out noise using Savitzky-Golay filter
        component_filtered = sp.savgol_filter(y_k,11,1)
        return component_filtered
[ "librosa.decompose.decompose", "scipy.signal.savgol_filter", "librosa.effects.percussive", "librosa.istft", "numpy.multiply.outer", "librosa.magphase", "librosa.stft" ]
[((438, 458), 'librosa.stft', 'librosa.stft', (['hpss_y'], {}), '(hpss_y)\n', (450, 458), False, 'import librosa\n'), ((521, 540), 'librosa.magphase', 'librosa.magphase', (['D'], {}), '(D)\n', (537, 540), False, 'import librosa\n'), ((940, 989), 'librosa.effects.percussive', 'librosa.effects.percussive', (['self.y'], {'margin': 'margin'}), '(self.y, margin=margin)\n', (966, 989), False, 'import librosa\n'), ((1090, 1153), 'librosa.decompose.decompose', 'librosa.decompose.decompose', (['magnitude', 'n_components'], {'sort': '(True)'}), '(magnitude, n_components, sort=True)\n', (1117, 1153), False, 'import librosa\n'), ((1314, 1340), 'librosa.istft', 'librosa.istft', (['(D_k * phase)'], {}), '(D_k * phase)\n', (1327, 1340), False, 'import librosa\n'), ((1446, 1487), 'numpy.multiply.outer', 'np.multiply.outer', (['components', 'activation'], {}), '(components, activation)\n', (1463, 1487), True, 'import numpy as np\n'), ((1502, 1528), 'librosa.istft', 'librosa.istft', (['(D_k * phase)'], {}), '(D_k * phase)\n', (1515, 1528), False, 'import librosa\n'), ((1613, 1641), 'scipy.signal.savgol_filter', 'sp.savgol_filter', (['y_k', '(11)', '(1)'], {}), '(y_k, 11, 1)\n', (1629, 1641), True, 'import scipy.signal as sp\n')]
import numpy as np
from collections import defaultdict

class Agent:

    def __init__(self, nA=6):
        """ Initialize agent.

        Params
        ======
        - nA: number of actions available to the agent
        """
        self.nA = nA
        self.Q = defaultdict(lambda: np.zeros(self.nA))
        self.epsilon_start = 1.0
        self.i_episode = 1.0
        self.alpha = 0.04
        self.gamma = 0.9

    def epsilon_greedy_probs(self, state, epsilon):
        ''' Calculation of probabilities according to an epsilon-greedy policy'''
        probs = np.ones(self.nA) * epsilon / self.nA
        best_action = np.argmax(self.Q[state])
        probs[best_action] = 1 - epsilon + (epsilon / self.nA)
        return probs

    def select_action(self, state):
        """ Given the state, select an action.

        Params
        ======
        - state: the current state of the environment

        Returns
        =======
        - action: an integer, compatible with the task's action space
        """
        # Random action
        # action = np.random.choice(self.nA)

        # Epsilon decay
        epsilon = self.epsilon_start / self.i_episode

        # Epsilon-greedy policy/probabilities
        probs = self.epsilon_greedy_probs(state, epsilon)

        # Action selection acc. to epsilon-greedy policy
        action = np.random.choice(np.arange(self.nA), p = probs)

        return action

    def step(self, state, action, reward, next_state, done):
        """ Update the agent's knowledge, using the most recently sampled tuple.

        Params
        ======
        - state: the previous state of the environment
        - action: the agent's previous choice of action
        - reward: last reward received
        - next_state: the current state of the environment
        - done: whether the episode is complete (True or False)
        """
        # SARSA method
        next_action = self.select_action(next_state)
        Gt = reward + self.gamma * self.Q[next_state][next_action]

        # Q-learning (SARSAMAX) method
        #best_action = np.argmax(self.Q[next_state])
        #Gt = reward + self.gamma * self.Q[next_state][best_action]

        self.Q[state][action] += self.alpha * (Gt - self.Q[state][action])

        # i_episode update for calculation of epsilon decay
        self.i_episode += 1.0
[ "numpy.zeros", "numpy.arange", "numpy.ones", "numpy.argmax" ]
[((636, 660), 'numpy.argmax', 'np.argmax', (['self.Q[state]'], {}), '(self.Q[state])\n', (645, 660), True, 'import numpy as np\n'), ((1397, 1415), 'numpy.arange', 'np.arange', (['self.nA'], {}), '(self.nA)\n', (1406, 1415), True, 'import numpy as np\n'), ((286, 303), 'numpy.zeros', 'np.zeros', (['self.nA'], {}), '(self.nA)\n', (294, 303), True, 'import numpy as np\n'), ((577, 593), 'numpy.ones', 'np.ones', (['self.nA'], {}), '(self.nA)\n', (584, 593), True, 'import numpy as np\n')]
import argparse import sys import copy from graphviz import Digraph from rply import LexingError, ParsingError from lang.lexer import Lexer from lang.parser import Parser from lang.scope import Scope lexer = Lexer() parser = Parser(lexer.tokens) def execute(scope, source, draw=False, lexer_output=False, opt=False): try: tokens = lexer.lex(source) if lexer_output: print("LEXER OUTPUT") for token in copy.copy(tokens): print(token) print() print("PROGRAM OUTPUT") ast = parser.parse(tokens) # Optimize if opt: ast.eval(True, scope) result = ast.eval(False, scope) # Draw AST graph if draw: g = Digraph() ast.draw(g) g.render("ast", format="png", view=True, cleanup=True) return result except ValueError as err: print(err) except LexingError: print("Lexing error") except ParsingError: print("Parsing error") def run_repl(): scope = Scope() while True: try: source = input("> ") result = execute(scope, source) if result is not None: print(result) if scope.last_pop is not None: scope.symbols_stack.insert(0, scope.last_pop) except KeyboardInterrupt: break def run_file(path, draw=False, lexer_output=False): scope = Scope() with open(path, "r") as f: source = f.read() execute(scope, source, draw=draw, lexer_output=lexer_output) if __name__ == "__main__": arg_parser = argparse.ArgumentParser() arg_parser.add_argument("file", nargs="?", help="path to script") arg_parser.add_argument( "-a", "--ast", help="draw abstract syntax tree", action="store_true" ) arg_parser.add_argument( "-l", "--lexer", help="print lexer output", action="store_true" ) args = arg_parser.parse_args() if args.file: run_file(args.file, draw=args.ast, lexer_output=args.lexer) else: run_repl()
[ "argparse.ArgumentParser", "lang.parser.Parser", "copy.copy", "graphviz.Digraph", "lang.scope.Scope", "lang.lexer.Lexer" ]
[((211, 218), 'lang.lexer.Lexer', 'Lexer', ([], {}), '()\n', (216, 218), False, 'from lang.lexer import Lexer\n'), ((228, 248), 'lang.parser.Parser', 'Parser', (['lexer.tokens'], {}), '(lexer.tokens)\n', (234, 248), False, 'from lang.parser import Parser\n'), ((1074, 1081), 'lang.scope.Scope', 'Scope', ([], {}), '()\n', (1079, 1081), False, 'from lang.scope import Scope\n'), ((1476, 1483), 'lang.scope.Scope', 'Scope', ([], {}), '()\n', (1481, 1483), False, 'from lang.scope import Scope\n'), ((1656, 1681), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1679, 1681), False, 'import argparse\n'), ((451, 468), 'copy.copy', 'copy.copy', (['tokens'], {}), '(tokens)\n', (460, 468), False, 'import copy\n'), ((761, 770), 'graphviz.Digraph', 'Digraph', ([], {}), '()\n', (768, 770), False, 'from graphviz import Digraph\n')]
import logging import os from typing import Literal, Union from ghaudit.cli import cli LOGFILE = os.environ.get("LOGFILE") LOGLEVEL = os.environ.get("LOGLEVEL", "ERROR") # pylint: disable=line-too-long LOG_FORMAT = "{asctime} {levelname:8s} ghaudit <{filename}:{lineno} {module}.{funcName}> {message}" # noqa: E501 STYLE = "{" # type: Union[Literal["%"], Literal["{"], Literal["$"]] def main() -> None: if LOGFILE: handler = logging.FileHandler(LOGFILE) formatter = logging.Formatter(LOG_FORMAT, style=STYLE) handler.setFormatter(formatter) root = logging.getLogger() root.setLevel(LOGLEVEL) root.addHandler(handler) else: logging.basicConfig(level=LOGLEVEL, format=LOG_FORMAT, style=STYLE) # pylint: disable=no-value-for-parameter cli() if __name__ == "__main__": main()
[ "ghaudit.cli.cli", "logging.FileHandler", "logging.basicConfig", "os.environ.get", "logging.Formatter", "logging.getLogger" ]
[((99, 124), 'os.environ.get', 'os.environ.get', (['"""LOGFILE"""'], {}), "('LOGFILE')\n", (113, 124), False, 'import os\n'), ((136, 171), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""ERROR"""'], {}), "('LOGLEVEL', 'ERROR')\n", (150, 171), False, 'import os\n'), ((810, 815), 'ghaudit.cli.cli', 'cli', ([], {}), '()\n', (813, 815), False, 'from ghaudit.cli import cli\n'), ((443, 471), 'logging.FileHandler', 'logging.FileHandler', (['LOGFILE'], {}), '(LOGFILE)\n', (462, 471), False, 'import logging\n'), ((492, 534), 'logging.Formatter', 'logging.Formatter', (['LOG_FORMAT'], {'style': 'STYLE'}), '(LOG_FORMAT, style=STYLE)\n', (509, 534), False, 'import logging\n'), ((590, 609), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (607, 609), False, 'import logging\n'), ((693, 760), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'LOGLEVEL', 'format': 'LOG_FORMAT', 'style': 'STYLE'}), '(level=LOGLEVEL, format=LOG_FORMAT, style=STYLE)\n', (712, 760), False, 'import logging\n')]
import numpy as np class Mesh: """ Contains all the information about the spatial domain """ def __init__(self,dimension,topology,geometry): self.Nvoxels = len(topology) self.dimension = dimension self.topology = topology # adjaceny matrix (numpy array), 0 along main diagonal, 1 elsewhere # only really works for regular grids self.geometry = geometry # numpy array of Nvoxels pairs (volume,x,(y,(z))) def get_coarseMesh_voxel(voxel,coupling): # returns the coarse mesh voxel associated with # voxel by the coupling # by convention I take the coarse mesh voxel to by the smallest # index coupled to voxel according to coupling i = 0 while coupling[voxel,i]<1: i = i+1 return i def make_lattice1d(Nx,L): # generates uniform 1d lattice on [0,L] topology = np.zeros((Nx,Nx)) d = np.ones(Nx-1) topology = np.diag(d,1)+np.diag(d,-1) geometry = np.zeros((Nx,2)) h = L/Nx geometry[:,0] = h*np.ones(Nx) geometry[:,1] = np.linspace(0,L-h,Nx) mesh = Mesh(1,topology,geometry) return mesh def make_lattice1d_coupled(Nx,L,J): mesh = make_lattice1d(Nx,L) coupling = np.zeros((Nx,Nx)) for i in range(int(Nx/J)): coupling[i*J:(i+1)*J,i*J:(i+1)*J] = np.ones((J,J)) return mesh,coupling # need to implement def make_lattice2d(Nx,Ny,Lx,Ly): topology = np.zeros((Nx*Ny,Nx*Ny)) d1 = np.ones(Nx-1) d2 = np.ones(Nx*Ny-Ny) for i in range(Ny): topology[i*Ny:(i+1)*Ny,i*Ny:(i+1)*Ny] = np.diag(d1,1)+np.diag(d1,-1) topology = topology + np.diag(d2,Nx)+np.diag(d2,-Nx) geometry = np.zeros((Nx*Ny,2)) hx = Nx/Lx hy = Ny/Ly #geometry[:,0] = h*np.ones(Nx) #geometry[:,1] = linspace(0,L-h,Nx) mesh = Mesh(1,topology,geometry) return mesh def make_lattice3d(Nx,Ny): return None
[ "numpy.diag", "numpy.zeros", "numpy.ones", "numpy.linspace" ]
[((855, 873), 'numpy.zeros', 'np.zeros', (['(Nx, Nx)'], {}), '((Nx, Nx))\n', (863, 873), True, 'import numpy as np\n'), ((881, 896), 'numpy.ones', 'np.ones', (['(Nx - 1)'], {}), '(Nx - 1)\n', (888, 896), True, 'import numpy as np\n'), ((952, 969), 'numpy.zeros', 'np.zeros', (['(Nx, 2)'], {}), '((Nx, 2))\n', (960, 969), True, 'import numpy as np\n'), ((1036, 1061), 'numpy.linspace', 'np.linspace', (['(0)', '(L - h)', 'Nx'], {}), '(0, L - h, Nx)\n', (1047, 1061), True, 'import numpy as np\n'), ((1196, 1214), 'numpy.zeros', 'np.zeros', (['(Nx, Nx)'], {}), '((Nx, Nx))\n', (1204, 1214), True, 'import numpy as np\n'), ((1398, 1426), 'numpy.zeros', 'np.zeros', (['(Nx * Ny, Nx * Ny)'], {}), '((Nx * Ny, Nx * Ny))\n', (1406, 1426), True, 'import numpy as np\n'), ((1431, 1446), 'numpy.ones', 'np.ones', (['(Nx - 1)'], {}), '(Nx - 1)\n', (1438, 1446), True, 'import numpy as np\n'), ((1454, 1475), 'numpy.ones', 'np.ones', (['(Nx * Ny - Ny)'], {}), '(Nx * Ny - Ny)\n', (1461, 1475), True, 'import numpy as np\n'), ((1645, 1667), 'numpy.zeros', 'np.zeros', (['(Nx * Ny, 2)'], {}), '((Nx * Ny, 2))\n', (1653, 1667), True, 'import numpy as np\n'), ((910, 923), 'numpy.diag', 'np.diag', (['d', '(1)'], {}), '(d, 1)\n', (917, 923), True, 'import numpy as np\n'), ((923, 937), 'numpy.diag', 'np.diag', (['d', '(-1)'], {}), '(d, -1)\n', (930, 937), True, 'import numpy as np\n'), ((1004, 1015), 'numpy.ones', 'np.ones', (['Nx'], {}), '(Nx)\n', (1011, 1015), True, 'import numpy as np\n'), ((1289, 1304), 'numpy.ones', 'np.ones', (['(J, J)'], {}), '((J, J))\n', (1296, 1304), True, 'import numpy as np\n'), ((1614, 1630), 'numpy.diag', 'np.diag', (['d2', '(-Nx)'], {}), '(d2, -Nx)\n', (1621, 1630), True, 'import numpy as np\n'), ((1544, 1558), 'numpy.diag', 'np.diag', (['d1', '(1)'], {}), '(d1, 1)\n', (1551, 1558), True, 'import numpy as np\n'), ((1558, 1573), 'numpy.diag', 'np.diag', (['d1', '(-1)'], {}), '(d1, -1)\n', (1565, 1573), True, 'import numpy as np\n'), ((1599, 1614), 'numpy.diag', 'np.diag', (['d2', 'Nx'], {}), '(d2, Nx)\n', (1606, 1614), True, 'import numpy as np\n')]
# snapy - a python snmp library # # Copyright (C) 2009 ITA Software, Inc. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # version 2 as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. import time from twisted.trial import unittest from snapy.netsnmp.unittests import TestCase from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID class Result(object): """Container for async results""" value = None def set_result(value, result): result.value = value class TestSessionV1(TestCase): version = "1" bulk = False basics = [ (OID(".1.3.6.1.4.2.1.1"), 1), (OID(".1.3.6.1.4.2.1.2"), -1), (OID(".1.3.6.1.4.2.1.3"), 1), (OID(".1.3.6.1.4.2.1.4"), "test value"), ] def setUpSession(self, address): self.session = Session( version=self.version, community="public", peername=address, _use_bulk=self.bulk) self.session.open() def tearDownSession(self): self.session.close() def test_sget(self): result = self.session.sget([x for x,v in self.basics]) self.assertEquals(result, self.basics) return self.finishGet() def test_get_small(self): result = Result() self.session.get([x for x,v in self.basics], set_result, result) self.session.wait() self.assertEquals(result.value, self.basics) return self.finishGet() def test_get_big(self): oids = [] for i in xrange(1, 100): oids.append(OID((1,3,6,1,4,2,4,i))) result = Result() self.session.get(oids, set_result, result) self.session.wait() result = dict(result.value) for oid in oids: assert oid in result assert result[oid] == "data data data data" return self.finishGet() def test_walk_tree(self): result = Result() self.session.walk([".1.3.6.1.4.2.1"], set_result, result) self.session.wait() self.assertEquals(result.value, self.basics) return self.finishWalk() def test_walk_leaf(self): oid = OID(".1.3.6.1.4.2.1.1") result = Result() self.session.walk([oid], set_result, result) self.session.wait() self.assertEquals(result.value, [(oid, 1)]) return self.finishGet() def test_walk_strict(self): oid = OID(".1.3.6.1.4.2.1.1") result = Result() self.session.walk([oid], set_result, result, strict=True) self.session.wait() self.assertEquals(result.value, []) return self.finishStrictWalk() def test_sysDescr(self): result = self.session.sget([OID("SNMPv2-MIB::sysDescr.0")]) self.assert_(result) self.assertIsInstance(result[0][1], str) self.assert_(len(result[0][1]) > 0) return self.finishGet() class TestSessionV2c(TestSessionV1): version = "2c" def test_hrSystemDate(self): # This is a special string that gets formatted using the # MIB's DISPLAY-HINT value. Also, strip off everything # other than the date and hour to avoid a race condition. # And one more quirk, these dates are not zero padded # so we must format the date manually, whee... 
now = time.localtime() now = "%d-%d-%d,%d" % (now[0], now[1], now[2], now[3]) result = self.session.sget([OID(".1.3.6.1.2.1.25.1.2.0")]) self.assert_(result) value = result[0][1].split(':', 1)[0] self.assertEquals(value, now) return self.finishGet() class TestSessionV2cBulk(TestSessionV2c): bulk = True class TestTimeoutsV1(unittest.TestCase): version = "1" def setUp(self): self.session = Session( version=self.version, community="public", peername="udp:127.0.0.1:9", retries=0, timeout=0.1) self.session.open() def test_sget(self): self.assertRaises(SnmpError, self.session.sget, [".1.3.6.1.4.2.1.1"]) def test_get(self): result = Result() self.session.get([".1.3.6.1.4.2.1.1"], set_result, result) self.session.wait() assert isinstance(result.value, SnmpTimeout) def tearDown(self): self.session.close() class TestTimeoutsV2c(TestTimeoutsV1): version = "2c" class TestOID(unittest.TestCase): def test_oid_name(self): oid = OID("1.3.6.1.2.1.1.1.0") self.assertEquals(oid, OID("SNMPv2-MIB::sysDescr.0")) self.assertEquals(oid, OID("sysDescr.0"))
[ "snapy.netsnmp.Session", "time.localtime", "snapy.netsnmp.OID" ]
[((1135, 1227), 'snapy.netsnmp.Session', 'Session', ([], {'version': 'self.version', 'community': '"""public"""', 'peername': 'address', '_use_bulk': 'self.bulk'}), "(version=self.version, community='public', peername=address,\n _use_bulk=self.bulk)\n", (1142, 1227), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((2489, 2512), 'snapy.netsnmp.OID', 'OID', (['""".1.3.6.1.4.2.1.1"""'], {}), "('.1.3.6.1.4.2.1.1')\n", (2492, 2512), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((2751, 2774), 'snapy.netsnmp.OID', 'OID', (['""".1.3.6.1.4.2.1.1"""'], {}), "('.1.3.6.1.4.2.1.1')\n", (2754, 2774), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((3648, 3664), 'time.localtime', 'time.localtime', ([], {}), '()\n', (3662, 3664), False, 'import time\n'), ((4106, 4212), 'snapy.netsnmp.Session', 'Session', ([], {'version': 'self.version', 'community': '"""public"""', 'peername': '"""udp:127.0.0.1:9"""', 'retries': '(0)', 'timeout': '(0.1)'}), "(version=self.version, community='public', peername=\n 'udp:127.0.0.1:9', retries=0, timeout=0.1)\n", (4113, 4212), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((4798, 4822), 'snapy.netsnmp.OID', 'OID', (['"""1.3.6.1.2.1.1.1.0"""'], {}), "('1.3.6.1.2.1.1.1.0')\n", (4801, 4822), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((891, 914), 'snapy.netsnmp.OID', 'OID', (['""".1.3.6.1.4.2.1.1"""'], {}), "('.1.3.6.1.4.2.1.1')\n", (894, 914), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((934, 957), 'snapy.netsnmp.OID', 'OID', (['""".1.3.6.1.4.2.1.2"""'], {}), "('.1.3.6.1.4.2.1.2')\n", (937, 957), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((977, 1000), 'snapy.netsnmp.OID', 'OID', (['""".1.3.6.1.4.2.1.3"""'], {}), "('.1.3.6.1.4.2.1.3')\n", (980, 1000), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((1020, 1043), 'snapy.netsnmp.OID', 'OID', (['""".1.3.6.1.4.2.1.4"""'], {}), "('.1.3.6.1.4.2.1.4')\n", (1023, 1043), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((4854, 4883), 'snapy.netsnmp.OID', 'OID', (['"""SNMPv2-MIB::sysDescr.0"""'], {}), "('SNMPv2-MIB::sysDescr.0')\n", (4857, 4883), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((4916, 4933), 'snapy.netsnmp.OID', 'OID', (['"""sysDescr.0"""'], {}), "('sysDescr.0')\n", (4919, 4933), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((1893, 1922), 'snapy.netsnmp.OID', 'OID', (['(1, 3, 6, 1, 4, 2, 4, i)'], {}), '((1, 3, 6, 1, 4, 2, 4, i))\n', (1896, 1922), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((3044, 3073), 'snapy.netsnmp.OID', 'OID', (['"""SNMPv2-MIB::sysDescr.0"""'], {}), "('SNMPv2-MIB::sysDescr.0')\n", (3047, 3073), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n'), ((3764, 3792), 'snapy.netsnmp.OID', 'OID', (['""".1.3.6.1.2.1.25.1.2.0"""'], {}), "('.1.3.6.1.2.1.25.1.2.0')\n", (3767, 3792), False, 'from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID\n')]
# The MIT License (MIT) # # Copyright (c) 2017 <NAME> and Adafruit Industries # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ `adafruit_rgb_display.rgb` ==================================================== Base class for all RGB Display devices * Author(s): <NAME>, <NAME> """ import time try: import struct except ImportError: import ustruct as struct import adafruit_bus_device.spi_device as spi_device __version__ = "0.0.0-auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_RGB_Display.git" # This is the size of the buffer to be used for fill operations, in 16-bit # units. try: # If we're on CPython, try to set as large as possible import platform if "CPython" in platform.python_implementation(): # check for FT232H special case try: import os if os.environ['BLINKA_FT232H']: # we are limited by pyftdi's max SPI payload from pyftdi.spi import SpiController _BUFFER_SIZE = SpiController.PAYLOAD_MAX_LENGTH // 2 # max bytes / bytes per pixel except KeyError: # otherwise set it to blit the whole thing _BUFFER_SIZE = 320 * 240 else: # in case CircuitPython ever implements platform _BUFFER_SIZE = 256 except ImportError: # Otherwise set smaller MCU friendly size _BUFFER_SIZE = 256 def color565(r, g=0, b=0): """Convert red, green and blue values (0-255) into a 16-bit 565 encoding. 
As a convenience this is also available in the parent adafruit_rgb_display package namespace.""" try: r, g, b = r # see if the first var is a tuple/list except TypeError: pass return (r & 0xf8) << 8 | (g & 0xfc) << 3 | b >> 3 class DummyPin: """Can be used in place of a ``DigitalInOut()`` when you don't want to skip it.""" def deinit(self): """Dummy DigitalInOut deinit""" pass def switch_to_output(self, *args, **kwargs): """Dummy switch_to_output method""" pass def switch_to_input(self, *args, **kwargs): """Dummy switch_to_input method""" pass @property def value(self): """Dummy value DigitalInOut property""" pass @value.setter def value(self, val): pass @property def direction(self): """Dummy direction DigitalInOut property""" pass @direction.setter def direction(self, val): pass @property def pull(self): """Dummy pull DigitalInOut property""" pass @pull.setter def pull(self, val): pass class Display: #pylint: disable-msg=no-member """Base class for all RGB display devices :param width: number of pixels wide :param height: number of pixels high """ _PAGE_SET = None _COLUMN_SET = None _RAM_WRITE = None _RAM_READ = None _X_START = 0 # pylint: disable=invalid-name _Y_START = 0 # pylint: disable=invalid-name _INIT = () _ENCODE_PIXEL = ">H" _ENCODE_POS = ">HH" _DECODE_PIXEL = ">BBB" def __init__(self, width, height): self.width = width self.height = height self.init() def init(self): """Run the initialization commands.""" for command, data in self._INIT: self.write(command, data) #pylint: disable-msg=invalid-name,too-many-arguments def _block(self, x0, y0, x1, y1, data=None): """Read or write a block of data.""" self.write(self._COLUMN_SET, self._encode_pos(x0 + self._X_START, x1 + self._X_START)) self.write(self._PAGE_SET, self._encode_pos(y0 + self._Y_START, y1 + self._Y_START)) if data is None: size = struct.calcsize(self._DECODE_PIXEL) return self.read(self._RAM_READ, (x1 - x0 + 1) * (y1 - y0 + 1) * size) self.write(self._RAM_WRITE, data) return None #pylint: enable-msg=invalid-name,too-many-arguments def _encode_pos(self, x, y): """Encode a postion into bytes.""" return struct.pack(self._ENCODE_POS, x, y) def _encode_pixel(self, color): """Encode a pixel color into bytes.""" return struct.pack(self._ENCODE_PIXEL, color) def _decode_pixel(self, data): """Decode bytes into a pixel color.""" return color565(*struct.unpack(self._DECODE_PIXEL, data)) def pixel(self, x, y, color=None): """Read or write a pixel at a given position.""" if color is None: return self._decode_pixel(self._block(x, y, x, y)) if 0 <= x < self.width and 0 <= y < self.height: self._block(x, y, x, y, self._encode_pixel(color)) return None def image(self, img, rotation=0): """Set buffer to value of Python Imaging Library image. The image should be in 1 bit mode and a size equal to the display size.""" if not img.mode in ('RGB', 'RGBA'): raise ValueError('Image must be in mode RGB or RGBA') if rotation not in (0, 90, 180, 270): raise ValueError('Rotation must be 0/90/180/270') if rotation != 0: img = img.rotate(rotation, expand=True) imwidth, imheight = img.size if imwidth != self.width or imheight != self.height: raise ValueError('Image must be same dimensions as display ({0}x{1}).' \ .format(self.width, self.height)) pixels = bytearray(self.width * self.height * 2) # Iterate through the pixels for x in range(self.width): # yes this double loop is slow, for y in range(self.height): # but these displays are small! 
pix = color565(img.getpixel((x, y))) pixels[2*(y * self.width + x)] = pix >> 8 pixels[2*(y * self.width + x) + 1] = pix & 0xFF #print([hex(x) for x in pixels]) self._block(0, 0, self.width-1, self.height - 1, pixels) #pylint: disable-msg=too-many-arguments def fill_rectangle(self, x, y, width, height, color): """Draw a rectangle at specified position with specified width and height, and fill it with the specified color.""" x = min(self.width - 1, max(0, x)) y = min(self.height - 1, max(0, y)) width = min(self.width - x, max(1, width)) height = min(self.height - y, max(1, height)) self._block(x, y, x + width - 1, y + height - 1, b'') chunks, rest = divmod(width * height, _BUFFER_SIZE) pixel = self._encode_pixel(color) if chunks: data = pixel * _BUFFER_SIZE for _ in range(chunks): self.write(None, data) self.write(None, pixel * rest) #pylint: enable-msg=too-many-arguments def fill(self, color=0): """Fill the whole display with the specified color.""" self.fill_rectangle(0, 0, self.width, self.height, color) def hline(self, x, y, width, color): """Draw a horizontal line.""" self.fill_rectangle(x, y, width, 1, color) def vline(self, x, y, height, color): """Draw a vertical line.""" self.fill_rectangle(x, y, 1, height, color) class DisplaySPI(Display): """Base class for SPI type devices""" #pylint: disable-msg=too-many-arguments def __init__(self, spi, dc, cs, rst=None, width=1, height=1, baudrate=12000000, polarity=0, phase=0, *, x_offset=0, y_offset=0): self.spi_device = spi_device.SPIDevice(spi, cs, baudrate=baudrate, polarity=polarity, phase=phase) self.dc_pin = dc self.rst = rst self.dc_pin.switch_to_output(value=0) if self.rst: self.rst.switch_to_output(value=0) self.reset() self._X_START = x_offset # pylint: disable=invalid-name self._Y_START = y_offset # pylint: disable=invalid-name super().__init__(width, height) #pylint: enable-msg=too-many-arguments def reset(self): """Reset the device""" self.rst.value = 0 time.sleep(0.050) # 50 milliseconds self.rst.value = 1 time.sleep(0.050) # 50 milliseconds # pylint: disable=no-member def write(self, command=None, data=None): """SPI write to the device: commands and data""" if command is not None: self.dc_pin.value = 0 with self.spi_device as spi: spi.write(bytearray([command])) if data is not None: self.dc_pin.value = 1 with self.spi_device as spi: spi.write(data) def read(self, command=None, count=0): """SPI read from device with optional command""" data = bytearray(count) self.dc_pin.value = 0 with self.spi_device as spi: if command is not None: spi.write(bytearray([command])) if count: spi.readinto(data) return data
[ "platform.python_implementation", "adafruit_bus_device.spi_device.SPIDevice", "ustruct.calcsize", "time.sleep", "ustruct.pack", "ustruct.unpack" ]
[((1729, 1761), 'platform.python_implementation', 'platform.python_implementation', ([], {}), '()\n', (1759, 1761), False, 'import platform\n'), ((5103, 5138), 'ustruct.pack', 'struct.pack', (['self._ENCODE_POS', 'x', 'y'], {}), '(self._ENCODE_POS, x, y)\n', (5114, 5138), True, 'import ustruct as struct\n'), ((5238, 5276), 'ustruct.pack', 'struct.pack', (['self._ENCODE_PIXEL', 'color'], {}), '(self._ENCODE_PIXEL, color)\n', (5249, 5276), True, 'import ustruct as struct\n'), ((8530, 8615), 'adafruit_bus_device.spi_device.SPIDevice', 'spi_device.SPIDevice', (['spi', 'cs'], {'baudrate': 'baudrate', 'polarity': 'polarity', 'phase': 'phase'}), '(spi, cs, baudrate=baudrate, polarity=polarity, phase=phase\n )\n', (8550, 8615), True, 'import adafruit_bus_device.spi_device as spi_device\n'), ((9144, 9160), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (9154, 9160), False, 'import time\n'), ((9216, 9232), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (9226, 9232), False, 'import time\n'), ((4745, 4780), 'ustruct.calcsize', 'struct.calcsize', (['self._DECODE_PIXEL'], {}), '(self._DECODE_PIXEL)\n', (4760, 4780), True, 'import ustruct as struct\n'), ((5385, 5424), 'ustruct.unpack', 'struct.unpack', (['self._DECODE_PIXEL', 'data'], {}), '(self._DECODE_PIXEL, data)\n', (5398, 5424), True, 'import ustruct as struct\n')]