Dataset schema (each record below carries these fields; stats are min–max or class counts over the split):

field                   dtype      stats
repo_name               string     lengths 7–111
__id__                  int64      16.6k – 19,705B
blob_id                 string     lengths 40–40
directory_id            string     lengths 40–40
path                    string     lengths 5–151
content_id              string     lengths 40–40
detected_licenses       sequence   —
license_type            string     2 classes
repo_url                string     lengths 26–130
snapshot_id             string     lengths 40–40
revision_id             string     lengths 40–40
branch_name             string     lengths 4–42
visit_date              unknown    —
revision_date           unknown    —
committer_date          unknown    —
github_id               int64      14.6k – 687M
star_events_count       int64      0 – 209k
fork_events_count       int64      0 – 110k
gha_license_id          string     12 classes
gha_fork                bool       2 classes
gha_event_created_at    unknown    —
gha_created_at          unknown    —
gha_updated_at          unknown    —
gha_pushed_at           unknown    —
gha_size                int64      0 – 10.2M
gha_stargazers_count    int32      0 – 178k
gha_forks_count         int32      0 – 88.9k
gha_open_issues_count   int32      0 – 2.72k
gha_language            string     lengths 1–16
gha_archived            bool       1 class
gha_disabled            bool       1 class
content                 string     lengths 10 – 2.95M
src_encoding            string     5 classes
language                string     1 class
is_vendor               bool       2 classes
is_generated            bool       2 classes
length_bytes            int64      10 – 2.95M
extension               string     19 classes
num_repo_files          int64      1 – 202k
filename                string     lengths 4–112
num_lang_files          int64      1 – 202k
alphanum_fraction       float64    0.26 – 0.89
alpha_fraction          float64    0.2 – 0.89
hex_fraction            float64    0 – 0.09
num_lines               int32      1 – 93.6k
avg_line_length         float64    4.57 – 103
max_line_length         int64      7 – 931
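As a minimal sketch of how records shaped like this schema can be loaded and inspected, assuming the rows are available as a JSON-lines export (the file name records.jsonl is an illustrative assumption, not part of this dump):

# Minimal sketch: load records that follow the schema above and inspect them.
# Assumes a JSON-lines export named "records.jsonl" (hypothetical file name).
import json

with open("records.jsonl", encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

for rec in records:
    # Each record pairs repo/commit metadata with the raw source file in rec["content"].
    print(rec["repo_name"], rec["path"], "-", rec["length_bytes"], "bytes,", rec["num_lines"], "lines")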
----------------------------------------------------------------------
repo_name: squaresLab/Houston | __id__: 12,034,498,404,263 | github_id: 82,945,828
repo_url: https://github.com/squaresLab/Houston | branch_name: refs/heads/master
path: /houston/ardu/home.py | filename: home.py | extension: py
blob_id: e6997b3ba693ef07ccf08a7fc7cff0ef66c2dd13 | content_id: b6e4c9351deadecc33cd2d38bddba5fc947c59fc
directory_id: 5332f9f303d38dfabf367cfaab3f75627163e4f3
snapshot_id: 59d81aa61cf6c8d0c6172a67b382bf25843d358e | revision_id: 088cb52fe2d36dfb25b03b98f51b09d56f7d47d0
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-06-22T18:50:21.150971 | revision_date: 2019-08-01T22:07:11 | committer_date: 2019-08-01T22:07:11
star_events_count: 3 | fork_events_count: 2
gha_license_id: MIT | gha_fork: false | gha_language: Python | gha_archived: false | gha_disabled: false
gha_event_created_at: 2019-08-01T22:07:12 | gha_created_at: 2017-02-23T16:16:47 | gha_updated_at: 2019-08-01T21:26:35 | gha_pushed_at: 2019-08-01T22:07:11
gha_size: 1,982 | gha_stargazers_count: 3 | gha_forks_count: 3 | gha_open_issues_count: 56
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 807 | num_lines: 29 | avg_line_length: 26.827586 | max_line_length: 55
num_repo_files: 88 | num_lang_files: 81 | alphanum_fraction: 0.51425 | alpha_fraction: 0.51425 | hex_fraction: 0
content:

__all__ = ('HomeLocation',)

from typing import Dict, Any

import attr


@attr.s(frozen=True)
class HomeLocation:
    latitude = attr.ib(type=float)
    longitude = attr.ib(type=float)
    altitude = attr.ib(type=int)
    heading = attr.ib(type=int)

    def __str__(self) -> str:
        return "{},{},{},{}".format(self.latitude,
                                    self.longitude,
                                    self.altitude,
                                    self.heading)

    @staticmethod
    def from_dict(d: Dict[str, Any]) -> 'HomeLocation':
        return HomeLocation(**d)

    def to_dict(self) -> Dict[str, Any]:
        return {'latitude': self.latitude,
                'longitude': self.longitude,
                'altitude': self.altitude,
                'heading': self.heading}
----------------------------------------------------------------------
repo_name: developer0hye/PyTorch-DLA | __id__: 4,698,694,258,441 | github_id: 315,783,758
repo_url: https://github.com/developer0hye/PyTorch-DLA | branch_name: refs/heads/main
path: /dla.py | filename: dla.py | extension: py
blob_id: 952818fa82a11ede5e83b726a97122c4754a0a49 | content_id: ece64398fa8c4152b5eae4cdcc6528b91aec142d
directory_id: 52c7d43eb42198d1d198bfb14618e9d418e6273d
snapshot_id: 16fcc81cea2b17724d540864c504c9763b731185 | revision_id: b8eb62aac7c32f3387773da7164e3e7cc45e90ff
detected_licenses: [] | license_type: no_license
visit_date: 2023-02-01T00:53:53.100785 | revision_date: 2020-12-15T03:12:36 | committer_date: 2020-12-15T03:12:36
star_events_count: 2 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 6,312 | num_lines: 215 | avg_line_length: 27.083721 | max_line_length: 131
num_repo_files: 3 | num_lang_files: 1 | alphanum_fraction: 0.630568 | alpha_fraction: 0.605067 | hex_fraction: 0
content:

import os, sys
import math

import torch
import torch.nn as nn
import numpy as np

def conv_bn_relu(in_channels, out_channels, kernel_size=3, stride=1):
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                  padding=kernel_size//2, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=False))

def conv_bn(in_channels, out_channels, kernel_size=3, stride=1):
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                  padding=kernel_size//2, bias=False),
        nn.BatchNorm2d(out_channels))

class Aggregation(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Aggregation, self).__init__()
        self.aggregation = conv_bn_relu(in_channels, out_channels, kernel_size=1)

    def forward(self, *x):
        x = torch.cat(x, dim=1)
        return self.aggregation(x)

class BasicBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock, self).__init__()
        if stride == 1:
            self.shortcut = nn.Identity()
        else:
            self.shortcut = nn.Sequential(nn.MaxPool2d(2),
                                          conv_bn(in_channels, out_channels, kernel_size=1))
        self.conv1 = conv_bn_relu(in_channels, out_channels, stride=stride)
        self.conv2 = conv_bn(out_channels, out_channels)

    def forward(self, x):
        return torch.relu(self.shortcut(x) + self.conv2(self.conv1(x)))

class HDATree(nn.Module):
    def __init__(self, depth, aggregate_root, root_channels, in_channels, out_channels,
                 stride=1, block=BasicBlock):
        super(HDATree, self).__init__()
        self.depth = depth
        self.aggregate_root = aggregate_root

        if root_channels == 0:
            root_channels = out_channels * 2
            if aggregate_root:
                root_channels += in_channels

        self.downsample = nn.MaxPool2d(2)

        if depth == 1:
            self.child1 = block(in_channels, out_channels, stride)
            self.child2 = block(out_channels, out_channels)
            self.aggregation = Aggregation(root_channels, out_channels)
        else:
            # recursively generate layers
            self.child1 = HDATree(depth=depth-1, aggregate_root=False, root_channels=0,
                                  in_channels=in_channels, out_channels=out_channels, stride=stride)
            self.child2 = HDATree(depth=depth-1, aggregate_root=False,
                                  root_channels=root_channels + out_channels,
                                  in_channels=out_channels, out_channels=out_channels)

    def forward(self, x, aggregated_features=None):
        if aggregated_features is None:
            aggregated_features = []

        if self.aggregate_root:
            aggregated_features.append(self.downsample(x))

        x1 = self.child1(x)

        if self.depth == 1:
            x2 = self.child2(x1)
            x = self.aggregation(x1, x2, *aggregated_features)
        else:
            aggregated_features.append(x1)
            x = self.child2(x1, aggregated_features)
        return x

class DLA(nn.Module):
    def __init__(self, stages_depth, channels, num_classes=1000, block=BasicBlock):
        super(DLA, self).__init__()
        # Refer to 4.1. Classification Networks in the paper "Deep Layer Aggregation".
        # "basic block" means a conv_bn_relu layer in stage 1 and stage 2, and one of
        # three types of residual blocks in all other stages.

        # Stage 1 is composed of a 7x7 convolution followed by a basic block.
        self.stage1 = conv_bn_relu(3, channels[0], stride=2)
        # Stage 2 is only a basic block.
        self.stage2 = conv_bn_relu(channels[0], channels[1], stride=2)
        # For all other stages, we make use of combined IDA and HDA on the backbone blocks and stages.
        self.stage3 = HDATree(depth=stages_depth[2], aggregate_root=False, root_channels=0,
                              in_channels=channels[1], out_channels=channels[2], stride=2)
        self.stage4 = HDATree(depth=stages_depth[3], aggregate_root=True, root_channels=0,
                              in_channels=channels[2], out_channels=channels[3], stride=2)
        self.stage5 = HDATree(depth=stages_depth[4], aggregate_root=True, root_channels=0,
                              in_channels=channels[3], out_channels=channels[4], stride=2)

        self.stages = nn.ModuleList([self.stage1, self.stage2, self.stage3, self.stage4, self.stage5])

        self.gap = nn.AdaptiveAvgPool2d((1, 1))  # global average pooling
        self.fc = nn.Conv2d(channels[4], num_classes, kernel_size=1, stride=1, padding=0, bias=True)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1.)
                nn.init.constant_(m.bias, 0.)

    def forward(self, x):
        y = []
        for stage in self.stages:
            x = stage(x)
            y.append(x)
            print(x.shape)

        x = self.gap(x)
        x = self.fc(x)
        x = x.flatten(start_dim=1)
        return x

if __name__ == '__main__':
    model = DLA([1, 1, 1, 2, 1], [32, 64, 128, 256, 512]).cuda()
    model.eval()

    x = torch.randn(1, 3, 224, 224).cuda()

    import time

    model(x)

    avg_time = 0
    for i in range(0, 10):
        torch.cuda.synchronize()
        t2 = time.time()
        model(x)
        torch.cuda.synchronize()
        t3 = time.time()
        avg_time += t3 - t2

    avg_time /= 10.0
    print('avg_time: ', avg_time)

    # base_name = 'dla34'
    # x = torch.randn(1, 3, 256, 256)
    # model = globals()[base_name](pretrained=True, return_levels=False)

    # convert the model
    torch.onnx.export(model,                     # model to be run
                      x,                         # model input (a tuple or multiple inputs also possible)
                      "dla34_yh.onnx",           # where to save the model (a file or file-like object)
                      export_params=True,        # whether to store the trained weights inside the model file
                      opset_version=10,          # the ONNX opset version to use for the export
                      do_constant_folding=True,  # whether to apply constant folding for optimization
                      input_names=['input'],     # names for the model's inputs
                      output_names=['output'],   # names for the model's outputs
                      dynamic_axes={'input': {0: 'batch_size'},    # variable-length axes
                                    'output': {0: 'batch_size'}})
----------------------------------------------------------------------
repo_name: sim1234/Labirynt | __id__: 11,235,634,484,810 | github_id: null
repo_url: https://github.com/sim1234/Labirynt | branch_name: refs/heads/master
path: /main.py | filename: main.py | extension: py
blob_id: a05d8eab1f3dad40b2682236f93ff0338ce09d90 | content_id: 8e89d39de3271045884ac3f475341804e5a39db7
directory_id: ba1591113393cde9d5a06ea1d43092482ce9b9f1
snapshot_id: 9b67e66a950f71f97a1929ec47cbba1f76923e7e | revision_id: c47b9f18c57a1b62d3a3e713168c8701f9d15177
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-01T17:47:46.908943 | revision_date: 2013-08-09T12:49:28 | committer_date: 2013-08-09T12:49:28
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 113 | num_lines: 9 | avg_line_length: 11.666667 | max_line_length: 26
num_repo_files: 3 | num_lang_files: 3 | alphanum_fraction: 0.522124 | alpha_fraction: 0.469027 | hex_fraction: 0
content:

from game import game

def main():
    gra = game(640, 480)
    gra.play()

if __name__ == '__main__':
    main()
----------------------------------------------------------------------
repo_name: Idbruh/TrabalhosPyhton | __id__: 6,545,530,170,951 | github_id: 220,218,840
repo_url: https://github.com/Idbruh/TrabalhosPyhton | branch_name: refs/heads/master
path: /Aula40/aula40.py | filename: aula40.py | extension: py
blob_id: 117c8bfde1bcc9dc04019b22d6be4e52ee61ec89 | content_id: f3292d2eec5181d9e49624c4f14969738899ca6b
directory_id: 43f557b2d6aa3db3c1e156c10d11813051e1902b
snapshot_id: 3d379342c1a46a6151c6d44e4a0dfec6d0976b70 | revision_id: b14091b1f22959abd173622b36a84ec58506f8c5
detected_licenses: [] | license_type: no_license
visit_date: 2020-09-05T21:27:53.939072 | revision_date: 2020-02-27T11:43:42 | committer_date: 2020-02-27T11:43:42
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 160 | num_lines: 9 | avg_line_length: 16.666667 | max_line_length: 96
num_repo_files: 116 | num_lang_files: 105 | alphanum_fraction: 0.734177 | alpha_fraction: 0.734177 | hex_fraction: 0
content:

# FLASK RESTFUL
# GET N
# POST
# PUT
# DELETE
# controller

# inheritance in Python - takes an already generated class (importing everything that exists in an already existing class)
----------------------------------------------------------------------
repo_name: ressay/ArchToCE | __id__: 13,821,204,793,832 | github_id: 139,846,206
repo_url: https://github.com/ressay/ArchToCE | branch_name: refs/heads/master
path: /Samples/tryTwo.py | filename: tryTwo.py | extension: py
blob_id: 493608a2f20ff26c3794e0c49489d81683c9bdbd | content_id: 9a2b1736a14b13688ad7007a5b4ba0d866484ed3
directory_id: b07304822e097cc17f374c400a2b493694b47904
snapshot_id: 585583cb2860be2085f5a0ee11de11c37b3c39e4 | revision_id: ccd194ace8257476952fbc82a1401a0ee7ca779d
detected_licenses: [] | license_type: no_license
visit_date: 2022-06-22T00:43:19.113778 | revision_date: 2022-06-19T20:31:53 | committer_date: 2022-06-19T20:31:53
star_events_count: 3 | fork_events_count: 5
gha_license_id: null | gha_fork: false | gha_language: Python | gha_archived: false | gha_disabled: false
gha_event_created_at: 2020-03-16T13:51:56 | gha_created_at: 2018-07-05T12:27:27 | gha_updated_at: 2020-03-16T11:30:51 | gha_pushed_at: 2020-03-16T13:49:01
gha_size: 62,066 | gha_stargazers_count: 2 | gha_forks_count: 2 | gha_open_issues_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 685 | num_lines: 26 | avg_line_length: 25.384615 | max_line_length: 71
num_repo_files: 55 | num_lang_files: 43 | alphanum_fraction: 0.754745 | alpha_fraction: 0.738686 | hex_fraction: 0
content:

from OCC.Display.SimpleGui import init_display

display, start_display, add_menu, add_function_to_menu = init_display()

def simple_test(event=None):
    display.Test()

def simple_cylinder(event=None):
    from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeCylinder
    s = BRepPrimAPI_MakeCylinder(60, 200).Shape()
    display.DisplayShape(s)

add_menu('simple test')
add_function_to_menu('simple test', simple_test)
add_function_to_menu('simple test', simple_cylinder)

display.View_Iso()
display.FitAll()

from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox

my_box = BRepPrimAPI_MakeBox(10., 20., 30.).Shape()
display.DisplayShape(my_box, update=True)

start_display()  # display loop
----------------------------------------------------------------------
repo_name: frenck/iaqualink-py | __id__: 6,511,170,449,000 | github_id: 236,705,808
repo_url: https://github.com/frenck/iaqualink-py | branch_name: refs/heads/master
path: /tests/test_system.py | filename: test_system.py | extension: py
blob_id: c162d1d7a22f7137ebbf743bedc57788e83f8b3e | content_id: dd39737299db58c256d5145ebb840584e04e209a
directory_id: 23c06cce660a7211e59a1e18aa5ce40b8ba9cf49
snapshot_id: 0ac46f8e9f9f830efdd0df9425ded08fdd48a0a1 | revision_id: c75d693225556ec59e32088479be9c0fd84f6e9c
detected_licenses: ["BSD-3-Clause"] | license_type: permissive
visit_date: 2020-12-22T07:03:57.589339 | revision_date: 2019-10-14T01:49:28 | committer_date: 2019-10-14T01:49:28
star_events_count: 0 | fork_events_count: 0
gha_license_id: BSD-3-Clause | gha_fork: true | gha_language: null | gha_archived: false | gha_disabled: false
gha_event_created_at: 2020-01-28T10:08:30 | gha_created_at: 2020-01-28T10:08:29 | gha_updated_at: 2019-10-14T22:59:23 | gha_pushed_at: 2019-10-14T22:59:21
gha_size: 51 | gha_stargazers_count: 0 | gha_forks_count: 0 | gha_open_issues_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2,369 | num_lines: 62 | avg_line_length: 37.193548 | max_line_length: 77
num_repo_files: 3 | num_lang_files: 2 | alphanum_fraction: 0.654707 | alpha_fraction: 0.652596 | hex_fraction: 0
content:

import asynctest
import pytest

from iaqualink.exception import AqualinkSystemOfflineException
from iaqualink.system import AqualinkSystem, AqualinkPoolSystem

from .common import async_noop, async_raises

class TestAqualinkSystem(asynctest.TestCase):
    def setUp(self) -> None:
        pass

    @asynctest.fail_on(unused_loop=False)
    def test_from_data_iaqua(self):
        aqualink = asynctest.MagicMock()
        data = {"id": 1, "serial_number": "ABCDEFG", "device_type": "iaqua"}
        r = AqualinkSystem.from_data(aqualink, data)
        assert r is not None
        assert isinstance(r, AqualinkPoolSystem)

    @asynctest.fail_on(unused_loop=False)
    def test_from_data_unsupported(self):
        aqualink = asynctest.MagicMock()
        data = {"id": 1, "serial_number": "ABCDEFG", "device_type": "foo"}
        r = AqualinkSystem.from_data(aqualink, data)
        assert r is None

    @asynctest.strict
    async def test_update_success(self):
        aqualink = asynctest.MagicMock()
        data = {"id": 1, "serial_number": "ABCDEFG", "device_type": "iaqua"}
        r = AqualinkSystem.from_data(aqualink, data)
        r.aqualink.send_home_screen_request = async_noop
        r.aqualink.send_devices_screen_request = async_noop
        r._parse_home_response = async_noop
        r._parse_devices_response = async_noop
        await r.update()
        assert r.last_run_success is True
        assert r.online is True

    @asynctest.strict
    async def test_update_failure(self):
        aqualink = asynctest.MagicMock()
        data = {"id": 1, "serial_number": "ABCDEFG", "device_type": "iaqua"}
        r = AqualinkSystem.from_data(aqualink, data)
        r.aqualink.send_home_screen_request = async_raises
        await r.update()
        assert r.last_run_success is False
        assert r.online is None

    @asynctest.strict
    async def test_update_offline(self):
        aqualink = asynctest.MagicMock()
        data = {"id": 1, "serial_number": "ABCDEFG", "device_type": "iaqua"}
        r = AqualinkSystem.from_data(aqualink, data)
        r.aqualink.send_home_screen_request = async_noop
        r.aqualink.send_devices_screen_request = async_noop
        r._parse_home_response = async_raises(AqualinkSystemOfflineException)
        await r.update()
        assert r.last_run_success is True
        assert r.online is False
----------------------------------------------------------------------
repo_name: eapb99/Commerce-Django | __id__: 858,993,475,565 | github_id: 348,927,495
repo_url: https://github.com/eapb99/Commerce-Django | branch_name: refs/heads/master
path: /commerce/auctions/migrations/0002_bid_category_comment_listing.py | filename: 0002_bid_category_comment_listing.py | extension: py
blob_id: 6f3ace025f57beea4c2110fc3b93625a4d40028d | content_id: 80b160c07b043c8cdd5e9bfb0c547f3d2f7e3e7b
directory_id: d98d7a606b6c25e3c698cdac069b6039868bf4b9
snapshot_id: 8def756b405e8803c7ae8272936a9b2aa43d9a3d | revision_id: 4303c2dc2b06145ec7178df12aba31e325f27f5f
detected_licenses: [] | license_type: no_license
visit_date: 2023-03-29T11:18:27.646687 | revision_date: 2021-03-18T03:41:04 | committer_date: 2021-03-18T03:41:04
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,344 | num_lines: 61 | avg_line_length: 53.819672 | max_line_length: 163
num_repo_files: 19 | num_lang_files: 10 | alphanum_fraction: 0.60616 | alpha_fraction: 0.591507 | hex_fraction: 0
content:

# Generated by Django 3.1.3 on 2021-03-04 22:52

from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion

class Migration(migrations.Migration):

    dependencies = [
        ('auctions', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(blank=True, max_length=8, null=True, unique=True)),
                ('name', models.CharField(max_length=32, unique=True)),
                ('description', models.CharField(blank=True, max_length=128, null=True)),
                ('image', models.URLField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lis_name', models.CharField(max_length=50)),
                ('lis_description', models.CharField(max_length=500)),
                ('lis_image', models.CharField(max_length=500)),
                ('lis_active', models.BooleanField(default=False)),
                ('lis_date', models.DateTimeField(auto_now=True)),
                ('lis_price', models.DecimalField(decimal_places=2, max_digits=10, validators=[django.core.validators.MinValueValidator(1)])),
                ('category', models.ManyToManyField(blank=True, related_name='listing_category', to='auctions.Category')),
                ('lis_user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='listing_user', to=settings.AUTH_USER_MODEL)),
                ('watchlist', models.ManyToManyField(blank=True, related_name='watchlist', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=500)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('listings', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment_listing', to='auctions.listing')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Bid',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('initial', models.DecimalField(decimal_places=2, default=0, max_digits=6, validators=[django.core.validators.MinValueValidator(0.01)])),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('listings', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bid_listings', to='auctions.listing')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bid_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
----------------------------------------------------------------------
repo_name: Ash515/awesomeScripts | __id__: 3,667,902,112,989 | github_id: 301,373,870
repo_url: https://github.com/Ash515/awesomeScripts | branch_name: refs/heads/master
path: /send-discord_message/auth.py | filename: auth.py | extension: py
blob_id: 01999ca80febb3a4f4749b8c88e083e0c2d63063 | content_id: 3860d3f437292f804949ff0c01eb286f70a2aafc
directory_id: 54489f723841157ff6f9d3aa3d9d9590f27d2ed5
snapshot_id: 8126751d9cd47564e6d76726cbde38a4291cf223 | revision_id: 3a2c0888207aca044e03ec9f387035e14879fd15
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2022-12-23T09:42:33.998020 | revision_date: 2020-10-05T11:50:18 | committer_date: 2020-10-05T11:50:18
star_events_count: 8 | fork_events_count: 0
gha_license_id: MIT | gha_fork: true | gha_language: null | gha_archived: false | gha_disabled: false
gha_event_created_at: 2020-10-05T10:40:52 | gha_created_at: 2020-10-05T10:40:52 | gha_updated_at: 2020-10-05T10:40:47 | gha_pushed_at: 2020-10-05T10:40:45
gha_size: 4,364 | gha_stargazers_count: 0 | gha_forks_count: 0 | gha_open_issues_count: 0
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 58 | num_lines: 2 | avg_line_length: 28 | max_line_length: 40
num_repo_files: 152 | num_lang_files: 67 | alphanum_fraction: 0.741379 | alpha_fraction: 0.741379 | hex_fraction: 0
content:

# Enter your discord channel webhook_url
WEBHOOK_URL = ""
----------------------------------------------------------------------
repo_name: joao3872/exercios-Python | __id__: 9,414,568,321,583 | github_id: 316,858,020
repo_url: https://github.com/joao3872/exercios-Python | branch_name: refs/heads/main
path: /Parte 2/desafio42.py | filename: desafio42.py | extension: py
blob_id: 32850215bc807d4357a5c0f0a9b4259209050c8b | content_id: 689cf3a13c49cb0fcd9a38d1556dee2b18686690
directory_id: 63a1fde24dceff2361e40d3f367bcd51b90baede
snapshot_id: 44ef9a0f17cc1c90f01f582169ff444438b53403 | revision_id: 3d8d07635903baf78a8ca13d53016f1a1038296a
detected_licenses: [] | license_type: no_license
visit_date: 2023-03-05T16:55:58.091208 | revision_date: 2022-01-26T01:17:10 | committer_date: 2022-01-26T01:17:10
star_events_count: 2 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 634 | num_lines: 14 | avg_line_length: 42.785714 | max_line_length: 95
num_repo_files: 110 | num_lang_files: 108 | alphanum_fraction: 0.596491 | alpha_fraction: 0.54386 | hex_fraction: 0
content:

s1 = int(input('Informe o primeiro segmento: '))
s2 = int(input('Informe o segundo segmento: '))
s3 = int(input('Informe o terceiro segmento: '))

veja = ((s2 - s3) < s1 < s2 + s3) and ((s1 - s3) < s2 < s1 + s3) and ((s1 - s2) < s3 < s1 + s2)
e = (s1 == s2 != s3) or (s1 != s2 == s3) or (s2 != s1 == s3)

if (e and veja):
    print('Os Segmentos Formam um Triângulo ISÓSCELES !')
elif (s1 == s2 == s3 and veja):
    print('Os Segmentos Formam um Triângulo EQUILÁTERO !')
elif (s1 != s2 != s3 and veja):
    print('Os Segmentos Formam um Triângulo ESCALENO !')
else:
    print('Os Segmentos Não formam um Triângulo.')
----------------------------------------------------------------------
repo_name: jjmalina/tumblrgifsfeed | __id__: 17,557,826,306,254 | github_id: null
repo_url: https://github.com/jjmalina/tumblrgifsfeed | branch_name: refs/heads/master
path: /app/gifs/migrations/0002_auto__add_field_photo_timestamp.py | filename: 0002_auto__add_field_photo_timestamp.py | extension: py
blob_id: bfc5de37ac9dba68efa04bea613cfee1f0b739e1 | content_id: ab1f89aa26fcf0d1c5ef4d21786d076b06892377
directory_id: eaa9703b8a91de94484ac3497298c5e4595dbd8c
snapshot_id: 99fb8e1b56b3c12bf20f112a07632254ad1c8c6c | revision_id: 16c8af8e944c37ba236880915b89fab58d84e0cf
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-19T13:02:30.617485 | revision_date: 2012-10-05T21:07:36 | committer_date: 2012-10-05T21:07:36
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2,121 | num_lines: 42 | avg_line_length: 49.52381 | max_line_length: 155
num_repo_files: 16 | num_lang_files: 9 | alphanum_fraction: 0.548326 | alpha_fraction: 0.54314 | hex_fraction: 0
content:

# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models

class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'Photo.timestamp'
        db.add_column('gifs_photo', 'timestamp',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Photo.timestamp'
        db.delete_column('gifs_photo', 'timestamp')

    models = {
        'gifs.photo': {
            'Meta': {'object_name': 'Photo'},
            'date_downloaded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_posted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'post_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'photos'", 'symmetrical': 'False', 'to': "orm['gifs.Tag']"}),
            'tags_json': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
            'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'gifs.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        }
    }

    complete_apps = ['gifs']
----------------------------------------------------------------------
repo_name: Zhiguo-Uconn/hrs_django | __id__: 7,181,185,322,105 | github_id: 218,453,586
repo_url: https://github.com/Zhiguo-Uconn/hrs_django | branch_name: refs/heads/master
path: /hrs/PatientIdMiddleware.py | filename: PatientIdMiddleware.py | extension: py
blob_id: 1802ee7ed2fa606b8d0cc4893c57c33c47d7b10c | content_id: 6e902e83ac5776b6207774ab3674b5b507de454c
directory_id: 5d828263fdd7efcf7c35061388d6e413040b6784
snapshot_id: 11fee91d4b20104e8b616feb7c4f7238a8e9dfaa | revision_id: c18a90fe3cdcf0192a8863689e382615d0f515f3
detected_licenses: [] | license_type: no_license
visit_date: 2020-08-30T18:08:22.068451 | revision_date: 2019-11-04T16:41:19 | committer_date: 2019-11-04T16:41:19
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 419 | num_lines: 14 | avg_line_length: 28.285714 | max_line_length: 52
num_repo_files: 19 | num_lang_files: 15 | alphanum_fraction: 0.613365 | alpha_fraction: 0.610979 | hex_fraction: 0
content:

from django.utils.deprecation import MiddlewareMixin
from django.urls import resolve

class PatientIdMiddleware(MiddlewareMixin):
    def process_request(self, request):
        print("------------\n")
        print(hasattr(request, 'pid'))
        if not hasattr(request, 'pid'):
            request._cached_pid = 0
        print(request.pid)
        print(resolve(request.path_info).url_name)
----------------------------------------------------------------------
repo_name: khs50851/git-pydjgproject1 | __id__: 12,043,088,304,259 | github_id: 317,441,580
repo_url: https://github.com/khs50851/git-pydjgproject1 | branch_name: refs/heads/master
path: /py_community/user1/models.py | filename: models.py | extension: py
blob_id: c765aacfe66525a85111852fe0a88ca736f80e79 | content_id: a3bc5a20745585a20c42e8b08b361970667bbe9c
directory_id: 43c8f8f47ebe3aaff56c39a9e78d8d407f6a9aa8
snapshot_id: d23ad38630b351d585c6fc85e32162a6ecb177c7 | revision_id: b6918144b43990615744e0c7b36b0d971b140ea3
detected_licenses: [] | license_type: no_license
visit_date: 2023-01-24T13:49:11.027031 | revision_date: 2020-12-03T07:54:56 | committer_date: 2020-12-03T07:54:56
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,575 | num_lines: 29 | avg_line_length: 37.724138 | max_line_length: 105
num_repo_files: 9 | num_lang_files: 6 | alphanum_fraction: 0.58504 | alpha_fraction: 0.576135 | hex_fraction: 0
content:

from django.db import models

# Create your models here.
class User1(models.Model):
    objects = models.Manager()
    username = models.CharField(max_length=32, verbose_name='사용자명')
    useremail = models.EmailField(max_length=128,  # an EmailField is like a CharField but validates email format
                                  verbose_name='이메일')
    # If you add an email field when the database already has rows without it, running
    # migrations asks whether you want to enter a default value yourself
    # (e.g. default='', verbose_name='이메일') or use a specified default.
    # Here option 1 was chosen to insert the default value.
    password = models.CharField(max_length=64, verbose_name='비밀번호')
    registered_dttm = models.DateTimeField(auto_now_add=True, verbose_name='등록시간')

    def __str__(self):
        # What the Django admin page shows as the record name is just the class rendered as a string.
        # Python calls a method that decides how the class converts to a string: that is __str__.
        # Here it returns the username.
        return self.username

    class Meta:
        db_table = 'pydjgproject_user1'
        # Just like the verbose_name set on the fields above, specify one here too
        verbose_name = '유저'
        verbose_name_plural = '유저'
----------------------------------------------------------------------
repo_name: memimo/Timothee | __id__: 7,550,552,532,258 | github_id: 720,978
repo_url: https://github.com/memimo/Timothee | branch_name: refs/heads/master
path: /python/test_module.py | filename: test_module.py | extension: py
blob_id: a7f0b5db84b1ecf23dbb0e39e59f6c2c29c9eb35 | content_id: 0491297cdb4767fb045df1b8c193dae575f83456
directory_id: fc2640f9cc10539c4d5475906563c05b1c2cdba5
snapshot_id: b27c82e296e55760da177822906e124100d7b19c | revision_id: 0329ccbcc16855de2205013e8dbf361272368ef3
detected_licenses: [] | license_type: no_license
visit_date: 2016-09-06T12:37:23.949760 | revision_date: 2010-12-10T16:27:10 | committer_date: 2010-12-10T16:27:10
star_events_count: 0 | fork_events_count: 1
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 5,884 | num_lines: 145 | avg_line_length: 39.544828 | max_line_length: 193
num_repo_files: 47 | num_lang_files: 15 | alphanum_fraction: 0.563052 | alpha_fraction: 0.551835 | hex_fraction: 0
content:

import unittest
import scipy.io as sio

from config import *
import layer_lgn
import layer_s1
import layer_c1

class timothee_model_test(unittest.TestCase):
    '''
    All tests are run against saved data from the Matlab implementation on the same data
    '''

    def setUp(self):
        # Parameter initializations
        self.test_data_path = 'test_mat_data/'
        params['test_mode'] = True
        paths['image'] = paths['home'] + 'data/'
        paths['map'] = paths['home'] + 'map-test/'
        #params['s']['n_iter'] = 1000
        params['c']['n_iter'] = 1000
        #layer_lgn.compute_layer()
        layer_s1.layer_s1()
        #layer_c1.learn_c1('timothee', common)

    #--- LGN Layer --------------------------
    def test_dog_filter(self):
        m_file = sio.loadmat(self.test_data_path + 'dog_filter.mat')
        filter_matlab = m_file['dog_filter']
        self.assertTrue(allclose(filter_matlab, filter_matlab))

    def test_compute_map(self):
        import layer_lgn
        internal_pos = [-3, -3]
        on_off_map = layer_lgn.compute_map(params['dog_filter'], paths['test_image'],
                                           params['crop_pos'], internal_pos,
                                           params['s']['rf_size'],
                                           params['on_off']['sub_sampling'], params['zoom'])
        m_file = sio.loadmat(self.test_data_path + 'on_off_map.mat')
        map_matlab = m_file['onOffMap']
        for i_ind in range(len(on_off_map)):
            self.assertTrue(allclose(map_matlab[0, i_ind], on_off_map[i_ind]))

    def test_layer_lgn(self):
        for i_ind in range(params['c']['rf_size'][0]):
            for j_ind in range(params['c']['rf_size'][1]):
                on_off_map = load(paths['map'] + 'onoff.' + str(i_ind) + '.' + str(j_ind) + '.npy')
                m_file = sio.loadmat(self.test_data_path + 'onOff.' + str(i_ind + 1) + '.' + str(j_ind + 1) + '.mat')
                map_matlab = m_file['onOffMap']
                for f_ind in range(len(on_off_map)):
                    self.assertTrue(allclose(map_matlab[0, f_ind], on_off_map[f_ind]))

    #--- Layer S1 -----------------------
    def test_learn_unlearn(self):
        import learn_unlearn
        common = {}
        common['on_off_map'] = load(paths['map'] + 'onoff.' + str(0) + '.'
                                    + str(0) + '.npy')
        center = learn_unlearn.learn_unlearn(len(common['on_off_map']),
                                             params['s']['n_iter'], [], [],
                                             params['rec_filter'], params['s'], common)
        m_file = sio.loadmat(self.test_data_path + 'center_s1.mat')
        center_matlab = m_file['center']
        for i_ind in range(params['s']['n']):
            #print center[i_ind]['activity'], '\n', center_matlab[0][i_ind].activity, 'activity\n\n'
            #print center[i_ind]['n_firing'], '\n', center_matlab[0][i_ind].nFiring, 'nFiring\n\n'
            #print center[i_ind]['evol_theta_original'], '\n', center_matlab[0][i_ind].evolThetaOriginal, 'evolThetaOriginal\n\n'
            #print center[i_ind]['evol_threshold'], '\n', center_matlab[0][i_ind].evolThreshold, 'evolThreshold\n\n'
            #print center[i_ind]['activity_threshold'], '\n', center_matlab[0][i_ind].activityThreshold, 'activityThreshold\n\n'
            #print center[i_ind]['learning_rate'], '\n', center_matlab[0][i_ind].learningRate, 'learningRate\n\n'
            #print center[i_ind]['patch'], '\n', center_matlab[0][i_ind].patch, 'patch\n\n'
            #print center[i_ind]['patch'].mean(), '\n', center_matlab[0][i_ind].patch.mean(), 'patch mean\n\n'
            self.assertAlmostEqual(center[i_ind]['patch'].mean(), center_matlab[0][i_ind].patch.mean(), 2)
            self.assertTrue(allclose(center[i_ind]['patch'], center_matlab[0][i_ind].patch, 2))

    '''
    def test_layer_s1(self):
        s1_map = load(paths['map'] + 's1.0.nS16_inhibMean0.0.npy')
        m_file = sio.loadmat(self.test_data_path + 's1.01.nS16_inhibMean0.00.mat')
        map_matlab = m_file['s1Map']
        self.assertTrue(allclose(s1_map, map_matlab))

    def test_record_responce(self):
        import layer_s1
        m_file = sio.loadmat(self.test_data_path + 'response.mat')
        in_range = m_file['range']
        center = m_file['center']
        m_output = m_file['response']
        common = {}
        common['on_off_map'] = load(paths['map'] + 'onoff.' + str(0) + '.' + str(0) + '.npy')
        response = layer_s1.record_response(in_range - 1, False, center, common)
        self.assertTrue(allclose(m_output, response))

    #--- Layer C1 --------------------------------
    def test_layer_c1_timothee(self):
        import pickle
        import learn_invariance
        cfile = open(paths['map'] + 'common.' + params['comp_name'] + '.' + params['s']['type'] + '.pck', 'r')
        common = pickle.load(cfile)
        common['weight'], common['evol'], common['thr'], common['n_firing_input'], common['n_firing_output'], common['n_above_thr'] = learn_invariance.learn_invariance([], params['c'], common)
        m_file = sio.loadmat(self.test_data_path + 'c1-timothee.mat')
        matlab_common = m_file['COMMON'][0][0]
        self.assertTrue(allclose(common['weight'], matlab_common.weight))
        self.assertTrue(allclose(common['evol'], matlab_common.evol))
        self.assertTrue(allclose(common['thr'], matlab_common.thr))
        self.assertTrue(allclose(common['n_firing_input'], matlab_common.nFiringInput))
        self.assertTrue(allclose(common['n_above_thr'], matlab_common.nAboveThr))

    #--- Analyze ------------------------
    def rf_reconstruction(self):
        from analyze import *
        m_file = sio.loadmat(self.test_data_path + 'rf.mat')
        rf = m_file['rf']
        py_rf = rf_reconstruction(rf, params['rec_filter'], False)
        mat_rf = m_file['reconst_rf']
        self.assertTrue(allclose(py_rf, mat_rf))
    '''

if __name__ == "__main__":
    unittest.main()
----------------------------------------------------------------------
repo_name: deddokatana/RestCart | __id__: 13,924,284,022,471 | github_id: 74,852,599
repo_url: https://github.com/deddokatana/RestCart | branch_name: refs/heads/master
path: /Transactions/objects.py | filename: objects.py | extension: py
blob_id: b682e40d053431abae79648e3b79c8f58a700f61 | content_id: ec20f7baf45b65d284f02f5445236451eacedd3a
directory_id: b4025cb015feb804018109adf88564bc5ded2f02
snapshot_id: 7ae186a0837932dadf3411bf885969991a7d5469 | revision_id: 9ae647d6fe913d0e178a3a28ab55fe6f3418d349
detected_licenses: [] | license_type: no_license
visit_date: 2020-06-20T17:30:18.714276 | revision_date: 2016-12-03T20:58:15 | committer_date: 2016-12-03T20:58:15
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2,211 | num_lines: 66 | avg_line_length: 32.515152 | max_line_length: 164
num_repo_files: 5 | num_lang_files: 3 | alphanum_fraction: 0.644957 | alpha_fraction: 0.640434 | hex_fraction: 0
content:

from decimal import *
from money import Money, xrates

xrates.install('money.exchange.SimpleBackend')

# counting tools
from collections import Counter
from itertools import chain
import json

# use google datastore to pickle an order!
class Order:
    def __init__(self, currency_code: str = 'GBP'):
        self.currency_code = currency_code
        self.order_dict = {}
        print(self.currency_code)
        self.total = Money(amount=0, currency=self.currency_code)

    def _new_total(self):
        for name, properties in self.order_dict.items():
            # print("Properties", properties["price"])  # debugging
            subtotal = Decimal(properties["price"]) * Decimal(int(properties["quantity"]))
            self.total = self.total + Money(subtotal, self.currency_code)

    def add_to_order_from_dict(self, paypal_transaction_item_list: dict):
        self.order_dict.update(paypal_transaction_item_list)
        self._new_total()
        return self.order_dict

    def from_json(self, order_json: str):
        self.order_dict = json.loads(order_json)
        self._new_total()
        return self.order_dict

    def total_to_gbp(self):
        return self.total.to('GBP')

    def total_to_currency(self, new_currency_code: str):
        return self.total.to(new_currency_code)

    def add_line(self, item: str, cost: str, quantity: int = 1):
        for line in range(quantity):
            self.order_dict.update({str(item): str(cost)})
        self._new_total()
        return self.order_dict

    def to_paypal_transaction_items_list(self):
        # convert order_dict to {"wax": {price: "1.00", quantity: 1}, "candle": {price: "1.00", quantity: 2}}
        self._new_total()  # just for foolproofing
        paypal_items = {"items": []}
        print(self.order_dict)
        for item, properties in self.order_dict.items():
            paypal_items["items"].append({"name": item, "sku": item, "price": properties["price"],
                                          "currency": str(self.total.currency),
                                          "quantity": properties["quantity"]})
        print(paypal_items)
        return paypal_items

    def __money__(self):
        assert isinstance(self.total, Money)
        return self.total

    def __decimal__(self):
        return Decimal(self.total.amount)
----------------------------------------------------------------------
repo_name: edt-yxz-zzd/python3_src | __id__: 2,491,081,055,221 | github_id: 143,530,977
repo_url: https://github.com/edt-yxz-zzd/python3_src | branch_name: refs/heads/master
path: /nn_ns/ply_yacc_tools/app/show_yacc_productions.py | filename: show_yacc_productions.py | extension: py
blob_id: e2939486698ce754c59fe25e3340e16559ab2f20 | content_id: eac2b479877acdd9079ba75f749a258389ec3052
directory_id: 13f4a06cd439f579e34bf38406a9d5647fe7a0f3
snapshot_id: 43d6c2a8ef2a618f750b59e207a2806132076526 | revision_id: 41f3a506feffb5f33d4559e5b69717d9bb6303c9
detected_licenses: [] | license_type: no_license
visit_date: 2023-05-12T01:46:28.198286 | revision_date: 2023-05-01T13:46:32 | committer_date: 2023-05-01T13:46:32
star_events_count: 2 | fork_events_count: 2
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 9,613 | num_lines: 267 | avg_line_length: 34.996255 | max_line_length: 240
num_repo_files: 3,235 | num_lang_files: 2,478 | alphanum_fraction: 0.605014 | alpha_fraction: 0.599709 | hex_fraction: 0
content:

r'''
pym -jq parse__grammar_units.py
pym show_yacc_productions.py M -m nn_ns.my_fileformat.configuration.utils.parses.parse__grammar_units -da .P
pym show_yacc_productions.py S -p nn_ns.my_fileformat.configuration.utils.parses -np parse__grammar_units@E:\my_data\program_source\github\edt-yxz-zzd\python3_src\nn_ns\my_fileformat\configuration\utils\parses\parse__grammar_units.py -da .P
'''

__all__ = '''
    show_extracted_yacc_rules
    extract_rules_from_yacc_productions
    extract_rules_from_yacc_lrparser
    extract_rules_from_lex_postprocessor_with_parser
    show_yacc_productions
    show_yacc_productions_from_yacc_lrparser
    show_yacc_productions_from_lex_postprocessor_with_parser
    '''.split()

from ..make_yacc_LRParser import make_yacc_LRParser
from ..LexPostprocessors.ILexPostprocessorWithParser import \
    ILexPostprocessorWithParser
from seed.pkg_tools.get_python_object import GetPythonObjectHelper
import ply.yacc
from collections import defaultdict

def show_extracted_yacc_rules(rules):
    '''rules :: [(name, [[name]])]

    see: extract_rules_from_yacc_productions
    '''
    for name, body in rules:
        print(name)
        op = ':'
        for names in body:
            line = ' '.join(names)
            print(f' {op} {line}')
            op = '|'
    return

def extract_rules_from_yacc_productions(productions):
    '''[production] -> [rule]
    [ply.yacc.MiniProduction] -> [(name, [[name]])]

    productions :: [ply.yacc.MiniProduction]
    rules :: [(name, [[name]])]
    body :: [[name]]
    rules = name_body_pairs
    '''
    d = defaultdict(list)
    for production in productions:
        name = production.name
        name_, _names_ = production.str.split('->')
        names = _names_.split()
        d[name].append(names)
    rules = name_body_pairs = sorted(d.items())
    return rules

def extract_rules_from_yacc_lrparser(lrparser):
    '''ply.yacc.LRParser -> [(name, [[name]])]'''
    productions = lrparser.productions
    return extract_rules_from_yacc_productions(productions)

def extract_rules_from_lex_postprocessor_with_parser(lex_postprocessor_with_parser):
    '''LexPostprocessorWithParser -> [(name, [[name]])]'''
    lrparser = lex_postprocessor_with_parser.lrparser
    return extract_rules_from_yacc_lrparser(lrparser)

#############################################

def show_yacc_productions(productions):
    rules = extract_rules_from_yacc_productions(productions)
    show_extracted_yacc_rules(rules)

def show_yacc_productions_from_yacc_lrparser(lrparser):
    rules = extract_rules_from_yacc_lrparser(lrparser)
    show_extracted_yacc_rules(rules)

def show_yacc_productions_from_lex_postprocessor_with_parser(lex_postprocessor_with_parser):
    rules = extract_rules_from_lex_postprocessor_with_parser(lex_postprocessor_with_parser)
    show_extracted_yacc_rules(rules)

#############################################
#############################################

def main(argv=None):
    #seed.io.get_python_object.get_python_object/GetPythonObjectHelper
    '''
    ModuleSystem --module x.y.z --dot_attrs .XXX.YYY
    ScriptAsModule  # to set the __name__
        --parent x.y [--name_with_path z@path/to/XXX.py]+ --dot_attrs .XXX.YYY
    #from file/stdin/arg
    Exec/Eval
        file --input_path path/to/XXX.py [--encoding utf8] --dot_attrs .XXX.YYY
        stdin --dot_attrs .XXX.YYY
        arg --python_source
            --dot_attrs .XXX.YYY
    '''
    import argparse
    parser_main = argparse.ArgumentParser(prog='show_yacc_productions')
    subparsers_main = parser_main.add_subparsers(
        dest='sub_command_name__level1'
        #, required=True
        , help='level1 sub-command help'
        )
    subparsers_main.required = True

    parser_ModuleSystem = subparsers_main.add_parser('ModuleSystem'
        , aliases='M'.split()
        , help='via ModuleSystem')
    parser_ScriptAsModule = subparsers_main.add_parser('ScriptAsModule'
        , aliases='S'.split()
        , help='via ScriptAsModule')
    parser_Exec = subparsers_main.add_parser('Exec'
        , aliases='X'.split()
        , help='via Exec')
    parser_Eval = subparsers_main.add_parser('Eval'
        , aliases='V'.split()
        , help='via Eval')

    init_parser_ModuleSystem(parser_ModuleSystem)
    init_parser_ScriptAsModule(parser_ScriptAsModule)
    init_parser_Exec_or_Eval(parser_Exec)
    init_parser_Exec_or_Eval(parser_Eval)
    #init_parser__dot_attrs(parser_main)

    ################
    args = parser_main.parse_args(argv)
    #dot_attrs = args.dot_attrs
    #cmd = parser_main.sub_command_name__level1  # the 'dest'
    x = the_object = args._handler_1_(args)

    if isinstance(x, ILexPostprocessorWithParser):
        rules = extract_rules_from_lex_postprocessor_with_parser(x)
    elif isinstance(x, ply.yacc.LRParser):
        rules = extract_rules_from_yacc_lrparser(x)
    else:
        # assume it is a module/class as required by yacc
        lrparser = make_yacc_LRParser(x)
        rules = extract_rules_from_yacc_lrparser(lrparser)
    show_extracted_yacc_rules(rules)

def init_parser__level1(_handler_1_, parser_level1):
    init_parser__dot_attrs(parser_level1)
    init_parser__level1__without_dot_attrs(_handler_1_, parser_level1)

def init_parser__level1__without_dot_attrs(_handler_1_, parser_level1):
    parser_level1.set_defaults(_handler_1_=_handler_1_)

def init_parser__level2(_handler_2_, parser_level2):
    init_parser__dot_attrs(parser_level2)
    parser_level2.set_defaults(_handler_2_=_handler_2_)

def init_parser_ModuleSystem(parser_ModuleSystem):
    parser_ModuleSystem.add_argument('-m', '--module', type=str
        , required=True
        , help='module fullname; qname; x.y.z'
        )
    def _handler_1_(args):
        return GetPythonObjectHelper.ModuleSystem(
            module=args.module
            , dot_attrs=args.dot_attrs
            )
    init_parser__level1(_handler_1_, parser_ModuleSystem)

def init_parser_ScriptAsModule(parser_ScriptAsModule):
    parser_ScriptAsModule.add_argument('-p', '--parent', type=str
        , required=True
        , help='normal package fullname; qname; x.y.z'
        )
    parser_ScriptAsModule.add_argument('-np', '--name_path_pairs'
        , type=str, action='append', default=[]
        #not required#, required=True
        , help='bare module name with path to its source file; aaa@path/to/aaa.py'
        )
    def _handler_1_(args):
        return GetPythonObjectHelper.ScriptAsModule(
            parent=args.parent
            , name_path_pair_strs=args.name_path_pairs
            , dot_attrs=args.dot_attrs
            )
    init_parser__level1(_handler_1_, parser_ScriptAsModule)

def init_parser_Exec_or_Eval(parser_E):
    Exec_or_Eval = sub_command_name__level1 = parser_E.prog
    subparsers_E = parser_E.add_subparsers(
        dest='Exec_or_Eval_sub_command_name'
        , help='{Exec_or_Eval} sub-command help')
    parser_E_file = subparsers_E.add_parser('file'
        , aliases='f'.split()
        , help='from file')
    parser_E_stdin = subparsers_E.add_parser('stdin'
        , aliases='i'.split()
        , help='from stdin')
    parser_E_arg = subparsers_E.add_parser('arg'
        , aliases='a'.split()
        , help='from arg')
    init_parser_Exec_or_Eval_file(parser_E_file)
    init_parser_Exec_or_Eval_stdin(parser_E_stdin)
    init_parser_Exec_or_Eval_arg(parser_E_arg)
    def _handler_1_(args):
        Exec_or_Eval = args.sub_command_name__level1
        G = GetPythonObjectHelper
        E = G.Exec if Exec_or_Eval == 'Exec' else G.Eval
        return args._handler_2_(E, args)
    # well, we can set _handler_2_ to _handler_1_
    #not!!!: init_parser__level1(_handler_1_, parser_E)  # donot: init_parser__dot_attrs
    init_parser__level1__without_dot_attrs(_handler_1_, parser_E)

def init_parser_Exec_or_Eval_file(parser_E_file):
    parser_E_file.add_argument('-i', '--input_path', type=str
        , required=True
        , help='path to input file'
        )
    parser_E_file.add_argument('-e', '--encoding', type=str
        , required=True
        , help='encoding for input file'
        )
    def _handler_2_(E, args):
        return E.file(
            input_path=args.input_path
            , encoding=args.encoding
            , dot_attrs=args.dot_attrs
            )
    init_parser__level2(_handler_2_, parser_E_file)

def init_parser_Exec_or_Eval_stdin(parser_E_stdin):
    def _handler_2_(E, args):
        return E.stdin(
            dot_attrs=args.dot_attrs
            )
    init_parser__level2(_handler_2_, parser_E_stdin)

def init_parser_Exec_or_Eval_arg(parser_E_arg):
    parser_E_arg.add_argument('python_source', type=str
        #, required=True
        , help='python source code'
        )
    def _handler_2_(E, args):
        return E.stdin(
            python_source=args.python_source
            , dot_attrs=args.dot_attrs
            )
    init_parser__level2(_handler_2_, parser_E_arg)

def init_parser__dot_attrs(parser):
    parser.add_argument('-da', '--dot_attrs', type=str
        , required=True
        , help='attrs to object; .XXX.YYY'
        )

if __name__ == "__main__":
    main()
----------------------------------------------------------------------
repo_name: suraj93/IITM-Course-Codes | __id__: 10,453,950,417,273 | github_id: 24,233,526
repo_url: https://github.com/suraj93/IITM-Course-Codes | branch_name: refs/heads/master
path: /Computational Methods for EE/Assignment 1 - Polynomial Interpolation/q6/pypolint.py | filename: pypolint.py | extension: py
blob_id: 7dd364d8d1cff14995e272ac807673efb33a76b4 | content_id: 6a15659abf43a61c029de9f56b583bdafb88c7fe
directory_id: 326940c9e5ca002ec8c3400e45cd6e3cb4c2b98c
snapshot_id: d33de57b7b8759a8f56d77e6f00d3110cba4c5c6 | revision_id: ed0ca14cdff0341580122f0d4e1a196f1417e1e4
detected_licenses: [] | license_type: no_license
visit_date: 2016-09-06T14:05:05.470723 | revision_date: 2014-12-02T05:24:22 | committer_date: 2014-12-02T05:24:22
star_events_count: 1 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,729 | num_lines: 75 | avg_line_length: 22.066667 | max_line_length: 64
num_repo_files: 64 | num_lang_files: 25 | alphanum_fraction: 0.504916 | alpha_fraction: 0.477733 | hex_fraction: 0
content:

import numpy as np

def polint(xa, ya, n, x):
    diffarr = np.array([(xai - x) for xai in xa])
    diff = min(abs(diffarr))
    ns = np.where(abs(diffarr) == diff)[0][0]
    c = np.array(ya)
    d = np.array(ya)
    y = ya[ns]
    for m in range(1, (n)):
        ho = diffarr[0:n-m]
        hp = diffarr[m:n]
        # print c
        # print d
        w = c[1:n-m+1] - d[0:n-m]
        den = ho - hp
        den = w * (1./den)
        c = ho * den
        d = hp * den
        if 2*ns < n-m:
            dy = c[ns]
        else:
            ns -= 1
            dy = d[ns]
        y += dy
        # print "%d %f %d %d" % (m, y, ns, n-m)
    return [y, dy]

def findnearest(xa, n, x):
    diffarr = np.array([(xai - x) for xai in xa])
    len_diffarr = len(diffarr)
    diff = min(abs(diffarr))
    ns = np.where(abs(diffarr) == diff)[0][0]
    if abs(diff) < 1:
        if diff <= 0.5:
            lower = ns - np.floor((n-1)/2) - 1
            upper = ns + np.ceil((n)/2) - 1
        else:
            lower = ns - np.floor((n-1)/2)
            upper = ns + np.ceil((n)/2)
    else:
        lower = ns - np.floor((n-1)/2)
        upper = ns + np.ceil((n)/2)
    if lower < 0:
        upper += (-1*lower)
        lower = 0
    elif upper > (len_diffarr-1):
        lower -= (upper - len_diffarr + 1)
        upper = len_diffarr - 1
    upper += 1
    return [int(lower), int(upper)]

def nearestpolint(xa, ya, n, xx):
    y = np.zeros_like(xx)
    dy = np.zeros_like(xx)
    for i in range(0, len(xx)):
        x = xx[i]
        [lower, upper] = findnearest(xa, n, x)
        [y[i], dy[i]] = polint(xa[lower:upper], ya[lower:upper], n, x)
    return [y, dy]

# xa = [1,2,3,4,5]
# ya = [1,2,3,2,1]
# n = 5
# x = 1.2
# [lower, upper] = findnearest(xa, n, x)
# print xa[lower:upper]
# print ya[lower:upper]
# [y, dy] = polint(xa[lower:upper], ya[lower:upper], n, x)
# print y
# print dy
----------------------------------------------------------------------
repo_name: jameschanwz/LegalDocGenerator | __id__: 17,093,969,848,416 | github_id: 347,832,170
repo_url: https://github.com/jameschanwz/LegalDocGenerator | branch_name: refs/heads/main
path: /app/email.py | filename: email.py | extension: py
blob_id: d9ef1e7a9acaf6674c2aa5e8ab3f7ad4747a4df6 | content_id: 98b843e4e6def342be3e403b3cb3095dbeb1649b
directory_id: c9f9b19837c96f51d63ace524836110abb51d09f
snapshot_id: 7856fcae2dd56355d42a75248b977ef423fb768d | revision_id: 5225353bf12cbc0871b0ad5ef7101680bb8da143
detected_licenses: [] | license_type: no_license
visit_date: 2023-05-08T12:34:49.127672 | revision_date: 2021-06-02T07:21:11 | committer_date: 2021-06-02T07:21:11
star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_fork: false | gha_language: Python | gha_archived: false | gha_disabled: false
gha_event_created_at: 2021-03-30T09:57:25 | gha_created_at: 2021-03-15T04:07:04 | gha_updated_at: 2021-03-28T13:20:15 | gha_pushed_at: 2021-03-30T09:57:25
gha_size: 49 | gha_stargazers_count: 0 | gha_forks_count: 0 | gha_open_issues_count: 1
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 252 | num_lines: 8 | avg_line_length: 30.625 | max_line_length: 65
num_repo_files: 8 | num_lang_files: 6 | alphanum_fraction: 0.730159 | alpha_fraction: 0.730159 | hex_fraction: 0
content:

from flask_mail import Message

from app import mail

def send_email(subject, sender, recipients, text_body, html_body):
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    mail.send(msg)
----------------------------------------------------------------------
repo_name: pong-xxyz/SaRA | __id__: 9,251,359,575,191 | github_id: 85,325,580
repo_url: https://github.com/pong-xxyz/SaRA | branch_name: refs/heads/master
path: /Pi_navigatorClient.py | filename: Pi_navigatorClient.py | extension: py
blob_id: 13e7d4b2770f3183b2db443554d1d23daf102834 | content_id: f5347a71011f34f449b54610772ff302b79b22b0
directory_id: e5b51b92430a7f13f81ebec251c4b53bea730c21
snapshot_id: 27cf1267a977fffa5d471c32985a765bdfda25eb | revision_id: 4f72248c583cd0922e25cd3336bb063316bb75db
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-22T20:29:12.482725 | revision_date: 2017-03-17T15:22:11 | committer_date: 2017-03-17T15:22:11
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 6,570 | num_lines: 206 | avg_line_length: 30.543689 | max_line_length: 159
num_repo_files: 3 | num_lang_files: 3 | alphanum_fraction: 0.448706 | alpha_fraction: 0.42207 | hex_fraction: 0
content (Python 2):

import socket
import time
import RPi.GPIO as gpio
import sys

host = 'xxx.xxx.xx.xxx'  # address of the pi in the LAN, find through ifconfig
port = xxxx  # random unused port, i.e. above 1000
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host, port))  # socket for getting data from surf program, i.e. centroid

host2 = 'xxx.xxx.xx.xx'  # address of the pi in the LAN, find through ifconfig
port2 = xxx  # random unused port, different from the previous one, i.e. above 1000
e = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
e.bind((host2, port2))  # socket for getting distance from ultrasonic sensor program
e.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)  # sets the buffer queue length to 1, i.e. no need to save more than one previous value

def init():
    gpio.setmode(gpio.BOARD)
    gpio.setup(19, gpio.OUT)
    gpio.setup(11, gpio.OUT)
    gpio.setup(13, gpio.OUT)
    gpio.setup(15, gpio.OUT)

def rotateleft(tf):
    init()
    gpio.output(19, True)
    gpio.output(11, False)
    gpio.output(13, True)
    gpio.output(15, False)
    time.sleep(tf)
    gpio.cleanup()

def rotateright(tf):
    init()
    gpio.output(19, False)
    gpio.output(11, True)
    gpio.output(13, False)
    gpio.output(15, True)
    time.sleep(tf)
    gpio.cleanup()

def foreward(tf):
    init()
    gpio.output(19, False)
    gpio.output(11, False)
    gpio.output(13, True)
    gpio.output(15, True)
    time.sleep(tf)
    gpio.cleanup()

def search():
    while True:
        print "searching .."
        rotateright(0.030)
        data, addr = s.recvfrom(1024)  # get centroid from surf program as string
        end = len(data)
        clon = data[1:(end-1)]
        p, q = map(float, clon.split(','))  # string decoded into floats
        if (p == -1) & (q == -1):
            pass  # if object not found do nothing and go to next iteration
        else:  # if object found start to verify
            count = 0
            r = time.time()
            t = time.time()
            while t - r < 1.5:  # verification period = 1.5 sec; t = stop, r = start times respectively
                print "verifying"
                data, addr = s.recvfrom(1024)
                end = len(data)
                clon = data[1:(end-1)]
                p, q = map(float, clon.split(','))
                if p != -1 and q != -1:  # object found
                    count = count + 1  # increment count if obj found
                t = time.time()  # stop time increases till t-r < 1.5, i.e. verification runs for 1.5 sec
            if count > 5:  # if object found more than 5 times in the verify period, search phase complete
                print "verified"
                break
            else:
                pass
    return 2

def allignment():
    flag = 0
    lag = 0
    while True:
        data, addr = s.recvfrom(1024)
        end = len(data)
        clon = data[1:(end-1)]
        p, q = map(float, clon.split(','))
        if p == -1 and flag == 0:  # if obj disappears during alignment start timer
            start = time.time()
            flag = 1
        if p == -1 and flag != 0:  # if object still not found, increment counter
            stop = time.time()
            lag = stop - start  # lag gives time elapsed for which the object hasn't appeared once
            if lag > 5:  # if object disappears for more than 5 sec search again
                return 1
        if p < 283.0 and p != -1:
            print "rotating left"
            rotateleft(0.030)
            start = time.time()
            flag = 0  # if the object appears even once reset timer
        if p > 339.0:
            print "rotating right"
            rotateright(0.030)
            start = time.time()
            flag = 0
        if 283 < p < 339:
            print 'aligned'
            start = time.time()
            break  # if aligned break and go to ram(), to move closer to the target
    return 3

def ram():
    datak, adres = e.recvfrom(1024)
    d = float(datak)  # get distance from ultrasonic sensor program
    print d
    flag = 0
    lg = 0
    while d > 10:
        data, addr = s.recvfrom(1024)
        end = len(data)
        clon = data[1:(end-1)]
        p, q = map(float, clon.split(','))
        datak, adres = e.recvfrom(1024)
        d = float(datak)
        print d
        if 283 < p < 339:  # if still aligned move forward
            foreward(0.030)
            flag = 0
        elif not (283 < p < 339) and p != -1:  # if alignment lost goto allignment()
            r = allignment()
            flag = 0
        else:  # if object lost for more than 5 sec go back to search()
            if flag == 0:
                st = time.time()
                flag = 1
            else:
                en = time.time()
                lg = en - st
            if lg > 5:
                return 1
    return 4

def pickup():  # pickup routine to be written
    print "pickup routine"
    time.sleep(5)
    return 5

def fetch():
    print "fetching routine"
    time.sleep(5)
    return 6

def Main():
    c = 1
    try:
        while True:
            if c == 1:
                c = search()
            if c == 2:
                c = allignment()
            if c == 3:
                c = ram()
            if c == 4:
                c = pickup()
            if c == 5:
                c = fetch()
            if c == 6:
                s.close()
                e.close()
                gpio.cleanup()
                print "port closed"
                break
    except KeyboardInterrupt:
        s.close()
        e.close()
        gpio.cleanup()
        print "port closed"

if __name__ == '__main__':
    Main()
----------------------------------------------------------------------
repo_name: MnM3882/Laboratorio-remoto | __id__: 3,908,420,274,320 | github_id: 126,108,805
repo_url: https://github.com/MnM3882/Laboratorio-remoto | branch_name: refs/heads/master
path: /SerialCommunication y GUI/180404Lab_remoto .py | filename: 180404Lab_remoto .py | extension: py
blob_id: 8ebe36ac1cfa63e06389959523ed67d1400498d4 | content_id: eaa482f36058f76befe43782bbee2684a6291051
directory_id: 70fe9c43026169aca02144a9a7f47e074185783a
snapshot_id: 55be641b3633f435ee52bb55cf7a0bab439cb01d | revision_id: a8376d98fb34bbfc7c667c08bfa4c43f8c5bd021
detected_licenses: [] | license_type: no_license
visit_date: 2021-09-27T16:04:06.850998 | revision_date: 2018-11-09T11:21:48 | committer_date: 2018-11-09T11:21:48
star_events_count: 0 | fork_events_count: 1
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 4,921 | num_lines: 121 | avg_line_length: 39.636364 | max_line_length: 150
num_repo_files: 158 | num_lang_files: 8 | alphanum_fraction: 0.70002 | alpha_fraction: 0.645719 | hex_fraction: 0
content:

import os
import demoscope3
from tkinter import *
from tkinter import ttk, Canvas

master = Tk()
master.title("Laboratorio remoto- LFN USB")
master.geometry('700x700')

top_frame = Frame(master, bg='white', width=700, height=700, padx=10, pady=10)
#top_frame.grid(column=0, row=0)
top_frame.pack(side=TOP, fill=BOTH)

lbl = Label(top_frame, text="Laboratorio remoto - Espectrometría gamma", font=("Times New Roman", 20), bg='white')
lbl.pack(side=TOP, padx=10, pady=10)

#left_frame = Frame(master, bg='white', width=300)
#left_frame.grid(column=0, row=5)
#left_frame.pack(side=LEFT, fill=BOTH)
#right_frame = Frame(master, bg='white', width=300)
#right_frame.grid(column=10, row=5)
#right_frame.pack(side=RIGHT, fill=BOTH)
#center_frame = Frame(master, bg='white', width=500, height=1000)
#center_frame.pack(side=TOP, fill=X)

# To check that the obstacle indices are the correct ones
def cambiar_obstaculo():
    demoscope3.move_noria(obstaculo.get())

def def_distancia():
    demoscope3.move_Zahlrohr_sharp(distancia.get())
    #print(distancia.get())

# List of obstacles on the wheel
obstaculo = IntVar()
lbl_obs = Label(top_frame, text="Seleccione el atenuador que desea\n interponer entre la muestra y el detector",
                font=("Times New Roman", 12), bg='white', padx=10, pady=10)
lbl_obs = lbl_obs.pack(side=TOP, fill=Y)
#lbl_obs.grid(pady=20, column=0, row=2)
# left_canvas = Canvas(left_frame)
# left_canvas.create_line(15, 25, 200, 25)
# left_canvas.grid(column=0, row=2)

obs0 = Radiobutton(top_frame, text='Sin atenuador', value=0, variable=obstaculo, bg='white', font=("Times New Roman", 12))
obs1 = Radiobutton(top_frame, text='Al 2.550cm', value=1, variable=obstaculo, bg='white', font=("Times New Roman", 12))
obs2 = Radiobutton(top_frame, text='Pb 0.080cm', value=2, variable=obstaculo, bg='white', font=("Times New Roman", 12))
obs4 = Radiobutton(top_frame, text='Al 0.935cm', value=4, variable=obstaculo, bg='white', font=("Times New Roman", 12))
obs5 = Radiobutton(top_frame, text='Pb 0.320cm', value=5, variable=obstaculo, bg='white', font=("Times New Roman", 12))
obs6 = Radiobutton(top_frame, text='Al 0.450cm ', value=6, variable=obstaculo, bg='white', font=("Times New Roman", 12))
obs7 = Radiobutton(top_frame, text='Pb 0.160cm', value=7, variable=obstaculo, bg='white', font=("Times New Roman", 12))
btn = Button(top_frame, text="Cambiar obstáculo", command=cambiar_obstaculo, font=("Times New Roman", 12))

obs0.pack(side=TOP, fill=Y)
obs1.pack(side=TOP, fill=Y)
obs2.pack(side=TOP, fill=Y)
obs4.pack(side=TOP, fill=Y)
obs5.pack(side=TOP, fill=Y)
obs6.pack(side=TOP, fill=Y)
obs7.pack(side=TOP, fill=Y)
btn.pack(side=TOP, fill=Y, pady=10)

# obs0.grid(padx=2, pady=2, column=0, row=4)
# obs1.grid(padx=2, pady=2, column=0, row=5)
# obs2.grid(padx=2, pady=2, column=0, row=6)
# obs4.grid(padx=2, pady=2, column=0, row=7)
# obs5.grid(padx=2, pady=2, column=0, row=8)
# obs6.grid(padx=2, pady=2, column=0, row=9)
# obs7.grid(padx=2, pady=2, column=0, row=10)
# btn.grid(padx=2, pady=2, column=0, row=12)

# List of distances
distancia = StringVar()
lbl_distancia = Label(top_frame, text="Defina la distancia en cm\nentre el detector y la muestra",
                      font=("Times New Roman", 12), bg='white')
lbl_distancia.pack(side=TOP, fill=Y)
dist_array = ['8.3', '10', '11.7', '13.4', '15.1', '16.8', '18.5', '20.2', '21.9', '23.6', '25.3', '27', '28.7', '30.4', '32.1', '33.8']
#lbl_distancia.grid(column=0, row=15, pady=20)
dist_menu = OptionMenu(top_frame, distancia, *dist_array)
distancia.set('8.3')
dist_menu.pack(side=TOP, fill=Y, pady=10)
# distancia = Spinbox(top_frame, from_=0, to=5, width=5)
# distancia.pack(side=TOP, fill=Y, pady=10)
#distancia.grid(pady=10, column=0, row=20)
#lbl_cm = Label(left_frame, text="cm", font=("Times New Roman", 12), bg='white')
#lbl_cm.grid(column=1, row=20)

btn = Button(top_frame, text="Seleccionar", command=def_distancia, font=("Times New Roman", 12))
btn.pack(side=TOP, fill=Y, pady=10)
#btn.grid(padx=2, pady=2, column=0, row=22)

Nbuttons = 6

def buttonfunction(index):
    for i in range(Nbuttons):
        buttons[i].config(state="disabled")
    if index == 0:
        demoscope3.scope()
    elif index == 1:
        demoscope3.storedata(300)
    elif index == 2:
        demoscope3.plotsignal()
    elif index == 3:
        demoscope3.plotfourier()
    elif index == 4:
        demoscope3.plothistogram()
    elif index == 5:
        os.system('clear')
        exit()
    for i in range(Nbuttons):
        buttons[i].config(state="active")

button_names = ['Plotear en tiempo real', 'Adquirir y almacenar', 'Graficar senal adquirida',
                'Transformada de Fourier', 'Histograma', 'Salir']
buttons = []
for index in range(Nbuttons):
    n = button_names[index]
    button = Button(top_frame, bg="White", text=n, relief=GROOVE,
                    command=lambda index=index, n=n: buttonfunction(index), font=("Times New Roman", 12))
    # Add the button to the window
    button.pack(side=TOP, fill=Y)
    # Add a reference to the button to 'buttons'
    buttons.append(button)

mainloop()
----------------------------------------------------------------------
repo_name: goddess5321/classifier_gar | __id__: 8,108,898,301,227 | github_id: null
repo_url: https://github.com/goddess5321/classifier_gar | branch_name: refs/heads/master
path: /data_enhance.py | filename: data_enhance.py | extension: py
blob_id: 79d6f4fe3eaf57ab4ff6ca369cf08926bcf5477a | content_id: 089a3bd1120f740531324338707189805a4e28c3
directory_id: 0679d3e3b0725c9c912f2d60117c027dbd064166
snapshot_id: 8ca14459dccb9b3b47d4f5d093af04e3c954183e | revision_id: 86b3522ad9d545816cb5e56fa5e9edf5a9140e67
detected_licenses: [] | license_type: no_license
visit_date: 2022-02-18T17:20:58.982776 | revision_date: 2019-08-28T02:18:22 | committer_date: 2019-08-28T02:18:22
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,788 | num_lines: 47 | avg_line_length: 36.914894 | max_line_length: 104
num_repo_files: 1 | num_lang_files: 1 | alphanum_fraction: 0.536353 | alpha_fraction: 0.481544 | hex_fraction: 0
content:

import torch as t
from torchvision import transforms
import random

def image_transforms(size=224):
    fr_transforms = transforms.Compose([
        transforms.Resize(size),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomRotation(45),
        #transforms.ColorJitter(brightness=0.05, contrast=0.1, saturation=0.3, hue=0.2),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])
    return fr_transforms

def enhance_transforms():
    possibility = random.randint(0, 19)
    if possibility % 20 == 0:
        output_transforms = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
        return output_transforms
    else:
        possibility2 = random.randint(0, 3)
        if possibility2 % 5 == 0:
            return image_transforms(224)
        elif possibility2 % 5 == 1:
            return image_transforms(245)
        elif possibility2 % 5 == 2:
            return image_transforms(274)
        elif possibility2 % 5 == 3:
            return image_transforms(316)

def transform_standard():
    output_transforms = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])
    return output_transforms
----------------------------------------------------------------------
repo_name: renjch/PythonTeachingProgram | __id__: 10,874,857,212,895 | github_id: 239,111,742
repo_url: https://github.com/renjch/PythonTeachingProgram | branch_name: refs/heads/master
path: /tutor/hello-world-third-edition/Listing_13-4_creating_and_using_a_function_that_returns_a_value.py | filename: Listing_13-4_creating_and_using_a_function_that_returns_a_value.py | extension: py
blob_id: 8801192f93ae582743fae3ad815b978fcbf793d1 | content_id: b341d9587a1847ca7237f33a89ee915363d015ca
directory_id: 012793075896cc2a3db3c85b23258166567e94f8
snapshot_id: 64e8b7aebd6624dd61213a5c093e1ac6dd071c35 | revision_id: 7d14fe0719a19f674664e03bb69e7a43547ac8d5
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-01T01:09:05.091989 | revision_date: 2020-12-31T04:02:46 | committer_date: 2020-12-31T04:02:46
star_events_count: 0 | fork_events_count: 0
gha_* (license_id, fork, event_created_at, created_at, updated_at, pushed_at, size, stargazers_count, forks_count, open_issues_count, language, archived, disabled): all null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 587 | num_lines: 14 | avg_line_length: 40.928571 | max_line_length: 97
num_repo_files: 233 | num_lang_files: 224 | alphanum_fraction: 0.710392 | alpha_fraction: 0.686542 | hex_fraction: 0
content:

# Listing_13-4_creating_and_using_a_function_that_returns_a_value.py
# Copyright Warren & Carter Sande, 2009-2019
# Released under MIT license https://opensource.org/licenses/mit-license.php
# ------------

# Function calculates tax and returns total
def calculateTax(price, tax_rate):
    total = price + (price * tax_rate)
    return total  # Sends result back to the main program

my_price = float(input("Enter a price: "))
totalPrice = calculateTax(my_price, 0.06)  # Calls function and stores the result in `totalPrice`
print("price = ", my_price, " Total price = ", totalPrice)
gmdsam/perched-peacock-api
5,523,327,965,384
8fbdbb6a04ef7e153fac0f041a990d628de44150
4d2eb0c849892d9d43d239be5a687bc5c297d1f0
/parking/core/models/parking_entry.py
5db3e9cafa47857abb70cf099f177cbd3a04f10c
[]
no_license
https://github.com/gmdsam/perched-peacock-api
7a5599a0aeb59b5b7ee6427ce1e89d83404aa1b3
37b5edfa015d5dc44dd08083ef88898566c7cbb0
refs/heads/master
"2020-08-12T00:32:21.473532"
"2019-10-15T00:31:33"
"2019-10-15T00:31:33"
214,656,755
0
0
null
false
"2019-10-15T00:31:34"
"2019-10-12T14:01:20"
"2019-10-15T00:27:54"
"2019-10-15T00:31:33"
18
0
0
0
Python
false
false
class ParkingEntryModel: def __init__(self, arrival_date, arrival_time, departure_date, departure_time, vehicle_type, vehicle_number=None): self._arrival_date = arrival_date self._arrival_time = arrival_time self._departure_date = departure_date self._departure_time = departure_time self._vehicle_type = vehicle_type self._vehicle_number = vehicle_number
UTF-8
Python
false
false
409
py
15
parking_entry.py
11
0.677262
0.677262
0
9
44.444444
118
azad17/api_test
10,557,029,624,916
1acb37ff430a04a4f2e396789a71f2a373917d43
3e552827febbef77de1e0c1e6d4de693c24565d3
/global_api/api/v1/views.py
9416e9aaa13074422d649386e0a1ccb97f420e37
[]
no_license
https://github.com/azad17/api_test
72c1b64abc07e838a1aada051631bf64c77cd16b
48b526c1a2cb4c71e6258814572be417f664aef4
refs/heads/main
"2023-08-21T14:22:24.325261"
"2021-09-22T10:11:15"
"2021-09-22T10:11:15"
408,840,662
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import datetime as dt import json import time import urllib from collections import defaultdict from rest_framework import status from rest_framework.response import Response from rest_framework.views import APIView SHIFT_API_URL = 'https://gitlab.com/-/snippets/2094509/raw/master/sample_json_1.json' MACHINE_API_URL = 'https://gitlab.com/-/snippets/2094509/raw/master/sample_json_2.json' BELT_API_URL = 'https://gitlab.com/-/snippets/2094509/raw/master/sample_json_3.json' '''creating a class to calculate the shift count''' class ShiftCountApi(APIView): def get(self, request): #checking the user queries in a try block try: start_time = dt.datetime.strptime(request.query_params.get("start_time"), "%Y-%m-%dT%H:%M:%SZ") end_time = dt.datetime.strptime(request.query_params.get("end_time"), "%Y-%m-%dT%H:%M:%SZ") except TypeError: return Response({}, status=status.HTTP_400_BAD_REQUEST) result = urllib.request.urlopen(SHIFT_API_URL) data = json.loads(result.read()) #lambda function to filter out values in the given time intervals filter_date_range = lambda x: True if start_time <= dt.datetime.strptime( x['time'], "%Y-%m-%d %H:%M:%S") < end_time else False filtered_data = filter(filter_date_range, data) shifts = {"shiftA": { "start": dt.time(6), "end": dt.time(14) }, "shiftB": { "start": dt.time(14), "end": dt.time(20) }, "shiftC": { "start": dt.time(20), "end": dt.time(6) }, } shift_count = { "shiftA": {"production_A_count": 0, "production_B_count": 0}, "shiftB": {"production_A_count": 0, "production_B_count": 0}, "shiftC": {"production_A_count": 0, "production_B_count": 0}, } for shift in filtered_data: time1 = dt.datetime.strptime(shift['time'], "%Y-%m-%d %H:%M:%S").time() if shifts['shiftA']['start'] < time1 < shifts['shiftA']['end']: shift_count['shiftA']['production_A_count'] = shift_count['shiftA']['production_A_count'] + 1 if shift[ 'production_A'] else shift_count['shiftA']['production_A_count'] shift_count['shiftA']['production_B_count'] = shift_count['shiftA']['production_B_count'] + 1 if shift[ 'production_B'] else shift_count['shiftA']['production_B_count'] elif shifts['shiftB']['start'] < time1 < shifts['shiftB']['end']: shift_count['shiftB']['production_A_count'] = shift_count['shiftB']['production_A_count'] + 1 if shift[ 'production_A'] else shift_count['shiftB']['production_A_count'] shift_count['shiftB']['production_B_count'] = shift_count['shiftB']['production_B_count'] + 1 if shift[ 'production_B'] else shift_count['shiftB']['production_B_count'] else: shift_count['shiftC']['production_A_count'] = shift_count['shiftC']['production_A_count'] + 1 if shift[ 'production_A'] else shift_count['shiftC']['production_A_count'] shift_count['shiftC']['production_B_count'] = shift_count['shiftC']['production_B_count'] + 1 if shift[ 'production_B'] else shift_count['shiftC']['production_B_count'] return Response(shift_count) '''class to find out the total run time, downtime and machine utilization in a given time inetrval''' class MachineUtilization(APIView): def get(self, request): try: start_time = dt.datetime.strptime(request.query_params.get("start_time"), "%Y-%m-%dT%H:%M:%SZ") end_time = dt.datetime.strptime(request.query_params.get("end_time"), "%Y-%m-%dT%H:%M:%SZ") except TypeError: return Response({}, status=status.HTTP_400_BAD_REQUEST) result = urllib.request.urlopen(MACHINE_API_URL) data = json.loads(result.read()) filter_daterange = lambda x: True if start_time < dt.datetime.strptime( x['time'], "%Y-%m-%d %H:%M:%S") < end_time else False filtered_data = filter(filter_daterange, 
data) max_runtime = 1021 machine_data = { 'runtime': 0, 'downtime': 0, 'utilisation': 0 } for data in filtered_data: machine_data['runtime'] += data['runtime'] if data['runtime'] > max_runtime: machine_data['downtime'] += (data['runtime'] - max_runtime) machine_data['utilisation'] = (machine_data['runtime']) / ( machine_data['runtime'] + machine_data['downtime']) * 100 machine_data['runtime'] = time.strftime("%Hh:%Mm:%Ss", time.gmtime(machine_data['runtime'])) machine_data['downtime'] = time.strftime("%Hh:%Mm:%Ss", time.gmtime(machine_data['downtime'])) return Response(machine_data) ''' a class to find out average of belt1 and belt2 in the given time interval''' class BeltAverage(APIView): def get(self, request): try: start_time = dt.datetime.strptime(request.query_params.get("start_time"), "%Y-%m-%dT%H:%M:%SZ") end_time = dt.datetime.strptime(request.query_params.get("end_time"), "%Y-%m-%dT%H:%M:%SZ") except TypeError: return Response({}, status=status.HTTP_400_BAD_REQUEST) result = urllib.request.urlopen(BELT_API_URL) data = json.loads(result.read()) filter_daterange = lambda x: True if start_time <= dt.datetime.strptime( x['time'], "%Y-%m-%d %H:%M:%S") <= end_time else False filtered_data = filter(filter_daterange, data) total_data = defaultdict(dict) for data in filtered_data: if data['state'] == True: data['belt1'] = 0 else: data['belt2'] = 0 if data['id'][-1] not in total_data.keys(): total_data[data['id'][-1]]['avg_belt1'] = data['belt1'] total_data[data['id'][-1]]['avg_belt2'] = data['belt2'] total_data[data['id'][-1]]['count'] = 1 total_data[data['id'][-1]]['id'] = data['id'][-1] else: total_data[data['id'][-1]]['avg_belt1'] += data['belt1'] total_data[data['id'][-1]]['avg_belt2'] += data['belt2'] total_data[data['id'][-1]]['count'] += 1 total_data[data['id'][-1]]['id'] = data['id'][-1] #creating a dictionary with the output values output_data = [ { "id": v['id'], "avg_belt1": (v['avg_belt1'] / v['count']), "avg_belt2": (v['avg_belt2'] / v['count'])} for k, v in sorted(total_data.items()) ] return Response(output_data)
UTF-8
Python
false
false
6,998
py
2
views.py
2
0.553158
0.539011
0
154
44.435065
119
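The per-shift counting in ShiftCountApi above repeats the same ternary six times; below is a sketch of an equivalent, shorter formulation under the same shift boundaries and field names (a refactoring idea, not a drop-in replacement for the view):

import datetime as dt

shifts = {
    'shiftA': (dt.time(6), dt.time(14)),
    'shiftB': (dt.time(14), dt.time(20)),
}

def shift_for(t):
    # anything outside shiftA/shiftB falls through to the overnight shiftC
    for name, (start, end) in shifts.items():
        if start < t < end:
            return name
    return 'shiftC'

def count_shifts(rows):
    counts = {s: {'production_A_count': 0, 'production_B_count': 0}
              for s in ('shiftA', 'shiftB', 'shiftC')}
    for row in rows:
        t = dt.datetime.strptime(row['time'], '%Y-%m-%d %H:%M:%S').time()
        bucket = counts[shift_for(t)]
        # bool is an int subclass, so this adds 1 only when the flag is truthy
        bucket['production_A_count'] += bool(row['production_A'])
        bucket['production_B_count'] += bool(row['production_B'])
    return counts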
mullenkamp/ecan-water-reports
17,231,408,794,835
a0ac5a8483e5b693bfafb3d72269649e98404e03
016ffb3019de50a03230a7bde6ce164fea75fc9f
/core/util.py
e49298b9f46029d6c92ec8ca08d8740eb3eac703
[ "Apache-2.0" ]
permissive
https://github.com/mullenkamp/ecan-water-reports
0b9a44a49304315e80ff9903f7365e755b61bd5f
ca6b903e0df8889e81fa1efc31abc8c9e25f04d4
refs/heads/master
"2021-04-03T08:59:34.848743"
"2021-02-16T00:18:18"
"2021-02-16T00:18:18"
125,106,083
0
2
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- """ Created on Wed Aug 8 13:07:42 2018 @author: MichaelEK """ import pandas as pd import geopandas as gpd def grp_ts_agg(df, grp_col, ts_col, freq_code): """ Simple function to aggregate time series with dataframes with a single column of sites and a column of times. Parameters ---------- df : DataFrame Dataframe with a datetime column. grp_col : str or list of str Column name that contains the sites. ts_col : str The column name of the datetime column. freq_code : str The pandas frequency code for the aggregation (e.g. 'M', 'A-JUN'). Returns ------- Pandas resample object """ df1 = df.copy() if type(df[ts_col].iloc[0]) is pd.Timestamp: df1.set_index(ts_col, inplace=True) if type(grp_col) is list: grp_col.extend([pd.Grouper(freq=freq_code)]) else: grp_col = [grp_col, pd.Grouper(freq=freq_code)] df_grp = df1.groupby(grp_col) return (df_grp) else: print('Make one column a timeseries!') def multipoly_to_poly(geodataframe): """ Function to convert a GeoDataFrame with some MultiPolygons to only polygons. Creates additional rows in the GeoDataFrame. Parameters ---------- geodataframe: GeoDataFrame Returns ------- GeoDataFrame """ gpd1 = geodataframe.copy() gpd2 = gpd.GeoDataFrame() for i in gpd1.index: geom1 = gpd1.loc[[i]] geom2 = geom1.loc[i, 'geometry'] if geom2.type == 'MultiPolygon': polys = [j for j in geom2] new1 = geom1.loc[[i] * len(polys)] new1.loc[:, 'geometry'] = polys else: new1 = geom1.copy() gpd2 = pd.concat([gpd2, new1]) return gpd2.reset_index(drop=True) def tsreg(ts, freq=None, interp=False): """ Function to regularize a time series object (pandas). The first three indeces must be regular for freq=None!!! ts -- pandas time series dataframe.\n freq -- Either specify the known frequency of the data or use None and determine the frequency from the first three indices.\n interp -- Should linear interpolation be applied on all missing data? """ if freq is None: freq = pd.infer_freq(ts.index[:3]) ts1 = ts.resample(freq).mean() if interp: ts1 = ts1.interpolate('time') return ts1 def getPolyCoords(row, coord_type, geom='geometry'): """Returns the coordinates ('x' or 'y') of edges of a Polygon exterior""" # Parse the exterior of the coordinate exterior = row[geom].exterior if coord_type == 'x': # Get the x coordinates of the exterior return list(exterior.coords.xy[0]) elif coord_type == 'y': # Get the y coordinates of the exterior return list(exterior.coords.xy[1])
UTF-8
Python
false
false
2,875
py
76
util.py
6
0.610087
0.595826
0
102
27.186275
125
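Usage sketch for grp_ts_agg above (the data frame contents are made up):

import pandas as pd
# from util import grp_ts_agg

df = pd.DataFrame({
    'site': ['a', 'a', 'b', 'b'],
    'time': pd.to_datetime(['2018-01-05', '2018-02-10', '2018-01-07', '2018-02-20']),
    'flow': [1.0, 2.0, 3.0, 4.0],
})
monthly = grp_ts_agg(df, 'site', 'time', 'M')['flow'].mean()  # one mean per site per month
print(monthly)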
ChMcg/oop-labs
7,378,753,816,783
48519ebe24b0391862e4d8451a71cb0c33c22c9f
2ecae2d06656e5677faf7d347d7137c9d0809830
/src_07/App.py
90cbb792f2eadca942f3f07e78c1e1be46c3161d
[]
no_license
https://github.com/ChMcg/oop-labs
ee6fed35539341e75fe1d4ee73c1fcd7586f5674
ca321941540ab3fd40e86ab0d090fc4f0ae559ec
refs/heads/master
"2022-08-22T19:23:47.029503"
"2020-05-25T16:52:47"
"2020-05-25T16:52:47"
258,827,971
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from src_07.ui.lab_7_ui import Ui_Form as MainDialog
from src_07.MyWidget import MyWidget
from src_07.Drawable import GridType, DrawableType
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtCore import QRect


class MainDialog_mock(MainDialog):
    def __init__(self):
        super().__init__()
        # self.old_setupUi = self.setupUi

    def setupUi(self, Form):
        # self.old_setupUi(Form)
        super().setupUi(Form)
        self.widget = MyWidget(self)
        self.widget.setGeometry(QRect(20, 20, 481, 521))
        self.widget.setObjectName("widget")


class MyWindow(QWidget):
    def __init__(self):
        super(MyWindow, self).__init__()
        self.ui = MainDialog()
        self.ui.setupUi(self)
        self.setup_custom_classes()
        # ---- figure selection ----
        self.ui.rectangle.clicked.connect(self.updateFigureType)
        self.ui.square.clicked.connect(self.updateFigureType)
        self.ui.circle.clicked.connect(self.updateFigureType)
        self.ui.ellipse.clicked.connect(self.updateFigureType)
        # ---- figure parameters update ----
        self.ui.line_a.valueChanged.connect(self.updateParameters)
        self.ui.line_b.valueChanged.connect(self.updateParameters)
        self.ui.radius.valueChanged.connect(self.updateParameters)
        # ---- grid selection ----
        self.ui.random.clicked.connect(self.updateGrid)
        self.ui.random.clicked.connect(self.updateGridSelection)
        self.ui.regular.clicked.connect(self.updateGrid)
        self.ui.regular.clicked.connect(self.updateGridSelection)
        # ---- clean button ----
        self.ui.clean.clicked.connect(self.cleanup)
        # ---- update grid button ----
        self.ui.generate_new.clicked.connect(self.updateGrid)
        # ---- grid parameters updated ----
        self.ui.dense_x.valueChanged.connect(self.updateGrid)
        self.ui.dense_x.valueChanged.connect(self.balanceByX)
        self.ui.dense_y.valueChanged.connect(self.updateGrid)
        self.ui.dense_y.valueChanged.connect(self.balanceByY)
        # ---- update initial settings ----
        self.updateFigureType()
        self.updateParameters()
        self.updateGridSelection()
        self.updateGrid()
        # ---- custom signals ----
        self.widget.objectAdded.connect(self.updateInfo)

    def setup_custom_classes(self):
        self.widget = MyWidget(self)
        # self.widget.setGeometry(QRect(10, 30, 500, 500))
        self.widget.setGeometry(QRect(11, 31, 498, 498))

    def keyPressEvent(self, e):
        if e.key() == Qt.Key_Escape:
            self.close()

    def balanceByX(self):
        if self.ui.balance.isChecked():
            self.ui.dense_y.setValue(self.ui.dense_x.value())

    def balanceByY(self):
        if self.ui.balance.isChecked():
            self.ui.dense_x.setValue(self.ui.dense_y.value())

    def updateGrid(self):
        x = int(self.ui.dense_x.value())
        y = int(self.ui.dense_y.value())
        if self.ui.regular.isChecked():
            self.widget.updateGrid(x, y, GridType.REGULAR)
            self.updateInfo()
        elif self.ui.random.isChecked():
            self.widget.updateGrid(x, y, GridType.RANDOM)
            self.updateInfo()

    def updateInfo(self):
        x = int(self.ui.dense_x.value())
        y = int(self.ui.dense_y.value())
        info = self.widget.getIntersectResult()
        current, total = info
        gt = self.widget.getGridType()
        self.ui.info.setText(f"Grid: ({x}, {y}) [{gt}]\n"
                             f"Filled: {current}/{total} ({(1-current/total)*100:.2f}%)")

    def updateGridSelection(self):
        if self.ui.regular.isChecked():
            self.ui.generate_new.setEnabled(False)
        else:
            self.ui.generate_new.setEnabled(True)

    def updateParameters(self):
        self.widget.setCurrentParameters(
            a=self.ui.line_a.value(),
            b=self.ui.line_b.value(),
            radius=self.ui.radius.value()
        )

    def updateFigureType(self):
        if self.ui.rectangle.isChecked():
            self.widget.setCurrentFigure(DrawableType.RECTANGLE)
            self.ui.line_a.setEnabled(True)
            self.ui.line_b.setEnabled(True)
            self.ui.radius.setEnabled(False)
        if self.ui.square.isChecked():
            self.widget.setCurrentFigure(DrawableType.SQUARE)
            self.ui.line_a.setEnabled(True)
            self.ui.line_b.setEnabled(False)
            self.ui.radius.setEnabled(False)
        if self.ui.circle.isChecked():
            self.widget.setCurrentFigure(DrawableType.CIRCLE)
            self.ui.line_a.setEnabled(False)
            self.ui.line_b.setEnabled(False)
            self.ui.radius.setEnabled(True)
        if self.ui.ellipse.isChecked():
            self.widget.setCurrentFigure(DrawableType.ELLIPSE)
            self.ui.line_a.setEnabled(True)
            self.ui.line_b.setEnabled(True)
            self.ui.radius.setEnabled(False)

    def cleanup(self):
        print('cleaning')
        self.widget.cleanup()


class MyApp():
    def exec(self) -> int:
        app = QApplication([])
        app.setStyle('Fusion')
        window = MyWindow()
        window.show()
        return app.exec()
UTF-8
Python
false
false
5,322
py
49
App.py
38
0.613863
0.605387
0
143
36.125874
91
Liberty3000/plato
15,144,054,729,690
5e19307c0294673a52ddfe2286881f7bf82fcb55
cfce9c7fd43f48c28740e76cb02313adbb5624bd
/plato/environment.py
54be5c197a539b75cbdb7635fcfb997cf928413f
[]
no_license
https://github.com/Liberty3000/plato
eaa6ae5d53cb3c8c92ced13241c18aca9790cb43
db9a68bcd78bbcfd4791f980e4abc9cbbffbd870
refs/heads/master
"2020-09-07T16:21:43.476285"
"2019-12-15T03:40:54"
"2019-12-15T03:40:54"
220,841,689
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import gym, random, os, time, tqdm import numpy as np, matplotlib.pyplot as mp from plato.actions import attack, patrol from plato.features import global_features from plato.entity.util import plot_entities, randomize_entities from plato.objective.util import plot_objectives, encode_objectives from plato.router import pathfinder from plato.terrain import perlin from plato.util import coverage, quantize_area class Environment(gym.Env): def __init__(self, config=None, shape=(64,64), objectives=[], controlled=[], time_limit=2**14): self.config, self.shape = config, shape self.state_space = (len(global_features), *shape) self.pathfinder = pathfinder() self.global_reward, self.local_rewards = 0, {} self.objectives = objectives self.controlled = controlled self.time_limit = time_limit def initialize_terrain(self): self.air = np.zeros(self.shape) self.sea = np.zeros(self.shape) self.land= perlin(self.shape[0], self.shape[1], seed=0) def reset(self): self.state, self.reward, self.timer = None,0,0 cfg = self.config['whites'] init_params = cfg['params'].values() init_area = quantize_area(cfg['init'], self.shape) self.whites = randomize_entities(*init_params, init_area, 'whites') cfg = self.config['blacks'] init_params = cfg['params'].values() init_area = quantize_area(cfg['init'], self.shape) self.blacks = randomize_entities(*init_params, init_area, 'blacks') self.white_detections, self.black_detections = [],[] self.white_casualties, self.black_casualties = [],[] self.initialize_terrain() return self.observation(self.whites, self.blacks) def terrain(self): return np.asarray([self.sea, self.land, self.air]) def observation(self, positive, negative=None): minimap = np.zeros((len(global_features),*self.shape)) minimap[global_features.index('land'),:]= self.land minimap[global_features.index('air'),:] = self.air minimap[global_features.index('sea'),:] = self.sea for ent in positive.values(): x,y = ent.xy[0],ent.xy[1] minimap[global_features.index(ent.entity_type),x,y] += 1 if ent.weapons: weapon_model = ent.weapons[0] minimap[global_features.index('weapon_power'), x,y] += weapon_model['weapon_power'] for xy in coverage(ent.xy, weapon_model['weapon_range'], self.shape): for (i,j) in coverage(xy, weapon_model['weapon_radius'], self.shape): minimap[global_features.index('weapon_radius'),i,j] += weapon_model['weapon_radius'] for (x,y) in coverage(ent.xy, weapon_model['weapon_range'], self.shape): minimap[global_features.index('weapon_range'),x,y] += weapon_model['weapon_range'] if ent.sensors: sensor_model = ent.sensors[0] minimap[global_features.index('sensor_coverage'),x,y]+= sensor_model['sensor_coverage'] for (x,y) in coverage(ent.xy, sensor_model['sensor_range'], self.shape): minimap[global_features.index('sensor_range'),x,y] += weapon_model['sensor_range'] minimap[global_features.index('durability'),x,y] += ent.properties['durability'] for (x,y) in coverage(ent.xy, ent.properties['mobility'], self.shape): minimap[global_features.index('mobility'),x,y] += (1 - ent.properties['mobility']) for (x,y) in coverage(ent.xy, ent.properties['visibility'], self.shape): minimap[global_features.index('visibility'),x,y] += ent.properties['visibility'] if any([ent.id in self.blacks.keys() for ent in positive.values()]): dets,kills,casts = self.white_detections,self.white_casualties,self.black_casualties else: dets,kills,casts = self.black_detections,self.black_casualties,self.white_casualties minimap = encode_objectives(self.objectives, minimap, self.white_detections, self.shape) for ent in dets: x,y = ent.xy 
minimap[global_features.index(ent.entity_type + '_detections'),x,y] += 1 for ent in kills: x,y = ent.xy minimap[global_features.index('positive_casualties'),x,y] += 1 for ent in casts: x,y = ent.xy minimap[global_features.index('negative_casualties'),x,y] += 1 return minimap def observe_detections(self): self.white_detections, self.black_detections = [],[] for white in self.whites.values(): for black in self.blacks.values(): operational = white.operational and black.operational white_cells = coverage(white.xy, white.properties['visibility'], self.shape) black_cells = coverage(black.xy, black.properties['visibility'], self.shape) if black.xy in white_cells and operational: self.white_detections += [black] if white.xy in black_cells and operational: self.white_detections += [white] self.white_detections = set(self.white_detections) self.black_detections = set(self.black_detections) def calculate_damage(self, damage_map, targets): casualities = [] for target in targets.values(): x,y = target.xy target.properties['durability'] -= damage_map[x,y] if target.properties['durability'] < 0: target.operational = False casualities.append(target) return set(casualities) def shape_reward(self): reward = 0 # reward kills for _ in self.black_casualties: reward += 0.2 # penalize casualties for _ in self.white_casualties: reward -= 0.2 # reward survival if all([not ent.operational for ent in self.blacks.values()]): reward += 1 # penalize extinction if all([not ent.operational for ent in self.whites.values()]): reward -= 1 # reward objectives reward += sum([obj.reward for obj in self.objectives]) return reward def termination_condition(self): terminal,status = False,'neutral' args = dict(entities=self.whites, enemies=self.blacks, timer=self.timer) terminal = any([obj(**args) for obj in self.objectives]) if terminal: status = 'success' if self.timer >= self.time_limit: terminal,status = True,'neutral' if all([not ent.operational for ent in self.whites.values()]): terminal,status = True,'failure' if all([not ent.operational for ent in self.blacks.values()]): terminal,status = True,'success' return terminal,status def step(self, white_actions={}, black_actions={}, verbose=False): self.timer += 1 metadata = {'positive_observables': None, # what we see 'negative_observables': None, # what they see 'fully_observable': None} # everything # white's actions positive_damage_map = np.zeros((*self.shape,)) for id_,action in white_actions.items(): entity = self.whites[id_] if entity.operational: if action in entity.patrol_actions: entity.xy = patrol(entity, action, self.terrain()) if action in entity.attack_actions: positive_damage_map = attack(entity, action, positive_damage_map) self.black_casualties = self.calculate_damage(positive_damage_map, self.blacks) # blacks's actions negative_damage_map = np.zeros((*self.shape,)) for id_,action in black_actions.items(): entity = self.blacks[id_] if entity.operational: if action in entity.patrol_actions: entity.xy = patrol(entity, action, self.terrain()) if action in entity.attack_actions: negative_damage_map = attack(entity, action, negative_damage_map) self.white_casualties = self.calculate_damage(negative_damage_map, self.whites) self.observe_detections() metadata['positive_observables'] = self.observation(self.whites, self.blacks) metadata['negative_observables'] = self.observation(self.blacks, self.whites) metadata[ 'positive_damage_map'] = positive_damage_map metadata[ 'negative_damage_map'] = negative_damage_map metadata[ 'positive_casualties'] = self.white_casualties 
metadata[ 'negative_casualties'] = self.black_casualties white_map = np.zeros((*self.shape,)) for ent in self.whites.values(): x,y = ent.xy[0],ent.xy[1] white_map[x,y] += 1 metadata['white_map'] = white_map black_map = np.zeros((*self.shape,)) for ent in self.blacks.values(): x,y = ent.xy[0],ent.xy[1] black_map[x,y] += 1 metadata['black_map'] = black_map self.global_reward = self.shape_reward() self.terminal, status = self.termination_condition() metadata['status'] = status return metadata['positive_observables'], self.global_reward, self.terminal, metadata def render(self, notebook=False, show=True, routes=False): fig,ax = mp.subplots(1,1,figsize=(10,10)) canvas = np.zeros((*self.shape,3)) ax.set_aspect('equal') controlled = [ent for ent in self.whites.values() if ent.controlled == True] canvas,ax,wpatches = plot_entities(self.whites, controlled, canvas, ax) canvas,ax,bpatches = plot_entities(self.blacks, [], canvas, ax) canvas,ax,opatches = plot_objectives(self.objectives, self.timer, canvas, ax) mp.title('PLATO Environment', size=16) ax.imshow(canvas.astype(np.uint8)) mp.show(block=False) mp.pause(1e-9) fig.canvas.draw_idle() try: fig.canvas.flush_events() except NotImplementedError: pass for p in wpatches: p.remove() for p in bpatches: p.remove() for p in opatches: p.remove() return canvas
UTF-8
Python
false
false
10,297
py
40
environment.py
19
0.610081
0.605322
0
240
41.904167
108
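A minimal episode loop against the Environment above; `config` and `objectives` are assumed to be set up to match the constructor, and the random patrol policy is purely illustrative:

import random

env = Environment(config=config, shape=(64, 64), objectives=objectives)
obs = env.reset()
done = False
while not done:
    # one random patrol action per operational white entity
    white_actions = {eid: random.choice(ent.patrol_actions)
                     for eid, ent in env.whites.items() if ent.operational}
    obs, reward, done, info = env.step(white_actions)
print('final reward:', reward)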
plovesm/filesorter
7,980,049,282,799
88b41f0d8ea3ff45f2f66fdcd916778021593783
1194525ce6479cd8f69ea69a2c00a648cb1aeecd
/app/db_util.py
bcc187be70bf3cb14e7d0bfa6a715e47a3025bde
[]
no_license
https://github.com/plovesm/filesorter
6bcb05b140ea48350f42c29d14c4d818d19ca7b3
34e8cbce2605569036417a404090da5803c12967
refs/heads/master
"2020-04-06T04:43:13.453405"
"2017-06-27T04:23:07"
"2017-06-27T04:23:07"
82,888,200
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Copyright 2017
# Author Paul Ottley
import sqlite3
from objects import FSfile
from datetime import date


class DatabaseUtil:
    def __init__(self):
        self.conn = None

    @staticmethod
    def open_connection(name="test.db", detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES):
        # pass detect_types by keyword: the second positional argument of
        # sqlite3.connect is the timeout, not detect_types
        conn = sqlite3.connect(name, detect_types=detect_types)
        cur = conn.cursor()
        print("Opened database successfully")
        return conn, cur

    @staticmethod
    def create_files_table(cur, table_name="Files"):
        cur.execute('''DROP TABLE IF EXISTS FILES;''')
        cur.execute('''CREATE TABLE IF NOT EXISTS FILES (
            ID INTEGER PRIMARY KEY NOT NULL,
            FILENAME TEXT NOT NULL,
            DATE_TAKEN DATETIME,
            FILE_TYPE TEXT,
            FILE_SIZE NUMBER,
            SRC_DIR VARCHAR(50),
            TGT_DIR VARCHAR(50));''')
        return True

    @staticmethod
    def insert_record(cur, file=None):
        # avoid a shared default instance: build the FSfile per call
        if file is None:
            file = FSfile()
        cur.execute('''INSERT INTO FILES
            (FILENAME, DATE_TAKEN, FILE_TYPE, FILE_SIZE, SRC_DIR, TGT_DIR)
            VALUES (?, ?, ?, ?, ?, ?);''',
                    (file.get_filename(), file.get_date_taken(), file.get_type(),
                     file.get_size(), file.get_src_dir(), file.get_tgt_dir()))
        DatabaseUtil.commit_transaction(cur.connection)
        return True

    @staticmethod
    def read_all_records(cur):
        cur.execute("SELECT * FROM FILES;")
        return cur.fetchall()

    @staticmethod
    def read_first_record(cur):
        cur.execute("SELECT * FROM FILES;")
        return cur.fetchone()

    @staticmethod
    def read_one_record(c, fields_values={'FILENAME': 'frog.jpg'}):
        # build a parameterised WHERE clause instead of formatting values into SQL
        clause = " AND ".join("{} = ?".format(field) for field in fields_values)
        c.execute("SELECT * FROM FILES WHERE " + clause + ";",
                  tuple(fields_values.values()))
        return c.fetchone()

    # Update
    @staticmethod
    def update_record(cur, date_taken, filename):
        cur.execute('''UPDATE FILES SET DATE_TAKEN = ? WHERE FILENAME = ?''',
                    (date_taken, filename))
        DatabaseUtil.commit_transaction(cur.connection)
        return True

    # Delete
    @staticmethod
    def delete_record(cur, id):
        # sqlite3 expects a sequence of parameters, hence the 1-tuple
        cur.execute('''DELETE FROM FILES WHERE ID = ?''', (id,))
        DatabaseUtil.commit_transaction(cur.connection)
        return True

    @staticmethod
    def commit_transaction(connection):
        connection.commit()

    @staticmethod
    def close_connection(connection):
        connection.close()
        print("Database connection closed")
UTF-8
Python
false
false
2,895
py
48
db_util.py
32
0.553022
0.547841
0
90
31.166667
116
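End-to-end usage sketch for DatabaseUtil above (FSfile comes from the project's `objects` module, so a default-constructed instance is assumed to be insertable):

from db_util import DatabaseUtil
from objects import FSfile

conn, cur = DatabaseUtil.open_connection(':memory:')  # throwaway in-memory database
DatabaseUtil.create_files_table(cur)
DatabaseUtil.insert_record(cur, FSfile())
print(DatabaseUtil.read_all_records(cur))
DatabaseUtil.delete_record(cur, 1)  # the ID is bound as a 1-tuple inside delete_record
DatabaseUtil.close_connection(conn)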
cobuildlab/ezonseller-backend
10,436,770,542,320
a5c5d68471f5666ae6d93d15c529fec24f65aef1
c763799e28b94847edfa31e0a81792d0631b2911
/payment/migrations/0003_auto_20180111_0413.py
600d5024f1a57153c93083896d7c4d8168e4e545
[]
no_license
https://github.com/cobuildlab/ezonseller-backend
4680ddbe0dc4dbe866dde9a19e954918365f290d
5ece80c6c2974790744035a5ae95d9df77648583
refs/heads/master
"2021-07-12T09:23:25.384165"
"2019-04-24T03:48:23"
"2019-04-24T03:48:23"
115,121,338
0
0
null
false
"2021-06-02T02:58:45"
"2017-12-22T14:15:21"
"2019-06-21T17:30:35"
"2021-06-02T02:58:45"
215
1
0
6
Python
false
false
# Generated by Django 2.0 on 2018-01-11 04:13 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('payment', '0002_auto_20180111_0247'), ] operations = [ migrations.RemoveField( model_name='plansubscription', name='accept', ), migrations.AddField( model_name='paymenthistory', name='accept', field=models.BooleanField(default=False, help_text='Accept the terms and conditions?', verbose_name='Accept'), ), ]
UTF-8
Python
false
false
575
py
78
0003_auto_20180111_0413.py
70
0.596522
0.544348
0
22
25.136364
122
rcisterna/perchess-py
5,918,464,955,125
8b2ed93ffb097e260b00913a829bad69d9cf264d
cf5d2b3cce51ae4ada792720164f92edf13db3ea
/perchess/pieces/__init__.py
e61ff3022b6353f94322c89712f1fa4e8af3d060
[]
no_license
https://github.com/rcisterna/perchess-py
95d6f49d5e884f1433d4572f882701765e00a742
fbd6650df6806345deb8bd801c3886e2f87cc54b
refs/heads/master
"2022-11-14T04:43:38.731098"
"2020-07-05T02:36:21"
"2020-07-05T02:36:21"
275,480,785
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Piezas de ajedrez. """ __all__ = [ "Movement", "Colors", "Piece", "Pawn", "Bishop", "Knight", "Rook", "Queen", "King", ] from perchess.pieces.colors import Colors from perchess.pieces.movement import Movement from perchess.pieces.piece import Piece from perchess.pieces.bishop import Bishop from perchess.pieces.king import King from perchess.pieces.knight import Knight from perchess.pieces.pawn import Pawn from perchess.pieces.queen import Queen from perchess.pieces.rook import Rook
UTF-8
Python
false
false
530
py
19
__init__.py
16
0.713208
0.713208
0
26
19.384615
45
Geek-Tekina/Coding
16,698,832,864,015
dbfefd322cf9249eb5261e52d33ddebbbb7df616
6e652507874d4f6835092e6e504525d71bc1a48d
/Python/hackerrank/Polar cordinates.py
ac07b0e78c15c33ec6904ce4f03d8f8c9d9ca58e
[]
no_license
https://github.com/Geek-Tekina/Coding
be4876b2b3b9a16f32c92bb1cabb3694fb03a837
15ee5c830dbf02d90cc972355c5054471985ebc5
refs/heads/main
"2023-08-07T11:17:58.362604"
"2021-10-01T08:47:25"
"2021-10-01T08:47:25"
412,362,390
2
0
null
true
"2021-10-01T06:58:49"
"2021-10-01T06:58:49"
"2021-10-01T06:15:25"
"2021-10-01T06:32:31"
408
0
0
0
null
false
false
import cmath a = complex(input()) b = list(cmath.polar(a)) print(b[0]) print(b[1])
UTF-8
Python
false
false
90
py
89
Polar cordinates.py
86
0.6
0.577778
0
6
13
24
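Round-trip sketch for the record above: cmath.rect inverts cmath.polar (the input value is illustrative):

import cmath

z = complex(3, 4)
r, phi = cmath.polar(z)    # modulus 5.0, phase ~0.927 rad
print(r, phi)
print(cmath.rect(r, phi))  # ~(3+4j), up to floating-point rounding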
fjy960120/python-9th-exercise
8,349,416,427,404
70fc99279eb8e70eacf3625c86b609b58e53fbc7
a8bd91d5b39bd9cc436c0b1454bc485ab9431109
/9.1.py
bcada02014900b99cef098a9626c5d03a69f7390
[]
no_license
https://github.com/fjy960120/python-9th-exercise
28d05b03164d3590bead9724ccc1ef5fcfc94930
38d2ff72c3068ca541164941a63a63350a2b988b
refs/heads/master
"2020-06-29T16:47:02.190328"
"2019-08-05T02:57:01"
"2019-08-05T02:57:01"
200,570,785
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
class Dog():
    """A simple attempt to model a dog."""
    def __init__(self, name, age):
        """Initialize the name and age attributes."""
        self.name = name
        self.age = age

    def sit(self):
        """Simulate the dog sitting on command."""
        print(self.name.title() + " is sitting now")

    def roll_over(self):
        """Simulate the dog rolling over on command."""
        print(self.name.title() + " is rolling over now")


my_dog = Dog('willian', 6)
print("My dog's name is: " + my_dog.name.title())
print("My dog's age is: " + str(my_dog.age))
my_dog.sit()
my_dog.roll_over()

your_dog = Dog('alice', 5)
lily_dog = Dog('beta', 5)
print("Lily has a dog; its name is: " + lily_dog.name.title())
print("Your dog's name is: " + your_dog.name.title())
print("Lily's dog's age is " + str(lily_dog.age))
lily_dog.sit()
lily_dog.roll_over()

"""Exercise 9.1.1"""
class Restaurant():
    def __init__(self, restaurant_name, cuisine_type):
        self.name = restaurant_name
        self.cuisinetype = cuisine_type

    def describe_restaurant(self):
        print("\nThis restaurant's name is: " + self.name.title())
        print("The restaurant's cuisine type is: " + self.cuisinetype.title())

    def open_restaurant(self):
        print("This restaurant is opening!")


restaurant = Restaurant('homoo', 'chinese')
restaurant.describe_restaurant()
restaurant.open_restaurant()
restaurant_0 = Restaurant('tripe hotpot house', 'sichuan cuisine')
restaurant_0.describe_restaurant()
restaurant_1 = Restaurant('zhajiang noodle shop', 'beijing cuisine')
restaurant_1.describe_restaurant()
restaurant_2 = Restaurant('morning tea house', 'cantonese cuisine')
restaurant_2.describe_restaurant()

"""Exercise 9.1.3"""
class User():
    def __init__(self, first_name, last_name, **users):
        self.firstname = first_name
        self.lastname = last_name
        self.users = users

    def describe_user(self):
        print("\nUser information is:")
        print("\t user's first name is: " + self.firstname.title())
        print("\t user's last name is: " + self.lastname.title())
        print("\nUser's other information is:")
        for key, value in self.users.items():
            print("\t" + key.title() + " : " + value.title())

    def greet_user(self):
        print("\nHello! " + self.firstname.title() + self.lastname.title())


user_0 = User('lily', 'edward', age='16', hometown='beijing')
user_0.describe_user()
user_0.greet_user()
UTF-8
Python
false
false
2,499
py
10
9.1.py
9
0.612022
0.603615
0
68
34
78
adafruit/Adafruit_Learning_System_Guides
2,972,117,410,002
61ef9a3984f5b534b9ea259f2652f551219d0243
091a6200be74bf6577c86f623665bcc24e16b02b
/NeoKey_BFF_Examples/CP_Keyboard_Example/code.py
8814a1ab9f2df376b6247bdd0e8346d631f31cb8
[ "MIT" ]
permissive
https://github.com/adafruit/Adafruit_Learning_System_Guides
b5f7bce40a16da64e7a79d4b39de032f2cca41d4
5eaa7a15a437c533b89f359a25983e24bb6b5438
refs/heads/main
"2023-09-05T18:31:41.621956"
"2023-09-05T15:36:09"
"2023-09-05T15:36:09"
105,065,494
937
937
MIT
false
"2023-09-12T18:48:53"
"2017-09-27T20:22:44"
"2023-09-09T07:27:13"
"2023-09-12T18:48:52"
513,998
894
760
53
C
false
false
# SPDX-FileCopyrightText: 2023 Liz Clark for Adafruit Industries # # SPDX-License-Identifier: MIT """Basic HID Macro with NeoKey BFF Example""" import time import board from digitalio import DigitalInOut, Direction, Pull import neopixel import usb_hid from adafruit_hid.keyboard import Keyboard from adafruit_hid.keycode import Keycode # setup onboard NeoPixel pixel_pin = board.A3 num_pixels = 1 pixel_color = (0, 255, 0) off = (0, 0, 0) pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.3, auto_write=False) # The Keycode sent for each button, will be paired with a control key key = Keycode.F modifier_key = Keycode.CONTROL # The keyboard object! time.sleep(1) # Sleep for a bit to avoid a race condition on some systems keyboard = Keyboard(usb_hid.devices) # setup onboard button switch = DigitalInOut(board.A2) switch.direction = Direction.INPUT switch.pull = Pull.UP switch_state = False while True: # if the button is not pressed.. if switch.value and switch_state: pixels.fill(off) pixels.show() keyboard.release_all() switch_state = False # if the button is pressed.. elif not switch.value and not switch_state: pixels.fill(pixel_color) pixels.show() keyboard.press(modifier_key, key) switch_state = True time.sleep(0.05)
UTF-8
Python
false
false
1,339
py
2,303
code.py
1,801
0.713966
0.698282
0
52
24.75
83
aikinogard/basis1d
17,111,149,714,040
eabebe8417da5d1472831ae701ed96c3cecd6769
c739c9d6ec368bf06fd7da2b95ccf15a11c6ef90
/basis1d/cgbf.py
7aedc19f64e35708a70192c248e756355f4df513
[]
no_license
https://github.com/aikinogard/basis1d
fe08c4bfa81764368b6b7adfb035e60832eaf02f
5fb6cf05371dbd0cbfa45159935d621c4326bd0c
refs/heads/master
"2020-05-31T21:29:34.085159"
"2015-05-07T20:17:40"
"2015-05-07T20:17:40"
33,433,737
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np import array from basis1d.pgbf import pgbf from basis1d.tools import S class cgbf: contracted = True def __init__(self,origin=0,power=0,exps=[],coefs=[]): self.origin = float(origin) self.power = float(power) # cgbf is made by list of pgbf self.pgbfs = [] # the coefficient of each pgbf self.coefs = array.array('d') # normalization constant of pgbf self.pnorms = array.array('d') # exponential of each pgbf self.pexps = array.array('d') for expn,coef in zip(exps,coefs): self.add_pgbf(expn,coef,False) if self.pgbfs: self.normalize() def __getitem__(self,item): return list(zip(self.coefs,self.pgbfs)).__getitem__(item) def __repr__(self): return "cgbf(%f,%d,%s,%s)"%(self.origin,self.power,list(self.pexps),list(self.coefs)) def __call__(self,*args,**kwargs): return sum(c*p(*args,**kwargs) for c,p in self) def grid(self,xs): return sum(c*p.grid(xs) for c,p in self) def add_pgbf(self,expn,coef,renormalize=True): self.pgbfs.append(pgbf(expn,self.origin,self.power)) self.coefs.append(coef) if renormalize: self.normalize() p = self.pgbfs[-1] self.pnorms.append(p.norm) self.pexps.append(p.expn) def normalize(self): Saa_sqrt = np.sqrt(S(self,self)) for i in range(len(self.coefs)): self.coefs[i] /= Saa_sqrt
UTF-8
Python
false
false
1,310
py
21
cgbf.py
12
0.675573
0.671756
0
53
23.735849
87
dan9thsense/contest_submission
6,493,990,590,264
e74fe729920ba0ae9df0632d87b4e152785c6b04
5316f01200ab0ffa6988d311aed8218c60b5261e
/submission/test_submission/runDocker.py
5d9fcfeaf40164199bbabb65d6e6635ecaa6fbf2
[]
no_license
https://github.com/dan9thsense/contest_submission
e5a0098871b825a4d123ee7763bd81195a996470
7f24e5d8257e7560bc5f3cb1ccad0f12d6594c8d
refs/heads/master
"2020-08-30T12:55:00.894801"
"2019-10-29T21:39:42"
"2019-10-29T21:39:42"
218,386,935
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import importlib.util from animalai.envs.gym.environment import AnimalAIEnv from animalai.envs.arena_config import ArenaConfig MULTIPLE_CONFIGS = True NUM_EPISODES = 2 def main(): # Load the agent from the submission # this is the standard way that python 3.5+ versions import a file # and a class within that file, from a specific path location # agent.py was copied into the docker image in the folder /aaio # and agent.py has a class, Agent, that we use to interact with # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path print('Loading your agent') try: spec = importlib.util.spec_from_file_location('agent_module', '/aaio/agent.py') agent_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(agent_module) submitted_agent = agent_module.Agent() except Exception as e: print('Your agent could not be loaded, make sure all the paths are absolute, error thrown:') raise e print('Agent successfully loaded') arena_config_in = ArenaConfig('/aaio/configs/1-Food.yaml') print('Resetting your agent') try: submitted_agent.reset(t=arena_config_in.arenas[0].t) except Exception as e: print('Your agent could not be reset:') raise e env = AnimalAIEnv( environment_filename='/aaio/test/env/AnimalAI', seed=0, retro=False, n_arenas=1, worker_id=1, docker_training=True, ) print('Running ', NUM_EPISODES, ' episodes') configs = [] configs.append('/aaio/configs/1-Food.yaml') if MULTIPLE_CONFIGS: configs.append('/aaio/configs/2-Preferences.yaml') configs.append('/aaio/configs/3-Obstacles.yaml') configs.append('/aaio/configs/4-Avoidance.yaml') configs.append('/aaio/configs/5-SpatialReasoning.yaml') configs.append('/aaio/configs/6-Generalization.yaml') configs.append('/aaio/configs/7-InternalMemory.yaml') configs.append('/aaio/configs/temporary_blackout.yaml') configs.append('/aaio/configs/permanent_blackout.yaml') configs.append('/aaio/configs/permanent_blackout_with_wall_and_bad_goal.yaml') configs.append('/aaio/configs/hot_zone.yaml') configs.append('/aaio/configs/movingFood.yaml') configs.append('/aaio/configs/forcedChoice.yaml') configs.append('/aaio/configs/objectManipulation.yaml') configs.append('/aaio/configs/allObjectsRandom.yaml') config_results = [] for config in configs: print('starting ', config) average_num_actions = 0 average_reward = 0 num_time_outs = 0 arena_config_in = ArenaConfig(config) for k in range(NUM_EPISODES): time_out = False num_actions = 0 episode_reward = 0 episode_results = [] env.reset(arenas_configurations=arena_config_in) print('Episode {} starting'.format(k)) try: obs, reward, done, info = env.step([0, 0]) for i in range(arena_config_in.arenas[0].t): action = submitted_agent.step(obs, reward, done, info) num_actions += 1 obs, reward, done, info = env.step(action) episode_reward += reward if done: if i == arena_config_in.arenas[0].t - 1: time_out = True submitted_agent.reset(arena_config_in.arenas[0].t) break except Exception as e: print('Episode {} failed'.format(k)) raise e print('Episode {0} completed, num actions {1}, reward {1}'.format(k, num_actions, episode_reward, time_out)) #episode_results.append([config, k, num_actions, episode_reward, time_out]) average_reward += episode_reward average_num_actions += num_actions if time_out: num_time_outs += 1 config_results.append([config, average_num_actions / NUM_EPISODES, average_reward / NUM_EPISODES, num_time_outs]) print("config results: config, avg number of actions, average reward, number of timeouts") for result in config_results: print(result) if __name__ == '__main__': main()
UTF-8
Python
false
false
4,408
py
36
runDocker.py
10
0.609347
0.60118
0
109
39.440367
121
panuta/thaihealthplm
6,837,587,941,755
eb0ff8ba6b8956f667b1e2687b610338a2115249
58b5035b8de1d2e60917f27b66fe28c190610861
/homepage/urls.py
a6d2173fb40b37bc2a47184820883db3649a53d3
[]
no_license
https://github.com/panuta/thaihealthplm
9babe9ad70028dea90d1ea240a06087c78b8d15a
4d62e2eed5ea9bad9c4ee78be8bb9b7cd8e00a04
refs/heads/master
"2020-04-15T04:24:14.006943"
"2010-07-16T13:31:46"
"2010-07-16T13:31:46"
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.conf.urls.defaults import * urlpatterns = patterns('homepage.views', url(r'^home/$', 'view_dashboard', name='view_dashboard'), )
UTF-8
Python
false
false
151
py
31
urls.py
20
0.675497
0.675497
0
6
24.166667
61
stevenzjj/niuyuan_platform
16,243,566,338,104
9cc6342b73ed85ead05cb03169dfd8a36c7ee3c1
92b9dd58e4fa5d1513f3a3ba19cfc7623450c987
/host_mgr/action_list.py
da8537a9ba95de1c4d0e82aa79f8e0b6d4ed6a55
[]
no_license
https://github.com/stevenzjj/niuyuan_platform
58fc0aa179aa23ea2e1bdfefa74440ba72998536
ca89993db293e79d8f95444080a1dcd6db91e429
refs/heads/master
"2018-04-20T05:17:38.293782"
"2017-11-21T02:09:31"
"2017-11-21T02:09:31"
90,713,035
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding:utf-8 -*- from host_mgr.plugins import cmd, state actions = { "cmd": cmd.CMD, "state": state.State, }
UTF-8
Python
false
false
151
py
56
action_list.py
44
0.582781
0.576159
0
8
17.25
39
cuevas1208/Tradebot
11,149,735,118,877
4026768ea47689def101f11c9cbbfade674d7413
5af641702c966f63d3443c0545c14eaaa4e20cde
/src/sklearn_main.py
5b1f5a5335424dc8f9939d47ad402be617529e46
[ "MIT" ]
permissive
https://github.com/cuevas1208/Tradebot
cea8d279b6a0bffd69bebde64484a8278c8d6518
f6499bc75d625414c9a474c774912cb502a153d8
refs/heads/master
"2021-01-23T02:06:02.692145"
"2020-04-26T19:41:20"
"2020-04-26T19:41:20"
92,904,911
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# skelearn.py # Prepare dataSets for training # techniques learn on the bottom site were implemented # to build this ml module # referene https://pythonprogramming.net/machine-learning-stock-prices-python-programming-for-finance/ ################################################################################ import argparse import logging import os from collections import Counter import pandas as pd import numpy as np from sklearn import svm, neighbors from sklearn.ensemble import VotingClassifier, RandomForestClassifier from sklearn.externals import joblib from src.correlation_test import get_correlation from src.conf import PREDICTION_DAYS, BATCH_LEN, MODELS_PATH, STOCK_TO_PREDICT, LABEL_TO_PREDICT, \ PERCENTAGE_THRESHOLD from src.data_functions.data_load import get_dataframe logger = logging.getLogger() def process_data_for_labels(ticker_feature, df): """ process_data_for_labels # input days to analyze data # it would output the percentage increase/decrease of the stock in PREDICTION_DAYS # by shifting the columns to PREDICTION_DAYS :param ticker_feature: string name of the ticker :param df: dataset fragment :return: df with adjusted percentage change """ print('loading:', ticker_feature, df.shape) # delete all NAN array with rows deleted df.dropna(subset=[ticker_feature], inplace=True) logging.debug("df shape after eliminating NAN from labels", df.shape) # get future labels for classification, shift data df['{}_labels'.format(ticker_feature)] = \ (df[ticker_feature].shift(-PREDICTION_DAYS) - df[ticker_feature]) / df[ticker_feature] # return a list of labels with no NAN df.dropna(subset=['{}_labels'.format(ticker_feature)], inplace=True) logging.debug("df shape after eliminating NAN from labels", df.shape) logging.debug("labels with days difference", df['{}_labels'.format(ticker_feature)]) # transform labels in to 1, 0 and -1 ==> buy_hold_sell df['{}_labels'.format(ticker_feature)] = list(map(set_buy_sell_hold, df['{}_labels'.format(ticker_feature)])) logging.debug("labels after the one_hot", df['{}_labels'.format(ticker_feature)]) # visualize spread values = df['{}_labels'.format(ticker_feature)].values.tolist() str_values = [str(i) for i in values] print('Data spread:', Counter(str_values), "total labels: ", len(values)) # handle any invalid numbers df.fillna(0, inplace=True) df.replace([np.inf, -np.inf], np.nan, inplace=True) df.dropna(inplace=True) return df def set_buy_sell_hold(*args): """ str_vals is a list of our data labels -1 = decrees 0 = stayed same -1 = increased """ cols = [c for c in args] for col in cols: if col > PERCENTAGE_THRESHOLD: return 1 if col < -PERCENTAGE_THRESHOLD: return -1 return 0 def extract_features_method_1(df): # tickers are all the column_labels that would be for training excluding training_labels tickers = df.columns.values.tolist() # pct_change() normalize the value to be percentage change df_values = df[[ticker for ticker in tickers]].pct_change() df_values.replace([np.inf, -np.inf], 0, inplace=True) df_values.fillna(0, inplace=True) print("x features shape: ", df_values.shape) x = df_values.values return x, df def extract_features_method_2(df, ticker): # get correlation stock correlation_stock = get_correlation(df, ticker + LABEL_TO_PREDICT, top_samples=1, set_len=12).index def shift_data(corr, pct=False): if pct: df[corr] = df[corr].pct_change() x = df[corr].shift(0).values for i in range(1, BATCH_LEN): x = np.dstack((x, df[corr].shift(i).values)) return x # calculates the medan of low df['x_features'] = df[ticker + LABEL_TO_PREDICT] # df[ticker 
+ '_Low'] + df[ticker + '_High'] / 2 # normalize the value to be percent change df['x_features'] = df['x_features'].pct_change() print('average percentage change {} in the last 20 days {}\n\n'.format(np.abs(df['x_features']).mean(), np.abs(df['x_features'][-20:]).mean())) # extract past feature data including today's data if known # Todo: currently row goes from new to old x = shift_data('x_features') for corr in correlation_stock: new_x = (shift_data(corr, pct=True)) x = np.concatenate([x, new_x], -1) x = x[0].tolist() df['training_features'] = x # handles invalid numbers # df.replace([np.inf, -np.inf], 0, inplace=True) # df.fillna(0, inplace=True) return df def get_training_dataset(ticker, df): # change values to percentage of change within the days intended to predict df = process_data_for_labels(ticker + LABEL_TO_PREDICT, df) # x, _ = extract_features_method_1(df) df = extract_features_method_2(df, ticker) # take the last 7 days as validation samples before shuffle # this is mainly for visualization sample_size = 15 valid = df[-sample_size:] y = valid[ticker + LABEL_TO_PREDICT + '_labels'].values x = valid['training_features'].tolist() x = np.asarray(x) dates = valid['Date'].tolist() real_x = valid[ticker + LABEL_TO_PREDICT].values valid_sample = {'x': x, 'y': y, 'dates': dates, 'p_dates': dates, "real_x": real_x} # shuffles dataset new_df = df[:-sample_size] new_df = new_df.sample(frac=1).reset_index(drop=True) # Todo: Balance data y = new_df[ticker + LABEL_TO_PREDICT + '_labels'].values values, count = np.unique(y, return_counts=True) limit = np.min(count) limit_value = values[np.argmin(count)] # load dataset y = new_df[ticker + LABEL_TO_PREDICT + '_labels'].values x = new_df['training_features'].tolist() x = np.asarray(x) dates = new_df['Date'].tolist() # clean data mask = ~np.isnan(x).any(axis=1) x = x[mask]; y = y[mask] mask = np.isfinite(x).any(axis=1) x = x[mask]; y = y[mask] print('input training labels shape:', y.shape, ' and features shape:', x.shape) return x, y, dates, valid_sample def train(x, y): logging.debug("X sample: \ {} ".format(len(x.shape))) logging.debug("y sample: \ {} ".format(len(y.shape))) # random shuffle and split test_size = int(len(y) * 0.2) x_train, x_test, y_train, y_test = x[test_size:], x[:test_size], y[test_size:], y[:test_size] # combine the predictions of several base estimators clf = VotingClassifier([('lsvc', svm.LinearSVC()), ('knn', neighbors.KNeighborsClassifier()), ('rfor', RandomForestClassifier())]) clf.fit(x_train, y_train) # test data prediction np.set_printoptions(precision=2) confidence = clf.score(x_test, y_test) print('accuracy:', confidence) return confidence, clf def forecast(ticker, df): # load dataset x, y, dates, valid_set = get_training_dataset(ticker, df) # load model if not os.path.exists(ticker): # train model confidence, clf = train(x, y) # save model if not os.path.exists(MODELS_PATH): os.makedirs(MODELS_PATH) joblib.dump(clf, MODELS_PATH + ticker + '.pkl') # run valid_set from src.visualization import matplot_graphs valid_set['y_pred'] = clf.predict(valid_set['x']) matplot_graphs.plot_histogram(valid_set, ticker, confidence, PREDICTION_DAYS) # last dataset sample clf = joblib.load(MODELS_PATH + ticker + '.pkl') # get forecast x = x[-1:] forecast_ = clf.predict(x) print('\n', ticker, ': Based in the last', BATCH_LEN, 'market days, the forecast is ', forecast_ * PERCENTAGE_THRESHOLD, '% in next', PREDICTION_DAYS, 'days\n') if __name__ == "__main__": import warnings, sys if not sys.warnoptions: warnings.simplefilter("ignore") 
# parser initial arguments # logging.basicConfig(filename='log.log', level=logging.INFO) parser = argparse.ArgumentParser(description='Optional app description') # run -v for debug mode parser.add_argument('-v', "--verbose", action='store_true', help='Type -v to do debugging') args = parser.parse_args() # set debug mode if args.verbose: logger.setLevel(logging.DEBUG) # forecast chosen ticker df = get_dataframe(STOCK_TO_PREDICT) forecast(STOCK_TO_PREDICT, df) # forecast all sp500 # sp500_list = getsp500() # for sp500_ticker in sp500_list: # df = get_dataframe(sp500_ticker) # forecast(sp500_ticker, df)
UTF-8
Python
false
false
8,712
py
209
sklearn_main.py
18
0.635675
0.628673
0
257
32.898833
114
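A minimal sketch of the voting ensemble built in train() above, run on synthetic data (the shapes and labels are invented, matching the {-1, 0, 1} label scheme):

import numpy as np
from sklearn import svm, neighbors
from sklearn.ensemble import VotingClassifier, RandomForestClassifier

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 10))
y = rng.integers(-1, 2, size=200)  # labels in {-1, 0, 1}

clf = VotingClassifier([('lsvc', svm.LinearSVC()),
                        ('knn', neighbors.KNeighborsClassifier()),
                        ('rfor', RandomForestClassifier())])
clf.fit(x[:150], y[:150])
print('accuracy:', clf.score(x[150:], y[150:]))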
Sohoni-Agarwal/QA_CommunityForum
8,280,696,972,159
2943ca5d54b6be3988f153aa0ad8041bead4dd99
a5feac3c37c18ad4d8c1391fce1f4c02a512081e
/admins/urls.py
892233402212440ffe139b60de52c29061038c3c
[]
no_license
https://github.com/Sohoni-Agarwal/QA_CommunityForum
76bcadea338dab35b0aafbe936dfc91507b5f8ab
f9482149ce0a91e8f24d34913f03fb9764d07f6c
refs/heads/master
"2022-12-17T18:12:43.348442"
"2019-11-05T14:56:25"
"2019-11-05T14:56:25"
213,489,504
0
0
null
false
"2022-12-08T06:41:28"
"2019-10-07T21:22:34"
"2019-11-05T15:08:58"
"2022-12-08T06:41:26"
48,013
0
0
8
HTML
false
false
from django.urls import path from QA_CommunityForum import views urlpatterns = [ # path('', views.index), # path('', views.HomePageView.as_view(), name='home') ]
UTF-8
Python
false
false
171
py
150
urls.py
110
0.672515
0.672515
0
7
23.428571
57
crowdbotics-apps/web-23-dev-6461
11,673,721,135,817
ee493bfbfb98f18704c1df4170da1e101fa6a32d
bf72dbcc8cdeeab0c1570269fdaeb44987fd944e
/home/migrations/0003_auto_20200623_0752.py
6553287ab8d0c60eaa35282ae05ebee028038f12
[]
no_license
https://github.com/crowdbotics-apps/web-23-dev-6461
8dffd49588ff0ea8538f1888b398391f90956a54
1d1b8e8c163873a4cbecacc73b921291389929ce
refs/heads/master
"2022-11-11T18:25:25.479658"
"2020-06-23T07:58:32"
"2020-06-23T07:58:32"
274,312,991
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Generated by Django 2.2.13 on 2020-06-23 07:52 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("home", "0002_load_initial_data"), ] operations = [ migrations.AddField( model_name="customtext", name="ghfjhfgjhgfjhgfjh", field=models.BigIntegerField(blank=True, null=True), ), migrations.AddField( model_name="customtext", name="jhghfkjfkjgf", field=models.BigIntegerField(blank=True, null=True), ), ]
UTF-8
Python
false
false
585
py
2
0003_auto_20200623_0752.py
2
0.588034
0.553846
0
23
24.434783
64
SagittariusA-Star/AST5220-Milestone-I
3,100,966,411,720
21950f0c931fbc7fbe6644791050c214bceeac4c
8668ef7e84aa9dd9f595c8959936a288de1ae0f2
/src/plot_recombination.py
2476f245f56bbf618c4b8abd45b413b01ace60c8
[]
no_license
https://github.com/SagittariusA-Star/AST5220-Milestone-I
c38575f085b9b0c3eae02af0ab5e84e7d97feea3
3525cf59a40854747d695b658307b53f2ddb8089
refs/heads/master
"2020-12-26T22:15:14.736156"
"2020-06-08T18:08:31"
"2020-06-08T18:08:31"
237,663,441
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np import matplotlib.pyplot as plt import matplotlib from astropy import constants as const from astropy import units as u plt.style.use("bmh") fonts = { "font.family": "serif", "axes.labelsize": 18, "font.size": 12, "legend.fontsize": 18, "xtick.labelsize": 12, "ytick.labelsize": 12, } plt.rcParams.update(fonts) # Density parameters needed to plot background # color corresponding to domination era cosmo_data = np.loadtxt("cosmology.txt") OmegaB = cosmo_data[:, 3] OmegaCDM = cosmo_data[:, 4] OmegaLambda = cosmo_data[:, 5] OmegaR = cosmo_data[:, 6] Omega_sum = OmegaB + OmegaCDM + OmegaLambda + OmegaR Omega_m = OmegaCDM + OmegaB # Loading data from file recombo_data = np.loadtxt("recombination.txt") x = recombo_data[:, 0] Xe = recombo_data[:, 1] ne = recombo_data[:, 2] * u.m ** (- 3) tau = recombo_data[:, 3] dtaudx = recombo_data[:, 4] ddtaudxdx = recombo_data[:, 5] g_tilde = recombo_data[:, 6] dg_tildedx = recombo_data[:, 7] ddg_tildeddx = recombo_data[:, 8] Saha_Xe = recombo_data[:, 9] # Computing printout data g_integral = np.trapz(g_tilde, x = x) x_rec = x[np.argmin(np.abs(Xe - 0.5))] a_rec = np.exp(x_rec) z_rec = 1 / a_rec - 1 Saha_x_rec = x[np.argmin(np.abs(Saha_Xe - 0.5))] Saha_a_rec = np.exp(Saha_x_rec) Saha_z_rec = 1 / Saha_a_rec - 1 log_rel_error = np.log10(np.abs(g_integral - 1)) x_lss = x[np.where(g_tilde == g_tilde.max())][0] a_lss = np.exp(x_lss) z_lss = 1 / a_lss - 1 g_max = g_tilde.max() tau_transparent = tau[np.abs(tau - 1).argmin()] x_transparent = x[np.abs(tau - 1).argmin()] # Printout of interesting information print("----------------------------Some interesting quantities------------------------") print("Integral of g_tilde: {0}, log rel error: {1}".format(g_integral, log_rel_error)) print("Maximum of g_tilde: x = {0}, g_tilde = {1}".format(x_lss, g_max)) print("Optical depth: tau = {0} at x = {1}".format(tau_transparent, x_transparent)) print("Redshift at last scattering: z = {0}".format(z_lss)) print("Recombination (Xe = 0.5): x = {0}, z = {1}".format(x_rec, z_rec)) print("Recombination Saha (Xe = 0.5): x = {0}, z = {1}".format(Saha_x_rec, Saha_z_rec)) print("-------------------------------------------------------------------------------") # Generating plots fig = plt.figure(figsize=[1.5 * 7.1014, 1.5 * 7.1014 / 1.618]) # Plotting electron fraction ax10 = plt.subplot(221) ax10.scatter(x_rec, Xe[np.where(x == x_rec)], color = "r", label=r"$(x_{rec}, X_{e,rec})$", zorder = 3) ax10.plot(x, Xe, label=r"$X_e(x)$", zorder = 1) ax10.text(-10, Xe[np.where(x == x_rec)], r"$({0:.2f}, {1:.2f})$".format(x_rec, 0.5), color = "r") ax10.scatter(Saha_x_rec, Saha_Xe[np.where(x == Saha_x_rec)], color = "g", zorder = 4, label=r"$(x_{rec}, X_{e,rec}^{Saha})$") ax10.plot(x, Saha_Xe, label=r"$X_e^{Saha}(x)$", zorder = 2) ax10.text(-10, 0.5 * Saha_Xe[np.where(x == Saha_x_rec)], r"$({0:.2f}, {1:.2f})$".format(Saha_x_rec, 0.5), color = "g") ax10.legend(fontsize = 16) ax10.set_xlabel(r"$x = \log (a)$") ax10.set_ylabel(r"$X_e \approx n_e / n_H$") ax10.set_yscale("log") ax10.set_xlim(-12, 0) ax10.set_ylim(1.5e-4, 5) ax10.axvspan( np.min(x), x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], alpha=0.3, color="orange", ) ax10.axvspan( x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], alpha=0.3, color="cyan", ) ax10.axvspan( x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], np.max(x), alpha=0.3, color="purple", ) # Plotting electron density ax11 = plt.subplot(222) ax11.plot(x, ne, label=r"$n_e(x)$") 
ax11.legend() ax11.set_xlabel(r"$x = \log (a)$") ax11.set_ylabel(r"$n_e [\mathrm{m^{-3}}]$") ax11.set_yscale("log") ax11.set_xlim(-12, 0) ax11.axvspan( np.min(x), x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], alpha=0.3, color="orange", ) ax11.axvspan( x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], alpha=0.3, color="cyan", ) ax11.axvspan( x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], np.max(x), alpha=0.3, color="purple", ) # Plotting optical depth ax12 = plt.subplot(212) ax12.plot(x, tau, label=r"$\tau(x)$") ax12.plot(x, - dtaudx, label=r"$-\tau'(x)$", linestyle = "--") ax12.plot(x, ddtaudxdx, label=r"$\tau''(x)$", linestyle = "-.") ax12.scatter(x_transparent, tau_transparent, color = "r", label = r"$(x_{lss}, \tau_{lss})$") ax12.text(-8.2, 1, "({0:.2f}, 1)".format(x_transparent), color = "r") ax12.legend() ax12.set_xlabel(r"$x = \log (a)$") ax12.set_ylabel(r"$\tau(x)$") ax12.set_ylim(1e-8, 1e8) ax12.set_xlim(-12, 0) ax12.set_yscale("log") ax12.axvspan( np.min(x), x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], alpha=0.3, color="orange", ) ax12.axvspan( x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], alpha=0.3, color="cyan", ) ax12.axvspan( x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], np.max(x), alpha=0.3, color="purple", ) fig.tight_layout() fig.savefig("../doc/Figures/Xe_ne_tau.pdf", dpi=1000) # Plotting visibility function, derivative and second derivative thereof fig1, ax1 = plt.subplots(2, 2 , figsize=[1.5 * 7.1014, 1.5 * 7.1014 / 1.618]) ax1[0, 0].scatter(x_lss, g_max, color = "r", label = r"$(x_{lss}, \tilde{g}_{lss})$") ax1[0, 0].plot(x, g_tilde, label=r"$\tilde{g}(x)$") ax1[0, 0].text(-10, g_max, r"$({0:.2f}, {1:.2f})$".format(x_lss, g_max), color = "r") ax1[0, 0].set_xlim(-12, 0) ax1[0, 0].legend() ax1[0, 0].set_ylabel(r"$\tilde{g}(x)$") ax1[0, 0].set_xlabel(r"$x = \log (a)$") ax1[0, 0].axvspan( np.min(x), x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], alpha=0.3, color="orange", ) ax1[0, 0].axvspan( x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], alpha=0.3, color="cyan", ) ax1[0, 0].axvspan( x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], np.max(x), alpha=0.3, color="purple", ) ax1[0, 1].plot(x, dg_tildedx, label=r"$\tilde{g}'(x)$") ax1[0, 1].set_ylabel(r"$\frac{d\tilde{g}}{dx}(x)$") ax1[0, 1].set_xlabel(r"$x = \log (a)$") ax1[0, 1].set_xlim(-12, 0) ax1[0, 1].legend() ax1[0, 1].axvspan( np.min(x), x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], alpha=0.3, color="orange", ) ax1[0, 1].axvspan( x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], alpha=0.3, color="cyan", ) ax1[0, 1].axvspan( x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], np.max(x), alpha=0.3, color="purple", ) ax1[1, 0].plot(x, ddg_tildeddx, label=r"$\tilde{g}''(x)$") ax1[1, 0].legend() ax1[1, 0].set_xlim(-12, 0) ax1[1, 0].set_ylabel(r"$\frac{d^2\tilde{g}}{dx^2}(x)$") ax1[1, 0].set_xlabel(r"$x = \log (a)$") ax1[1, 0].axvspan( np.min(x), x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], alpha=0.3, color="orange", ) ax1[1, 0].axvspan( x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], alpha=0.3, color="cyan", ) ax1[1, 0].axvspan( x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], np.max(x), 
alpha=0.3, color="purple", ) # Peak normalized visibility function and its derivatives ax1[1, 1].plot(x, g_tilde / np.max(g_max), label=r"$\tilde{g}(x)$") ax1[1, 1].plot(x, dg_tildedx / np.max(np.max(dg_tildedx)), label=r"$\tilde{g}'(x)$") ax1[1, 1].plot(x, ddg_tildeddx / np.max(np.abs(ddg_tildeddx)), label=r"$\tilde{g}''(x)$") ax1[1, 1].legend(loc = 1) ax1[1, 1].set_xlim(-7.5, -5.7) ax1[1, 1].set_ylabel(r"Peak normalized") ax1[1, 1].set_xlabel(r"$x = \log (a)$") ax1[1, 1].axvspan( np.min(x), x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], alpha=0.3, color="orange", ) ax1[1, 1].axvspan( x[np.where(OmegaB + OmegaCDM >= OmegaLambda + OmegaR)][0], x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], alpha=0.3, color="cyan", ) ax1[1, 1].axvspan( x[np.where(OmegaLambda >= Omega_sum - OmegaLambda)][0], np.max(x), alpha=0.3, color="purple", ) fig1.tight_layout() fig1.savefig("../doc/Figures/g_tilde.pdf", dpi=1000) plt.show()
UTF-8
Python
false
false
8,638
py
34
plot_recombination.py
10
0.584395
0.532878
0
279
29.960573
125
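The plotting script above sanity-checks that the visibility function integrates to one with np.trapz. A minimal self-contained illustration of that normalization check, using a synthetic Gaussian in place of the real g_tilde data (the center and width here are arbitrary):

import numpy as np

# Synthetic stand-in for g_tilde: a normalized Gaussian centered near x = -7.
x = np.linspace(-12, 0, 10_000)
g = np.exp(-0.5 * ((x + 7.0) / 0.2) ** 2) / (0.2 * np.sqrt(2 * np.pi))

integral = np.trapz(g, x=x)                    # should be ~1 for a normalized profile
log_rel_error = np.log10(np.abs(integral - 1))
print(f"integral = {integral:.6f}, log10 relative error = {log_rel_error:.2f}")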
qimo00/timo
10,711,648,477,831
720953b131d21ca437a75bde9afed3d6f4fa08aa
86146192c0162428887102cdd15f43d338216ee9
/test1_login()def.py
ebdd3f0da43f3ad9e228dcf7b55c38f75490a767
[]
no_license
https://github.com/qimo00/timo
46d894323a2a82c8170162f343d1c5d5ed1fa690
01f5a1e6ef304479fd806cc607cd99f3c2d8ba41
refs/heads/master
"2021-01-18T20:56:58.736075"
"2017-04-25T09:56:14"
"2017-04-25T09:56:14"
69,218,151
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
user_data = {}


def logIn():
    while True:
        print('|---Create user: N/n---|')
        print('|---Log in:      E/e---|')
        print('|---Quit:        Q/q---|')
        print('Please enter a command code:')
        command = input()
        if command == 'N' or command == 'n':
            newC()
            # continue
        elif command == 'E' or command == 'e':
            enterC()
            # continue
        elif command == 'Q' or command == 'q':
            break
        else:
            print('Invalid command, please try again:\n')
    print('Exited successfully~')


def newC():
    while True:
        name = input('Please enter a username: ')
        if name in user_data:
            print('That username already exists, please try again')
            continue
        else:
            code = input('Please enter a password: ')
            user_data[name] = code
            print('User created successfully!')
            break
    return


def enterC():
    name = input('Please enter a username: ')
    if name in user_data:
        code = input('Please enter a password: ')
        if code == user_data[name]:
            print("Login successful!")
        else:
            print('Wrong password~')
    else:
        print('That user does not exist!')
    return


logIn()
UTF-8
Python
false
false
1,240
py
43
test1_login()def.py
41
0.438343
0.438343
0
50
19.76
43
bartek412/IntermediatePythonCourse
3,788,161,202,313
0d6fc7afb19473bc50773413e61171b5c9cd428a
504cbe0df5cc4ff320b4f3168414b7168c04e392
/task49_args&kwargs.py
67ec37c1ac69ba4730237fd53dccc8d2b1d316fe
[]
no_license
https://github.com/bartek412/IntermediatePythonCourse
5f93e4c86f3c883a719b8da0283b48e2a0053926
73a41a0017906c982f23eaa5a6d50cd44469ceef
refs/heads/master
"2020-09-02T05:31:31.857296"
"2020-01-09T15:25:27"
"2020-01-09T15:25:27"
219,142,963
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def calculate_paint(efficency_ltr_per_m2, *args): return sum(args)*efficency_ltr_per_m2 print(calculate_paint(2, 10, 12, 28)) area = [18, 21, 32, 42] print(calculate_paint(2, *area)) def log_it(*args): with open(r'C:\Users\barte\Documents', 'a') as f: for string in args: f.write(string + ' ') f.write('\n') log_it('Starting processing forecasting') log_it('ERROR', 'Not enough data', 'invoices', '2020')
UTF-8
Python
false
false
463
py
17
task49_args&kwargs.py
17
0.600432
0.552916
0
18
23.833333
54
biseshbhattarai/Transaction-Microservice
5,360,119,216,023
14ce7d77215848b80a8738629c73963aa760232d
d12a9ae0b4ca53f6ecc2e525e77aaefa908059c5
/shopapp/shoptrans/urls.py
e2bcbfef1d6fcba8dcf03f84c9aed39c0a15881b
[ "MIT" ]
permissive
https://github.com/biseshbhattarai/Transaction-Microservice
64a4dbb4a279a0fae7ec8a4a8760fad787c43b18
32226e339d53842d6178f50d5988880e7a7ae7c3
refs/heads/master
"2020-03-24T14:10:24.384598"
"2019-01-21T15:01:48"
"2019-01-21T15:01:48"
142,761,062
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.urls import path from . import views from .views import CreateTransaction, ProductUpdateView, SingleDayApiView,ProductApiView, CapitalApiView,UserApiView ,CategoryApiView, ProductDetailView urlpatterns = [ path('',views.main, name="main"), path('<int:pk>/', views.transaction, name="product"), path('<int:pk>/update/', ProductUpdateView.as_view(), name="update"), path('add/', CreateTransaction.as_view(), name="add"), path('dashboard/', views.dashboard_view, name="dashboard"), path('products/', ProductApiView.as_view(), name="api-product"), path('product/<int:pk>/', ProductDetailView.as_view(), name="api-detail"), path('capital/', CapitalApiView.as_view(), name="api-capital"), path('category/', CategoryApiView.as_view(), name="api-category"), path('users/', UserApiView.as_view(), name="api-users"), path('single/',SingleDayApiView.as_view(), name="api-single" ) ]
UTF-8
Python
false
false
940
py
26
urls.py
19
0.687234
0.687234
0
23
39.869565
153
TheURBN/turg
2,817,498,569,667
c179cb149a596217b8a4f8dad39dd063035df42e
b68691423cc1c937cd13994fcb2e641ca6d9cae1
/turg/logger.py
36e9000263318761401cceadc441b95e3756d49d
[ "Apache-2.0" ]
permissive
https://github.com/TheURBN/turg
54db64a35dc500364a6c20a56caffa0241ce006f
93e459ac3adc606a9f1b15acfe414fc8d3550c10
refs/heads/master
"2021-08-07T00:59:55.531564"
"2017-11-07T08:03:00"
"2017-11-07T08:03:00"
106,119,564
8
6
null
false
"2017-11-07T08:03:01"
"2017-10-07T18:07:57"
"2017-10-26T09:45:28"
"2017-11-07T08:03:01"
895
2
4
0
Python
false
null
import logging from logging import getLogger logging.basicConfig( format='[{asctime:15}] [{name}.{funcName}:{lineno}] {levelname:7} {message}', style='{', level=logging.DEBUG, ) __all__ = ['getLogger']
UTF-8
Python
false
false
216
py
21
logger.py
11
0.652778
0.638889
0
10
20.6
81
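The module above pre-configures the root logger format and re-exports getLogger. A small usage sketch, assuming the package layout implied by the repo (turg/logger.py); the host name and message are made up:

from turg.logger import getLogger

log = getLogger(__name__)

def connect(host):
    # With the configured format this renders roughly as:
    # [<timestamp>] [<module>.connect:<lineno>] INFO    connecting to example.org
    log.info('connecting to %s', host)

connect('example.org')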
Lockeysama/DistributedCrawler
10,642,928,989,219
974bfe01aee2b8e9f675f05b68120d838ec8b307
860727752bc604ec0253f989dd4fa16a9bf1bf62
/tddc/base/util/util.py
59714a9d5eec1c69f7522c23d34ffeb6b8c343b7
[]
no_license
https://github.com/Lockeysama/DistributedCrawler
32ed72ca3c75c0b9dea5623d1f6b38e192ff4fc9
6c9bfd328fd638b6274e993371c63cf273fd121d
refs/heads/master
"2021-10-26T02:53:43.604118"
"2019-03-11T03:09:41"
"2019-03-11T03:09:41"
88,815,003
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
'''
Created on May 5, 2017

@author: chenyitao
'''

import json
import time
import datetime

from collections import defaultdict


class Singleton(type):
    """
    Singleton metaclass: caches one instance per `tag` keyword argument.
    """
    def __init__(cls, name, bases, attrs):
        super(Singleton, cls).__init__(name, bases, attrs)
        cls._instance = defaultdict()

    def __call__(cls, *args, **kw):
        tag = kw.get('tag') or 'default'
        if cls._instance.get(tag) is None:
            cls._instance[tag] = super(Singleton, cls).__call__(*args, **kw)
        return cls._instance[tag]


def object2json(obj):
    info = {k: v for k, v in obj.__dict__.items() if v is not None and '__' not in k}
    return json.dumps(info)


def timer(func):
    '''
    Decorator that prints a function's execution time.
    '''
    def _deco(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        end = time.time()
        print(end - start)
    return _deco


def count_time(func):
    def int_time(*args, **kwargs):
        start_time = datetime.datetime.now()
        ret = func(*args, **kwargs)
        over_time = datetime.datetime.now()
        total_time = (over_time - start_time).total_seconds()
        print(func, total_time)
        return ret
    return int_time
UTF-8
Python
false
false
1,290
py
70
util.py
69
0.559429
0.55309
0
57
21.140351
76
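The Singleton metaclass above keys cached instances by an optional `tag` keyword, so each tag gets exactly one instance. A usage sketch in Python 3 syntax (the `Connection` class and hosts are illustrative, not part of the record):

class Connection(metaclass=Singleton):
    def __init__(self, host='localhost', tag=None):
        self.host = host

a = Connection(host='db1', tag='primary')
b = Connection(host='db2', tag='primary')   # same tag -> cached instance returned
c = Connection(host='db3', tag='replica')   # new tag -> new instance

print(a is b)  # True: the second call reused the 'primary' instance
print(a is c)  # False: 'replica' got its own instance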
kalekseev/django-stubs
8,881,992,368,129
e41e4fcd2b80f835d9f1b4e756996ee1e29456e4
c564a1244a6e63396b5800238092b10f6b8b4d8a
/django-stubs/contrib/gis/geos/polygon.pyi
d6aac49cefa74c87fbfdf6b4f58e41616e87beb5
[ "MIT" ]
permissive
https://github.com/kalekseev/django-stubs
efc12c8a2003efdf5d4562c1e81e7df286f0fd43
1a36c6c379693ed4a472058748e00473cd229eac
refs/heads/master
"2022-05-27T21:16:42.218106"
"2022-05-06T06:00:21"
"2022-05-06T06:00:21"
176,074,455
1
0
MIT
true
"2022-05-06T08:51:10"
"2019-03-17T08:22:55"
"2022-03-24T21:40:59"
"2022-05-06T08:51:09"
2,558
1
0
0
Python
false
false
from typing import Any from django.contrib.gis.geos.geometry import GEOSGeometry as GEOSGeometry class Polygon(GEOSGeometry): def __init__(self, *args: Any, **kwargs: Any) -> None: ... def __iter__(self) -> Any: ... def __len__(self): ... @classmethod def from_bbox(cls, bbox: Any): ... @property def num_interior_rings(self): ... exterior_ring: Any = ... shell: Any = ... @property def tuple(self): ... coords: Any = ... @property def kml(self): ...
UTF-8
Python
false
false
509
pyi
199
polygon.pyi
189
0.579568
0.579568
0
19
25.789474
73
sm2774us/Algorithms
6,236,292,513,969
1bf6b4c8709d9ba37bc5097679b3ff6be41baa3d
a79e3ffc9d8bc92d3c05d3900917811ada4a2e7e
/run_all_tests.py
ad055216a0ddfadc387a06c9862ea4be8078d4ae
[]
no_license
https://github.com/sm2774us/Algorithms
93b99e67468363db5fd87a1fb296e25cfa1421cf
917af20e4780ef9af061b5d585e5ae1650f52135
refs/heads/master
"2021-12-02T16:57:03.767556"
"2021-10-06T03:46:27"
"2021-10-06T03:46:27"
163,911,523
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
import os

if __name__ == "__main__":
    # working_dir = os.getcwd()
    working_dir = os.path.dirname(os.path.realpath(__file__))
    root_dir = "."
    # Use a one-line list comprehension to get all the files in a given directory with a given file basename.
    res = [
        os.path.join(dp, f)
        for dp, dn, filenames in os.walk(root_dir)
        for f in filenames
        if os.path.basename(f) == "test.py"
    ]
    # res = [f for f in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, f)) and os.path.basename(f) == "test.py"]
    # os.path.join joins a directory and a filename into a path. You can also split a path name into directory and file with
    # os.path.split(), and you can split a filename with extension into filename and extension with os.path.splitext()
    # os.path.expanduser() will expand '~' to the absolute path of the current user's home directory on Windows, Linux or Mac
    # The rest of the os.path module:
    # http://docs.python.org/lib/module-os.path.html
    for f in res:
        # print("python ", os.path.join(root_dir, f))
        print("Running test case(s) for the directory: " + os.path.dirname(f))
        os.chdir(os.path.dirname(os.path.realpath(f)))
        print("Changed the working directory to " + os.getcwd())
        os.system("python " + os.path.basename(f))
        os.chdir(working_dir)
        print("Changed the working directory back to " + working_dir)
UTF-8
Python
false
false
1,505
py
37
run_all_tests.py
22
0.631229
0.631229
0
29
49.896552
126
Wolfwalker96/ImageProcessing-RoadDetection
816,043,825,944
be06760eaef945e4ac8fd009f0187ea4129dde39
c4f9fc7e4118ca471ede37c0c037e5822b24f6e4
/methode_one/methode_one.py
a960ddc4ed458c46245e326fde01d8076c76ca0e
[ "MIT" ]
permissive
https://github.com/Wolfwalker96/ImageProcessing-RoadDetection
a5a635fee462be222d601f2caf85f4cb03b950d4
a1321012b12e3b63c3a63cb3d2518cccc701b2ec
refs/heads/master
"2021-01-24T12:12:22.599190"
"2018-05-05T19:10:43"
"2018-05-05T19:10:43"
123,124,044
5
1
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Road contourdetection - Step : FreeScale Cup Algorithm One - Test """ import cv2 import imageio import time import numpy as np import os def algorithm(filepath): img_i = cv2.imread(filepath) img = cv2.cvtColor(img_i,cv2.COLOR_BGR2GRAY) # Contrast increase # img = np.array([np.array([ 0 if pixel < 100 else 255 for pixel in row], dtype = np.uint8) for row in img], dtype = np.uint8 ) trash,img = cv2.threshold(img,100,255.0,cv2.THRESH_BINARY+cv2.THRESH_OTSU) # Open kernel = np.ones((5,5),np.uint8) # img = cv2. # img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel) img = cv2.erode(img, kernel, iterations=3) img = cv2.dilate(img, kernel, iterations=3) #cv2.imshow("Morph",img) # Contour detection ret,thresh = cv2.threshold(img,127,255,0) image, contours, hierarchy = cv2.findContours(img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #cv2.imshow("Tresh",thresh) #cv2.imshow("Image",img) cv2.waitKey(0) # Contours that touch the bottom height, width = img.shape[:2] height -= 5 # ptit marge bottom_contours = [] for c in contours: for val in c: if val[0].item(1) >= height: bottom_contours.append(c) break # biggest area r_areas = [cv2.contourArea(c) for c in contours] max_rarea = np.max(r_areas) contour = bottom_contours[0] for c in bottom_contours: if cv2.contourArea(c) == max_rarea: contour = c return cv2.drawContours(img_i, [contour], -1, (0,255,0), 3) if __name__ == "__main__": print(__doc__) inputdir = os.path.abspath("picture_freescale/15.04.16/Avant/Sequence6/") outputdir = os.path.abspath("output/") images = os.listdir(inputdir) out_images = [] if not os.path.exists(outputdir): os.mkdir("output") if not os.path.exists(os.path.join(outputdir,"animate")): os.mkdir(os.path.join(outputdir,"animate")) counter = 0 import re images.sort(key=lambda x: int(re.findall("\d+",x)[0])) for image in images: print(f"\r{counter}/{len(images)}",end="\r") if image.endswith(".JPG"): counter+=1 img = algorithm(os.path.join(inputdir, image)) out_images.append(img) cv2.imwrite(os.path.join(outputdir,f"out_{image}"), img) print("Generating Animation") imageio.mimsave(os.path.join(outputdir,"animate",f"out_{time.strftime('%m_%d_%Y %H.%M.%S')}.gif"), out_images)
UTF-8
Python
false
false
2,541
py
11
methode_one.py
9
0.605274
0.575364
0
83
29.614458
131
brekkanegg/cram
17,291,538,374,844
574115d9e1c484206df7470e4604d24c3ed7723c
5ecd254500a753ed33cd6fcca12c165ffd6ed551
/inputs.py
04a778fcd5a4dff27b2a4de4bc44d4474e2c4092
[ "MIT" ]
permissive
https://github.com/brekkanegg/cram
121a3738b09bf198107c9ef5734f54bc0b97bdba
fa5a7b6921a5e2473980504b85a41bc2d5d6b9f8
refs/heads/master
"2020-03-07T20:23:52.182434"
"2018-08-22T01:54:54"
"2018-08-22T01:54:54"
127,696,263
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import tensorflow as tf import os import numpy as np from glob import glob from PIL import Image import random from tensorflow.python.keras.utils import to_categorical slim = tf.contrib.slim """ issue: image scale is 0-1; switch to feed_dict """ def unpickle(file): import pickle with open(file, 'rb') as fo: dict = pickle.load(fo, encoding='bytes') return dict class base_saliency_model(): def __init__(self, image_size, reuse): self.image_size = image_size self.checkpoint_dir = os.path.join('base_saliency', 'checkpoint', str(image_size)) self.summary_dir = os.path.join('base_saliency', 'summary', str(image_size)) self.build_model(reuse) def build_model(self, reuse): config = tf.ConfigProto() config.gpu_options.allow_growth = True self.sess = tf.Session(config=config) # do not close session self.sess.run(tf.local_variables_initializer()) self.x = tf.placeholder(tf.float32, shape=[None, self.image_size, self.image_size, 3], name='x') # encoder-decoder net = self.x # input if self.image_size == 224: # vgg16 n_filters = [16, 32, 64, 128, 128] elif self.image_size == 32: # vgg16 n_filters = [16, 32, 64] with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer(), weights_regularizer=slim.l2_regularizer(0.001), reuse=reuse): with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True, activation_fn=tf.nn.elu, updates_collections=None, is_training=False, reuse=reuse): #is_training: false # encoder print('encoder') for i in range(len(n_filters)): net = slim.conv2d(net, n_filters[i], 3, stride=1, padding='SAME', scope='conv1_' + str(i)) net = slim.batch_norm(net, scope='e_bn1_' + str(i)) print(net.shape) net = slim.conv2d(net, n_filters[i], 3, stride=2, padding='SAME', scope='conv2_' + str(i)) net = slim.batch_norm(net, scope='e_bn2_' + str(i)) print(net.shape) # fc # todo: use fc?????
# net = slim.conv2d(net, n_filters[-1]*2, 3, stride=8, padding='VALID', scope='fc1') # print(net.shape)5 print('decoder') net = slim.conv2d(net, n_filters[-1] * 2, 1, stride=1, padding='SAME', scope='fc2') net = slim.batch_norm(net, scope='fc_bn') print(net.shape) # net = slim.conv2d_transpose(net, n_filters[-1], 3, stride=8, padding='SAME', scope='deconv1_' + '-1') # print(net.shape) # decoder # print('decoder') for i in range(len(n_filters)): net = slim.conv2d_transpose(net, n_filters[::-1][i], 3, stride=2, padding='SAME', scope='deconv1_' + str(i)) net = slim.batch_norm(net, scope='d_bn1_' + str(i)) print(net.shape) net = slim.conv2d_transpose(net, n_filters[::-1][i], 3, stride=1, padding='SAME', scope='deconv2_' + str(i)) net = slim.batch_norm(net, scope='d_bn2_' + str(i)) print(net.shape) # reshape print('reshape') net = slim.conv2d_transpose(net, 4, 1, padding='SAME', scope='reshape1') net = slim.batch_norm(net, scope='r_bn') print(net.shape) # last layer net = slim.conv2d_transpose(net, 1, 1, padding='SAME', scope='reshape2') print(net.shape) pred1 = tf.nn.sigmoid(net) pred = tf.reshape(pred1, shape=[-1, self.image_size, self.image_size]) # n_filters = [16, 32, 64, 128, 128] # # with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], # activation_fn=tf.nn.relu, # weights_initializer=tf.contrib.layers.xavier_initializer(), # normalizer_fn=slim.batch_norm, # weights_regularizer=slim.l2_regularizer(0.001), # reuse=reuse): # # encoder # for i in range(len(n_filters)): # net = slim.conv2d(net, n_filters[i], 3, stride=1, padding='SAME', scope='conv1_' + str(i)) # net = slim.conv2d(net, n_filters[i], 3, stride=2, padding='SAME', scope='conv2_' + str(i)) # # net = slim.conv2d(net, n_filters[-1] * 2, 1, stride=1, padding='SAME', scope='fc2') # # # decoder # for i in range(len(n_filters)): # net = slim.conv2d_transpose(net, n_filters[::-1][i], 3, stride=2, padding='SAME', # scope='deconv1_' + str(i)) # net = slim.conv2d_transpose(net, n_filters[::-1][i], 3, stride=1, padding='SAME', # scope='deconv2_' + str(i)) # # # reshape # net = slim.conv2d_transpose(net, 4, 1, padding='SAME', scope='reshape1') # net = slim.conv2d_transpose(net, 1, 1, padding='SAME', activation_fn=None, scope='reshape2') # # pred1 = tf.nn.sigmoid(net) # pred = tf.reshape(pred1, shape=[-1, self.image_size, self.image_size]) self.pred = pred1 # shape should be rank 4 # load model print('\nCheckpoint of base saliency model: ', self.checkpoint_dir) print(" [*] Reading Checkpoint...") ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir) saver = tf.train.Saver(max_to_keep=4) saver.restore(self.sess, ckpt.model_checkpoint_path) def get_saliency(self, images): rough_saliency = self.sess.run([self.pred], feed_dict={self.x: images}) rough_saliency = np.reshape(rough_saliency[0], [len(images), self.image_size, self.image_size, 1]) # [images.shape[0], self.image_size, self.image_size, 1]) return rough_saliency ################ class dataloader_cifar10(object): def __init__(self, batch_size, saliency=False, mode='train', reuse=False, sep=False, x255=False): self.saliency = saliency self.mode = mode self.image_size = 32 self.class_num = 10 self.sep = sep self.x255 = x255 if mode == 'train': #or mode == 'control': xs = {} ys = {} for i in range(1, 6): train_i = unpickle("data/cifar-10-batches-py/data_batch_{}".format(i)) x_i = np.reshape(train_i[b'data'], [-1, 3, 32, 32]) x_i = x_i.transpose([0, 2, 3, 1]) y_i = train_i[b'labels'] xs[i] = x_i ys[i] = y_i x = np.concatenate([xs[i] for i in range(1, 6)]) y = np.concatenate([ys[i] 
for i in range(1, 6)]) elif mode == 'val': test = unpickle("data/cifar-10-batches-py/test_batch") x = np.reshape(test[b'data'], [-1, 3, 32, 32]) x = x.transpose([0, 2, 3, 1]) y = test[b'labels'] # from tensorflow.python.keras._impl.keras.datasets.cifar10 import load_data # # if mode == 'train': #or mode == 'control': # (x, y), (_, _) = load_data() # # elif mode == 'val': # (_, _), (x, y) = load_data() # # else: # test, control # (_, _), (x, y) = load_data() self.x = x # y = y[:, 0] self.y = y # y_one_hot = to_categorical(y, num_classes=self.class_num) # self.y = y_one_hot if saliency: self.saliency_model = base_saliency_model(self.image_size, reuse=reuse) self.batch_size = batch_size self.data_count = x.shape[0] self.num_batch = int(self.data_count / self.batch_size) self.pointer = 0 def next_batch(self): self.pointer = (self.pointer + 1) % self.num_batch start_pos = self.pointer * self.batch_size batch_images = self.x[start_pos:start_pos + self.batch_size] batch_labels = self.y[start_pos:start_pos + self.batch_size] if self.saliency: batch_saliencies = self.saliency_model.get_saliency(batch_images) if self.x255: batch_saliencies *= 255 batch_images = np.concatenate([batch_images, batch_saliencies], axis=3) return batch_images, batch_labels def shuffle(self): combined = list(zip(self.x, self.y)) random.shuffle(combined) self.x, self.y = zip(*combined) ########### class dataloader_cifar100(object): def __init__(self, batch_size, saliency=False, mode='train', reuse=False, sep=False, x255=False, coarse_label=False): self.saliency = saliency self.mode = mode self.image_size = 32 self.class_num = 100 self.sep = sep self.x255 = x255 if mode == 'train': #or mode == 'control': train = unpickle("data/cifar-100-python/train") x = np.reshape(train[b'data'], [-1, 3, 32, 32]) x = x.transpose([0, 2, 3, 1]) y = train[b'fine_labels'] if coarse_label: y = train[b'coarse_labels'] elif mode == 'val': test = unpickle("data/cifar-100-python/test") x = np.reshape(test[b'data'], [-1, 3, 32, 32]) x = x.transpose([0, 2, 3, 1]) y = test[b'fine_labels'] if coarse_label: y = test[b'coarse_labels'] else: # test, control pass self.x = x # y = y[:, 0] self.y = y # y_one_hot = to_categorical(y, num_classes=self.class_num) # self.y = y_one_hot if saliency: self.saliency_model = base_saliency_model(self.image_size, reuse=reuse) self.batch_size = batch_size self.data_count = x.shape[0] self.num_batch = int(self.data_count / self.batch_size) self.pointer = 0 def next_batch(self): self.pointer = (self.pointer + 1) % self.num_batch start_pos = self.pointer * self.batch_size batch_images = self.x[start_pos:start_pos + self.batch_size] batch_labels = self.y[start_pos:start_pos + self.batch_size] if self.saliency: batch_saliencies = self.saliency_model.get_saliency(batch_images) if self.x255: batch_saliencies *= 255 batch_images = np.concatenate([batch_images, batch_saliencies], axis=3) return batch_images, batch_labels def shuffle(self): combined = list(zip(self.x, self.y)) random.shuffle(combined) self.x, self.y = zip(*combined) ########### class dataloader_cub200(object): def __init__(self, batch_size, saliency=False, mode='train', reuse=False, sep=False, x255=False): self.saliency = saliency self.mode = mode self.image_size = 224 self.class_num = 200 self.sep = sep self.x255 = x255 with open('data/CUB_200_2011/images.txt', 'r') as f: img_dir = f.read().split('\n') img_dir.remove('') with open('data/CUB_200_2011/train_test_split.txt', 'r') as f: train_test = f.read().split('\n') train_test.remove('') with 
open('data/CUB_200_2011/image_class_labels.txt', 'r') as f: img_class = f.read().split('\n') img_class.remove('') import pandas as pd df = pd.DataFrame() df['img_dir'] = ['data/CUB_200_2011/images/'+i.split(' ')[1] for i in img_dir] df['is_train'] = [i.split(' ')[1] for i in train_test] df['class'] = [i.split(' ')[1] for i in img_class] df['ann'] = ['data/CUB_200_2011/segmentations/' + i.split(' ')[1][:-3] + 'png' for i in img_dir] train, test = df[df['is_train'] == '1'], df[df['is_train'] == '0'] if mode == 'train' or mode == 'gt': x, y = np.array(train['img_dir']), np.array(train['class']).astype('int') - 1 elif mode == 'val': x, y = np.array(test['img_dir']), np.array(test['class']).astype('int') - 1 else: # test, control pass self.x = x self.y = y if mode == 'gt': self.s = np.array(train['ann']) elif saliency: self.saliency_model = base_saliency_model(self.image_size, reuse=reuse) self.batch_size = batch_size self.data_count = len(x) self.num_batch = int(self.data_count / self.batch_size) self.pointer = 0 # fixme: def next_batch(self): self.pointer = (self.pointer + 1) % self.num_batch start_pos = self.pointer * self.batch_size batch_images_dir = self.x[start_pos:start_pos + self.batch_size] def to_rgb2(im): w, h = im.shape ret = np.empty((w, h, 3), dtype=np.uint8) ret[:, :, :] = im[:, :, np.newaxis] return ret temp_bi = [np.array(Image.open(_d).resize([self.image_size, self.image_size])) if len(np.array(Image.open(_d)).shape) is 3 else to_rgb2(np.array(Image.open(_d).resize([self.image_size, self.image_size]))) for _d in batch_images_dir] batch_images = np.array(temp_bi) batch_labels = self.y[start_pos:start_pos + self.batch_size] if self.mode == 'gt': batch_saliencies_dir = self.s[start_pos:start_pos + self.batch_size] temp_bs = [np.array(Image.open(_d).resize([self.image_size, self.image_size])) for _d in batch_saliencies_dir] #fixeme: _temp_bs = [np.reshape(b, [self.image_size, self.image_size, 1]) for b in temp_bs] batch_saliencies = np.array(_temp_bs) if self.x255: batch_saliencies *= 255 batch_images = np.concatenate([batch_images, batch_saliencies], axis=3) elif self.saliency: batch_saliencies = self.saliency_model.get_saliency(batch_images) if self.x255: batch_saliencies *= 255 batch_images = np.concatenate([batch_images, batch_saliencies], axis=3) return batch_images, batch_labels def shuffle(self): if self.mode == 'gt': combined = list(zip(self.x, self.y, self.s)) random.shuffle(combined) self.x, self.y, self.s = zip(*combined) else: combined = list(zip(self.x, self.y)) random.shuffle(combined) self.x, self.y = zip(*combined) # outdated # class dataloader_cub200(object): # def __init__(self, batch_size, saliency=False, mode='train'): # self.saliency = saliency # self.mode = mode # self.image_size = 224 # self.class_num = 200 # # x = glob('data/CUB_200_2011/images/*/*.jpg') # import re # regex = re.compile(r'images/\d\d\d.') # y = [int(regex.search(xx).group()[7:-1]) for xx in x] # y = np.array(y).astype(int) - 1 # # cut = int(len(y) * 0.6) # # combined = list(zip(x, y)) # random.seed(327) # random.shuffle(combined) # x, y = zip(*combined) # shuffle order # # if mode == 'train' or mode == 'control' or mode == 'gt': # x, y = x[:cut], y[:cut] # # elif mode == 'val': # x, y = x[cut:], y[cut:] # # else: # test, control # pass # # self.x = x # self.y = y # # if saliency: # if not mode == 'control' and not mode == 'gt': # reuse = False # if mode == 'val': # reuse = True # self.saliency_model = base_saliency_model(self.image_size, reuse=reuse) # # if mode == 'gt': # self.s = 
glob('data/CUB_200_2011/segmentations/*/*.png') # # # self.batch_size = batch_size # self.data_count = len(x) # self.num_batch = int(self.data_count / self.batch_size) # self.pointer = 0 # # # # fixme: # def next_batch(self): # self.pointer = (self.pointer + 1) % self.num_batch # # start_pos = self.pointer * self.batch_size # # batch_images_dir = self.x[start_pos:start_pos + self.batch_size] # # def to_rgb2(im): # w, h = im.shape # ret = np.empty((w, h, 3), dtype=np.uint8) # ret[:, :, :] = im[:, :, np.newaxis] # return ret # # temp_bi = [np.array(Image.open(_d).resize([self.image_size, self.image_size])) # if len(np.array(Image.open(_d)).shape) is 3 # else to_rgb2(np.array(Image.open(_d).resize([self.image_size, self.image_size]))) # for _d in batch_images_dir] # batch_images = np.array(temp_bi) # # batch_labels = self.y[start_pos:start_pos + self.batch_size] # # if self.saliency: # if self.mode == 'control': # _batch_saliencies = [(np.random.randn(self.image_size, self.image_size)*255).astype('uint8') # for i in range(self.batch_size)] # batch_saliencies = [np.reshape(b, [self.image_size, self.image_size, 1]) # for b in _batch_saliencies] # elif self.mode == 'gt': # # batch_saliencies_dir = self.s[start_pos:start_pos + self.batch_size] # temp_bs = [np.array(Image.open(_d).resize([self.image_size, self.image_size])) # # if len(np.array(Image.open(_d)).shape) is 3 # # else to_rgb2(np.array(Image.open(_d).resize([self.image_size, self.image_size]))) # for _d in batch_saliencies_dir] # _temp_bs = [np.reshape(b, [self.image_size, self.image_size, 1]) # for b in temp_bs] # batch_saliencies = np.array(_temp_bs) # # else: # batch_saliencies = self.saliency_model.get_saliency(batch_images) # # batch_images = np.concatenate([batch_images, batch_saliencies], axis=3) # # return batch_images, batch_labels
UTF-8
Python
false
false
18,968
py
6
inputs.py
5
0.510449
0.489446
0
511
36.082192
119
duanbibo/UIAUTO
7,988,639,204,051
219df97801d51620d1cbe1888fcb372bff06910f
0e9f9ec47457fe148120601b75b31b89e8180a62
/pages/index/patientPage.py
b0f9a3d50f3e6e477da48a1f48edb570552ab0ce
[]
no_license
https://github.com/duanbibo/UIAUTO
5a2beef7fc48d5692e58406b29c212b80c84c9ea
97b7ea508146cf293d90756e80161eeb4b91b4b6
refs/heads/master
"2020-06-23T06:24:53.070009"
"2019-07-24T08:20:42"
"2019-07-24T08:20:42"
198,543,153
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
Patient page
'''
from common import basePage
import time


class PatientPage(basePage.BasePage):
    '''
    Patient-related elements for the Xinzhili (心之力) institution
    '''
    xzl_intohuanzhe = ('xpath', '//*[@class="institution-department"]/ul/li[3]/a')
    xzl_addone = ('xpath', '//*[@id="wrapper"]/div/div/div[2]/div/section/div[2]/div/div/div/div[2]/div[1]/button[1]')
    xzl_tel = ('id', 'tel')
    xzl_clickchoseyiyuan = ('xpath', '//*[@class="choose__role"]/div/div/div/div/div')
    # the title text means "test newly created tier-1 hospital institution"; it must
    # match the on-screen label exactly, so the Chinese locator is kept as-is
    xzl_choseyiyuan = ('xpath', '//*[@title="测试新建一级医院机构"]')
    xzl_choseyisheng = ('xpath', '//*[@class="choose__role-content"]/div/div/img')  # select the first physician
    xzl_addok = ('xpath', '//*[@class="patient--add-btn"]')

    '''
    Operations on the patient/nurse pages
    '''
    def xinzhili_intopatient(self):
        '''Click into the patient tab page'''
        self.click(self.xzl_intohuanzhe)
        time.sleep(3)

    def xinzhili_addone(self, tel=0):
        '''Add a single patient'''
        self.click(self.xzl_addone)
        self.sendKeys(self.xzl_tel, text=tel)
        self.click(self.xzl_clickchoseyiyuan)
        time.sleep(1)
        self.click(self.xzl_choseyiyuan)
        time.sleep(1)
        self.click(self.xzl_choseyisheng)
        self.click(self.xzl_addok)
        time.sleep(1)
        self.get_screen(file_name="patient added successfully")
        time.sleep(1)
UTF-8
Python
false
false
1,391
py
30
patientPage.py
18
0.578781
0.569279
0
46
26
115
refikkocoglu/report-generator
4,604,204,956,443
3b8c4e397c302a938a0db1f33a46405831f5e4ff
63de37890d4cb99f4925a7f27aa25faff3c76b09
/utils/getent_group.py
781d3d36cbb24852c9c6ff8bfcc95911b7b85b3c
[]
no_license
https://github.com/refikkocoglu/report-generator
e2aeeb54009d0d063a7ad37615732c07252834a9
16feabcaeb68933b1536d392fc09821a78c4f87b
refs/heads/master
"2020-09-06T15:59:59.032588"
"2020-02-10T14:44:23"
"2020-02-10T14:44:23"
220,472,931
1
0
null
true
"2019-11-08T13:31:12"
"2019-11-08T13:31:11"
"2019-09-13T10:22:47"
"2019-09-13T10:22:57"
247
0
0
0
null
false
false
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2019 Gabriele Iannetti <g.iannetti@gsi.de> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import logging import subprocess import os def get_user_groups(): user_groups = list() output = subprocess.check_output(['getent', 'group']).decode() output_lines = output.strip().split('\n') for line in output_lines: fields = line.split(':', 3) group = fields[0] gid = int(fields[2]) if gid > 999: logging.debug("Found User Group %s:%s" % (group, gid)) user_groups.append(group) else: logging.debug("Ignoring User Group: %s:%s" % (group, gid)) return user_groups
UTF-8
Python
false
false
1,314
py
27
getent_group.py
20
0.66895
0.659056
0
45
28.155556
71
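The GID > 999 cutoff above follows the common convention that system groups live below 1000. A quick illustration of the same parsing logic on a literal `getent group` line (the group name and ids are made up):

line = "projectusers:x:1042:alice,bob"

fields = line.split(':', 3)
group, gid = fields[0], int(fields[2])

if gid > 999:
    print("user group: %s (%d)" % (group, gid))      # -> user group: projectusers (1042)
else:
    print("system group, ignored: %s (%d)" % (group, gid))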
KylieLAnglin/agileteacher
5,231,270,168,497
25d3cbcc9ee954e6c849d3a71de4aa6b0b6b3216
71211e5435c4eb6f2ab847a5ccd2eb0c1b300b69
/EAD/hear_vs_hope.py
49ae916baebeba10079ce999d34abf852f6e6fb7
[]
no_license
https://github.com/KylieLAnglin/agileteacher
586d247fd276071dec14e1e9495e7423f74581a1
682c3c70debe8e71314b61b166339aec7becc647
refs/heads/main
"2023-04-05T19:48:59.623705"
"2021-05-10T11:30:32"
"2021-05-10T11:30:32"
313,020,186
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# %%
import pandas as pd
import statsmodels.formula.api as smf

# NOTE: `start` (data paths), `qualtrics` (survey helpers), and `survey1_labels`
# are assumed to come from project-local modules that are not part of this file.

HEAR = start.RAW_DATA_PATH + "LIWC_Q65.csv"
HOPE = start.RAW_DATA_PATH + "LIWC_Q66.csv"

hear = pd.read_csv(HEAR)
hope = pd.read_csv(HOPE)

hear = qualtrics.select_valid_rows(hear)
hope = qualtrics.select_valid_rows(hope)

cols = qualtrics.search_column_labels(
    survey1_labels, "What did you hear students say in the discussion?"
)

hear = hear[["Q65", "RecipientEmail", "WC", "Analytic", "Clout", "Authentic", "Tone"]]
hear = hear.rename(columns={"Q65": "text", "RecipientEmail": "email"})
hear["hear"] = 1
hear["hope"] = 0

hope = hope[
    [
        "Q66",
        "RecipientEmail",
        "WC",
        "Analytic",
        "Clout",
        "Authentic",
        "Tone",
    ]
]
hope = hope.rename(columns={"Q66": "text", "RecipientEmail": "email"})
hope["hear"] = 0
hope["hope"] = 1

df = hear.append(hope)

# %% Compare word counts
# On average, teachers use 15 more words in describing what they heard than in
# describing what they hope to hear
model = "WC ~ 1 + hope"
results = smf.ols(model, data=df).fit()
print(results.summary())

# %% More analytic when they talk about what they hope to hear
# A high number reflects formal, logical, and hierarchical thinking; lower
# numbers reflect more informal, personal, here-and-now, and narrative thinking.
model = "Analytic ~ 1 + hope"
results = smf.ols(model, data=df).fit()
print(results.summary())

# %%
###
# Clout -- a high number suggests that the author is speaking from the
# perspective of high expertise and is confident; low Clout numbers suggest a
# more tentative, humble, even anxious style
##
model = "Clout ~ 1 + hope"
results = smf.ols(model, data=df).fit()
print(results.summary())

# %%
####
# Authentic -- higher numbers are associated with a more honest, personal, and
# disclosing text; lower numbers suggest a more guarded, distanced form of
# discourse
##
model = "Authentic ~ 1 + hope"
results = smf.ols(model, data=df).fit()
print(results.summary())

# %%
# Emotional tone -- a high number is associated with a more positive, upbeat
# style; a low number reveals greater anxiety, sadness, or hostility. A number
# around 50 suggests either a lack of emotionality or different levels of
# ambivalence.
model = "Tone ~ 1 + hope"
results = smf.ols(model, data=df).fit()
print(results.summary())

# %%
UTF-8
Python
false
false
2,328
py
24
hear_vs_hope.py
24
0.679124
0.667955
0
83
27.048193
109
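Each model above regresses a LIWC score on a single `hope` dummy, so the fitted intercept is the "hear" group mean and the `hope` coefficient is the between-prompt difference. A toy check of that reading with fabricated scores:

import pandas as pd
import statsmodels.formula.api as smf

# Fabricated data: 'hear' responses average 60 words, 'hope' responses average 75.
toy = pd.DataFrame({
    "WC": [58, 62, 60, 74, 76, 75],
    "hope": [0, 0, 0, 1, 1, 1],
})

fit = smf.ols("WC ~ 1 + hope", data=toy).fit()
print(fit.params)  # Intercept ~ 60 ('hear' mean), hope ~ 15 (difference in means)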
hr9457/EDD_1S2019_P1_201314296
5,686,536,708,191
f8f10d1a3829feb5d74a1c65ecbb7a087026d48f
dd72a26fb61f22078a900603ff2ce174595b7f38
/Cola.py
2c1a3d8052e39e953f8bf529bf7c3316e648b8d5
[]
no_license
https://github.com/hr9457/EDD_1S2019_P1_201314296
43cf029139b03b9af5a98d2cd70bb9577abf0818
4e487e537dd640d41bb2d7899c536b078f474d60
refs/heads/master
"2022-02-05T21:44:17.759332"
"2019-08-15T05:04:42"
"2019-08-15T05:04:42"
200,438,152
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os
# imports
# import for reading and writing a file
from io import *


# node class
# structure of a node
class Nodo:
    # constructor
    def __init__(self, nombreUsuario, score):
        self.nombreUsuario = nombreUsuario
        self.score = score
        self.siguienteCola = None
#-------------------------------------------------------------------------------------------------


# structure of the queue
# queue class
class Cola:
    # constructor
    def __init__(self):
        self.primeroCola = None
        self.ultimoCola = None
        self.sizeCola = 0
    #-------------------------------------------------------------------------------------------------

    # method that enqueues an element
    def addCola(self, nombreUsuario, score):
        if self.primeroCola == None and self.ultimoCola == None:
            # create a new node
            nuevoNodo = Nodo(nombreUsuario, score)
            self.primeroCola = nuevoNodo
            self.ultimoCola = nuevoNodo
            self.sizeCola += 1
        else:
            # create a new node
            nuevoNodo = Nodo(nombreUsuario, score)
            self.ultimoCola.siguienteCola = nuevoNodo
            self.ultimoCola = nuevoNodo
            self.sizeCola += 1
    #-------------------------------------------------------------------------------------------------

    # method that dequeues an element
    def unqueued(self):
        if self.primeroCola == None and self.ultimoCola == None:
            print("the queue is empty")
        elif self.primeroCola == self.ultimoCola:
            self.primeroCola = None
            self.ultimoCola = None
            self.sizeCola -= 1
        else:
            temporal = self.primeroCola
            self.primeroCola = self.primeroCola.siguienteCola
            temporal = None
            self.sizeCola -= 1
    #-------------------------------------------------------------------------------------------------

    # get the current size of the queue
    def getSizeCola(self):
        return self.sizeCola
    #-------------------------------------------------------------------------------------------------

    # method that prints the queue
    def printCola(self):
        # temporary variable
        primeroTemporal = self.primeroCola
        # current size of the queue
        temporalSize = self.sizeCola
        while temporalSize > 0:
            print(primeroTemporal.nombreUsuario)
            primeroTemporal = primeroTemporal.siguienteCola
            temporalSize -= 1
    #-------------------------------------------------------------------------------------------------

    #---------------Writing the queue report----------------------------------------------------------
    def GraCola(self):
        #-----------------------------------------------------------------------------------------------------
        # read the file, then clear it
        archivo_texto = open("Cola.txt", "r")
        lineas = archivo_texto.readlines()
        archivo_texto.close()
        # empty out the file
        archivo_texto = open("Cola.txt", "w")
        for linea in lineas:
            archivo_texto.write("")
        archivo_texto.close()
        #------------------------------------------------------------------------------------------------------
        # overwrite the file to generate the graphviz source
        archivo_texto = open("Cola.txt", "w")
        archivo_texto.write("digraph{\n")  # header
        archivo_texto.write("rankdir=LR;\n")  # direction
        archivo_texto.write("subgraph cluster_0{color = lightgrey; node[shape=record]\n")  # title placement
        # walk the list
        # temporary variable
        primeroTemporal = self.primeroCola
        # current size of the queue
        temporalSize = self.sizeCola
        numeroDeNodo = 0  # variable that numbers the nodes
        while temporalSize > 0:
            archivo_texto.write("Nodo" + str(numeroDeNodo) + "[label=\" { " + str(primeroTemporal.nombreUsuario) + "," + str(primeroTemporal.score) + " | } \"];\n")
            primeroTemporal = primeroTemporal.siguienteCola
            numeroDeNodo += 1   # advance the node number
            temporalSize -= 1   # decrease the counter that walks the list
            # linking the nodes together
            if temporalSize <= 0:
                numeroDeNodo = numeroDeNodo - 1  # variable that numbers the nodes
                while temporalSize + 1 < self.sizeCola:
                    archivo_texto.write("Nodo" + str(numeroDeNodo) + "->Nodo" + str(numeroDeNodo - 1) + "\n")
                    numeroDeNodo -= 1
                    temporalSize += 1
                break
        # end of the while loop
        archivo_texto.write("Nodo" + str(numeroDeNodo) + "->Null\n")
        # end of the queue walk
        archivo_texto.write("label = \" Cola \";\n")  # title of the graph
        archivo_texto.write("}\n")
        archivo_texto.write("}\n")
        archivo_texto.close()  # close the file
    #-------------------------------------------------------------------------------------------------

    #-----------open the report that was generated----------------------------------------------------
    def imagenDot(self):
        # create the dot image
        os.system("dot -Tpng C:\\Users\\HECTOR\\Documents\\EDD\\EDD_1S2019_P1_201314296\\Cola.txt -o C:\\Users\\HECTOR\\Documents\\EDD\\EDD_1S2019_P1_201314296\\Cola.png ")
        # open the dot image
        os.system("C:\\Users\\HECTOR\\Documents\\EDD\\EDD_1S2019_P1_201314296\\Cola.png")
    #-------------------------------------------------------------------------------------------------
UTF-8
Python
false
false
5,695
py
16
Cola.py
11
0.482177
0.471115
0
134
41.477612
173
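A short usage sketch for the queue class above (names and scores invented); GraCola and imagenDot are left out because they depend on hard-coded local Graphviz paths:

cola = Cola()
cola.addCola("ana", 120)
cola.addCola("luis", 95)
cola.addCola("sofia", 140)

print(cola.getSizeCola())  # 3
cola.printCola()           # ana, luis, sofia (FIFO order)

cola.unqueued()            # removes "ana", the oldest entry
print(cola.getSizeCola())  # 2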
rileyjohngibbs/hard-twenty
9,603,546,905,460
c0140a33e226de272808407beb837d416b341c2a
3b5afb3c5d274d2068d9d7115d751cab71572843
/compose_and_test/test_compose.py
3b3f813cf07ea8e919aa645874caa68272dff349
[]
no_license
https://github.com/rileyjohngibbs/hard-twenty
15ace68e7367813cf1594c5f453c4107cc825927
02dfc697554058e87575df4b3f8707387e9e95c6
refs/heads/master
"2022-11-09T02:01:57.099232"
"2022-10-19T16:19:22"
"2022-10-19T16:19:22"
58,075,059
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import unittest from compose import * from compose_oneline import compose as compose_oneline class SimpleTestCase(unittest.TestCase): def test_compose(self): c = compose(add, mult, square) self.assertEqual(144, c(5)) def test_oneline(self): c = compose_oneline(add, mult, square) self.assertEqual(144, c(5)) class StringTestCase(unittest.TestCase): def test_compose(self): with self.assertRaises(TypeError): c = compose(add, mult, square) c("hello") def test_oneline(self): with self.assertRaises(TypeError): c = compose_oneline(add, mult, square) c("hello") class NonFunctionTestCase(unittest.TestCase): def test_compose(self): with self.assertRaises(TypeError): c = compose(add, 5) c(5) def test_oneline(self): with self.assertRaises(TypeError): c = compose_oneline(add, 5) c(5) class NoArgsTestCase(unittest.TestCase): def test_compose(self): with self.assertRaises(IndexError): c = compose() c(5) def test_oneline(self): c = compose_oneline() self.assertEqual(5, c(5)) if __name__ == '__main__': unittest.main()
UTF-8
Python
false
false
1,092
py
41
test_compose.py
35
0.698718
0.684982
0
52
20
54
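The tests above import `compose` from files that are not part of this record, so the implementation is unknown. One sketch consistent with the expectations (144 from compose(add, mult, square)(5), IndexError for empty compose(), identity for the empty one-liner) applies the functions left to right; the helper definitions add(x)=x+1, mult(x)=x*2, square(x)=x**2 are inferred guesses, and the real compose.py may differ:

from functools import reduce

def compose(*funcs):
    """Left-to-right pipeline: compose(f, g, h)(x) == h(g(f(x)))."""
    def composed(x):
        result = funcs[0](x)   # IndexError here when compose() was called with no functions
        for f in funcs[1:]:
            result = f(result)
        return result
    return composed

compose_oneline = lambda *funcs: (lambda x: reduce(lambda acc, f: f(acc), funcs, x))

add = lambda x: x + 1      # assumed helpers, not taken from the record
mult = lambda x: x * 2
square = lambda x: x ** 2

print(compose(add, mult, square)(5))   # 144 == square(mult(add(5)))
print(compose_oneline()(5))            # 5: the empty one-liner acts as identity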
gschen/where2go-python-test
12,068,858,114,472
747160b74c65d80f6cf810c8f89014e0000f9aab
dd4622390bb7122db37a889e647f28004a265ccd
/1906101059王曦/11月/day20191105/10.7.py
60ac11a5ba0549540f45cbda95d4866c7e19a1a3
[]
no_license
https://github.com/gschen/where2go-python-test
c8adaa095aa8a095401f98c7b3f8f17c72d0532d
eb9e6011b8d33fa45a6b1ef853affd01a296db21
refs/heads/master
"2022-12-13T19:45:09.874691"
"2020-10-15T13:12:52"
"2020-10-15T13:12:52"
211,591,156
4
1
null
false
"2022-12-05T07:46:12"
"2019-09-29T02:35:00"
"2020-10-15T13:13:10"
"2022-12-05T07:46:11"
79,858
3
1
24
Python
false
false
# Find the longest substring without repeated characters (brute force over all slices)
s = input('Please enter a string: ')
list1 = list(s)
list2 = []
list3 = []
for i in range(len(list1) + 1):
    for n in range(len(list1) + 1):
        if i < n and len(list1[i:n]) == len(set(list1[i:n])):
            list2.append(len(list1[i:n]))
            list3.append(list1[i:n])
print(max(list2))
print(list3[list2.index(max(list2))])
UTF-8
Python
false
false
332
py
1,595
10.7.py
1,420
0.575472
0.522013
0
11
27.818182
57
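The nested loops above check every slice, which is quadratic in the input length. The same answer comes out of the standard sliding-window pass in linear time; a sketch:

def longest_unique_substring(s):
    # The window [start, i] always holds distinct characters.
    last_seen = {}
    start = best_start = best_len = 0
    for i, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= start:
            start = last_seen[ch] + 1        # jump past the previous occurrence
        last_seen[ch] = i
        if i - start + 1 > best_len:
            best_len, best_start = i - start + 1, start
    return s[best_start:best_start + best_len]

print(len(longest_unique_substring("abcabcbb")))  # 3
print(longest_unique_substring("abcabcbb"))       # 'abc'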
DJones0101/PHYS201
4,750,233,875,096
d4acbf44b02d60e371b04880c4210ed8a66c468a
85dc3ec228502c10aa32110cba934e1d181d0f8d
/lab5.py
c5ea9b3e422171db62e5c5e8b0cc737ea98a1b29
[]
no_license
https://github.com/DJones0101/PHYS201
5ce878e581e71dfe256c99481c80e6e3f2e55dfb
60d7699b8e0de93c1d36428584713b19d60028f9
refs/heads/master
"2020-03-31T01:16:10.276503"
"2018-12-06T19:02:09"
"2018-12-06T19:02:09"
151,774,101
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3
import matplotlib.pyplot as plt, pylab


def part1():
    # I (amps)
    x = [3.12, 6.18, 9.28, 12.36, 15.52, 18.62, 21.8, 24.9, 28.0, 31.4]
    # V (Volts)
    y = [float(v) for v in range(1, 11)]

    # This gives m, b from (y = mx + b)
    m, b = pylab.polyfit(x, y, 1)

    # This calculates the fitted line for us
    slope = pylab.polyval([m, b], x)

    # According to the lab R = m, but it's * 10^-3
    R_slope = m * 10 ** -3
    R_dmm = .000325
    print("Part 1")
    print("R_slope = %f" % (R_slope))

    # percent difference: |a - b| divided by the mean of the two values
    percent_diff = abs(R_dmm - R_slope) / ((R_dmm + R_slope) / 2) * 100
    print("percent difference %f" % (percent_diff))

    plt.plot(x, slope, "-r", label="slope")
    plt.scatter(x, y)
    plt.title("Carbon Resistor")
    plt.ylabel("V (Volts)")
    plt.xlabel("I (amps) * 10^-3")
    plt.show()


def part2():
    # I (amps)
    x = [67.3, 93.0, 114.7, 133.6, 150.4, 166.4, 180.9, 194.6]
    # V (Volts)
    y = [(v * .50) for v in range(0, 8)]

    # This gives m, b from (y = mx + b)
    m, b = pylab.polyfit(x, y, 1)

    # This calculates the fitted line for us
    slope = pylab.polyval([m, b], x)

    # According to the lab, R_min and R_max are the lowest and highest values of the slope
    # removing the first value because it's negative
    slope1 = list(filter(lambda x: x > 0, slope))
    print(slope1)
    R_min = min(slope1) * 10 ** -3
    R_max = max(slope1) * 10 ** -3

    print("Part 2")
    print("R_min = %f" % (R_min))
    print("R_max = %f" % (R_max))

    # percent difference: |a - b| divided by the mean of the two values
    percent_diff = abs(R_max - R_min) / ((R_max + R_min) / 2) * 100
    print("percent difference %f" % (percent_diff))

    plt.plot(x, slope, "-r", label="slope")
    plt.scatter(x, y)
    plt.title("Light Bulb")
    plt.ylabel("V (Volts)")
    plt.xlabel("I (amps) * 10^-3")
    plt.show()


def main():
    part1()
    part2()


if __name__ == "__main__":
    main()
UTF-8
Python
false
false
1,723
py
4
lab5.py
3
0.583865
0.516541
0
76
21.684211
86
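Percent difference as used in the lab script above is the absolute gap divided by the mean of the two values. A worked check of that formula (the resistance values are invented):

def percent_difference(a, b):
    return abs(a - b) / ((a + b) / 2) * 100

# e.g. a fitted resistance of 3.2e-4 ohm against a DMM reading of 3.25e-4 ohm:
# |0.05e-4| / 3.225e-4 * 100 ~= 1.55 %
print(percent_difference(3.2e-4, 3.25e-4))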
szabowexler/Udacity
9,792,525,436,141
c275cdc891fc5e2631f4acdae4ae1cf7a47d2f14
3f66f99ecd3a44d1b70465225fd3f0ccb78e9e5c
/ud120-intro-to-machine-learning/ud120-projects/svm/svm_author_id.py
79279ee4408ff2cd26bac1b8429aa829944dd3f7
[]
no_license
https://github.com/szabowexler/Udacity
e32461d61b99a75a8e0809100fd6e2dd64d489ab
01dc6d9995c74151320ab8ebaa89fe67e498accc
refs/heads/master
"2021-01-09T06:49:56.304451"
"2017-03-31T05:34:01"
"2017-03-31T05:34:01"
81,118,679
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python """ This is the code to accompany the Lesson 2 (SVM) mini-project. Use a SVM to identify emails from the Enron corpus by their authors: Sara has label 0 Chris has label 1 """ import sys from time import time sys.path.append("../tools/") from email_preprocess import preprocess ### features_train and features_test are the features for the training ### and testing datasets, respectively ### labels_train and labels_test are the corresponding item labels features_train, features_test, labels_train, labels_test = preprocess() ######################################################### from sklearn.svm import SVC classifier = SVC(kernel="rbf", C=10000) t0 = time() classifier.fit(features_train, labels_train) print "SVM training time", round(time() - t0, 3), "s" pred = classifier.predict(features_test) print "pred[10] = ", pred[10] print "pred[26] = ", pred[26] print "pred[50] = ", pred[50] chris = sum(pred) print "there are ", chris, " emails written by Chris!" t0 = time() score = classifier.score(features_test, labels_test) print "SVM testing time", round(time() - t0, 3), "s" print "SVM accuracy =", score #########################################################
UTF-8
Python
false
false
1,229
py
7
svm_author_id.py
6
0.633849
0.612693
0
43
27.534884
76
jin2313/gis_web_01
18,176,301,615,495
58e6d5bea1cae39f68e1dab11d364c3cb4a237aa
fcef97d8553d4fc179d69af09475df3cb72a4b1b
/articleapp/models.py
d4c1a763fa435c133c978695c883a46908a51809
[]
no_license
https://github.com/jin2313/gis_web_01
642ec6bdeef7fb80fb8294874f7da4702097da5b
0aab339077919bff476e1fe53022e39e3dee9546
refs/heads/master
"2023-08-20T13:37:46.742513"
"2021-10-06T00:43:08"
"2021-10-06T00:43:08"
381,541,653
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib.auth.models import User
from django.db import models

# Create your models here.
from projectapp.models import Project


class Article(models.Model):
    writer = models.ForeignKey(User, on_delete=models.SET_NULL, related_name='article', null=True)
    # null: not "this field is optional on input" but "NULL is allowed in the database"
    # ForeignKey: a one-to-many link -- one user is not limited to writing a single article
    # related_name: the name used to reach the articles attached to a user
    project = models.ForeignKey(Project, on_delete=models.SET_NULL, related_name='article', null=True, blank=True)
    # blank: the field may be left empty on input
    # related_name: the name used to reach the articles attached to a project
    title = models.CharField(max_length=200, null=True)
    image = models.ImageField(upload_to='article/', null=True)
    # Pinterest-style app, so allowing a null image would get complicated -> better to require one
    # upload_to: creates an 'article/' folder inside the media directory and stores images there
    content = models.TextField(null=True, blank=True)
    # TextField: used for long text
    created_at = models.DateField(auto_now_add=True)
    # auto_now_add: records the creation time in the db automatically, without
    # server-side setup or user input
    like = models.IntegerField(default=0)
UTF-8
Python
false
false
1,511
py
6
models.py
5
0.714932
0.710407
0
21
51.666667
146
Jash271/azadi
5,007,931,906,882
76ff306bd6804ab6a387868631c8d06a0cf2eb60
61bc416290d76f6ed6168d6e5b7a38a10f39990b
/AzadiApp/migrations/0006_watch_trusted_users.py
52650330d7ecfbe2decdcda765ad822cbacdd8f6
[]
no_license
https://github.com/Jash271/azadi
fe502408583f15afc496c124a640997f77bddbfc
5beb19c5a3df721d1dbbef97f9528c71dc223df9
refs/heads/master
"2020-12-29T12:14:04.939280"
"2020-01-30T04:45:17"
"2020-01-30T04:45:17"
238,603,813
1
0
null
true
"2020-02-06T04:00:28"
"2020-02-06T04:00:27"
"2020-01-30T04:45:32"
"2020-01-30T04:45:30"
1,458
0
0
0
null
false
false
# Generated by Django 3.0.2 on 2020-01-29 19:02 from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('AzadiApp', '0005_watch_under_attack'), ] operations = [ migrations.AddField( model_name='watch', name='trusted_users', field=models.ManyToManyField(related_name='watches', to=settings.AUTH_USER_MODEL), ), ]
UTF-8
Python
false
false
535
py
16
0006_watch_trusted_users.py
10
0.642991
0.607477
0
20
25.75
94
bellyfat/mws-orders-webhook
16,466,904,629,986
d105940a37460cd941a21eb75d86155d7818bcd7
9d1f2a8f878f19da3c787f62fe757649ecdcd133
/mws/handler.py
17bc6929ae3e67e4571d7b856a86d4a89d8aef82
[ "MIT" ]
permissive
https://github.com/bellyfat/mws-orders-webhook
8151820853ab16c73f86efea35d98360fc2e70a4
e2b67429ced8c3f54c305d4f1198315d7d86417b
refs/heads/master
"2022-02-26T19:19:39.987322"
"2018-11-16T11:32:10"
"2018-11-16T11:32:10"
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from mws import mws import pymysql import xmltodict import json import logging import sys import os import settings import boto3 from utils import to_amazon_timestamp from datetime import datetime, timedelta logging.basicConfig() logger = logging.getLogger() logger.setLevel(logging.INFO) try: conn = pymysql.connect(os.environ['DB_HOST'], port=os.environ['DB_PORT'], user=os.environ['DB_USER'], passwd=os.environ['DB_PASS'], db=os.environ['DB_DATABASE'], connect_timeout=5, autocommit=True) except Exception as e: logger.error("ERROR: Unexpected error: Could not connect to MySQL instance.") logger.error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e) sys.exit() logger.info("SUCCESS: Connection to MySQL instance succeeded") client = boto3.client( 'lambda', region_name='us-east-1' ) def main(event, context): """ This function testing """ item_count = 0; #logger.info(event) #logger.info(context) try: orders_api = mws.Orders(os.environ['MWS_ACCESS_KEY'], os.environ['MWS_SECRET_KEY'], event['seller_id'], event['region'], auth_token = event['auth_token']) logger.info("SUCCESS: orders_api") service_status = orders_api.get_service_status() logger.info("SUCCESS: service_status") if(service_status.parsed.Status != 'GREEN'): logger.error("ERROR: MWS API is having problems") sys.exit() else: logger.info("SUCCESS: MWS API is GREEN") # updated 8h ago updated_after = to_amazon_timestamp(datetime.now() - timedelta(hours=8)) response = orders_api.list_orders(marketplaceids=event['marketplaceids'], lastupdatedafter=updated_after, max_results='25') xml_data = xmltodict.parse(response.original, process_namespaces=True, namespaces={'https://mws.amazonservices.com/Orders/2013-09-01': None, '@xmlns': None}) data = xml_data.get("ListOrdersResponse", {}).get("ListOrdersResult", {}) orders = [] orders.extend(data.get("Orders", {}).get("Order", [])) with conn.cursor() as db: for order in orders: item_count += 1 logger.debug("GOT ORDER %s", (order['SellerOrderId'])) number_of_rows = db.execute('SELECT `id`, `syncronized`, `failed` FROM `orders` WHERE `seller_id` = %s AND `seller_order_id` = %s AND `order_status` = %s', (event['seller_id'], order['SellerOrderId'], order['OrderStatus'])) db_order = db.fetchone() if(db_order == None and number_of_rows == 0): db.execute('INSERT INTO `orders` (`seller_id`, `seller_order_id`, `order_status`, `payload`, `created_at`, `updated_at`) values (%s, %s, %s, %s, NOW(), NOW());', (event['seller_id'], order['SellerOrderId'], order['OrderStatus'], json.dumps(order))) id = db.lastrowid logger.info("SUCCESS: NEW ORDER %s", (id)) call_webhook(id, url = event.get('url')) elif(db_order != None and db_order[1] == 0 and db_order[2] <= 3): logger.info("SUCCESS: EXISTING ORDER %d BUT NOT SYNCED", (db_order[0])) call_webhook(db_order[0], url = event.get('url')) else: logger.info("SUCCESS: EXISTING ORDER %d", (db_order[0])) logger.info("SUCCESS: COMPLETED CYCLE") except Exception as e: logger.error(sys.exc_info()[-1].tb_lineno) logger.error(type(e).__name__) logger.error(e) finally: return "Added %d items to MySQL table" %(item_count) def call_webhook(id, url): client.invoke( FunctionName='arn:aws:lambda:us-east-1:XXXXX:function:mws-webhook-dev-webhook', InvocationType='Event', LogType='None', Payload=json.dumps({"id": id, "url": url}) )
UTF-8
Python
false
false
3,909
py
10
handler.py
4
0.606037
0.598619
0
103
36.951456
268
Canisback/jupyterlab-git
1,941,325,221,391
25788e8d6918a673cda79d9562ae3f99cf4d9d53
2f22d004a5defd88102d4c00b33382ef61387c9f
/tests/unit/test_handlers.py
37e264dc9c903b82bf1de68a31cbb7a4a869c479
[ "BSD-3-Clause" ]
permissive
https://github.com/Canisback/jupyterlab-git
ccbcec6950f725f31b75290370d396db04dd53bf
276a42d772e0a13eb4b880c1ea45044cd5821a45
refs/heads/master
"2020-04-07T13:37:50.672812"
"2018-10-28T20:44:59"
"2018-10-28T20:44:59"
158,414,790
0
1
null
true
"2018-11-20T15:53:41"
"2018-11-20T15:53:41"
"2018-11-19T15:07:49"
"2018-10-28T20:45:00"
746
0
0
0
null
false
null
import pytest from mock import Mock, ANY from jupyterlab_git import handlers def test_mapping_added(): mock_web_app = Mock() mock_web_app.settings = { 'base_url': 'nb_base_url' } handlers.setup_handlers(mock_web_app) mock_web_app.add_handlers.assert_called_once_with(".*", ANY)
UTF-8
Python
false
false
310
py
2
test_handlers.py
2
0.664516
0.664516
0
14
21.142857
64
hsfzxjy/hsfzmun
10,033,043,632,063
e5db76a7955f93f1a537b1ff4035135f8a4520e7
265712980e01464f92a786e190ef5d4fbe4abbf7
/server/articles/migrations/0006_auto_20170130_1857.py
5f25e4bcc7579aab1f1d43a66ce24c5268f66169
[]
no_license
https://github.com/hsfzxjy/hsfzmun
cde4657deeb3567725ce70751ee2486273ee8882
9849cf1904b965cba08680d6bf653dbcb960efc8
refs/heads/master
"2021-01-11T08:32:01.058823"
"2017-03-02T13:07:41"
"2017-03-02T13:07:41"
76,778,356
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-01-30 10:57 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('articles', '0005_article_mentions'), ] operations = [ migrations.AlterModelOptions( name='article', options={'permissions': [('can_verify', 'Can verify')]}, ), ]
UTF-8
Python
false
false
435
py
138
0006_auto_20170130_1857.py
106
0.597701
0.549425
0
19
21.894737
68
frague/ganttchart
12,137,577,605,058
aa73c70f40e9bbd41f5b30bfe43c71aa2e7e55b7
994139ecd9bc4da96fde512b9711ef4b292534c7
/ganttchart/category.py
c1af20b8f70227b9c16cba07f53a3fa73cb86976
[]
no_license
https://github.com/frague/ganttchart
2cfad5190a93b436224fb2390429c5f8793ac828
d3b4e10d934c0c2ac05bf7407a52496e5e8905a8
refs/heads/master
"2021-01-01T16:26:35.706394"
"2013-07-08T08:02:32"
"2013-07-08T08:02:32"
5,430,823
5
3
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python from exceptions import * from logging import getLogger LOGGER = getLogger() category_color_index = 0 category_colors = [ "#00CCFF", "#CCFFFF", "#88FFA4", "#FFFF99", "#99CCFF", "#FF99CC", "#CC99FF", "#FFCC99", "#3366FF", "#33CCCC", "#99CC00", "#FFCC00", "#FF9900", "#FF6600", "#8282D0", "#48B5A7", "#477E2A", "#2DAFC4", "#D7A041", "#986E25", "#993300", "#993366", "#3670A3", "#A33663", "#D9ECA7", "#F3F6B7", "#F8C592", "#F4A586", "#00BFFF", "#00DED1", "#00FA9A", "#AFEEEE", "#F5DEB3", "#FFD700", "#FA8072", "#E6E6FA" ] def pick_color(): global category_colors, category_color_index result = category_colors[category_color_index] category_color_index += 1 if category_color_index == len(category_colors): category_color_index = 0 return result # Represents tasks category class Category: def __init__(self, title, color=None, is_predefined=False): self.replaces = {"Ready for a new project": "Ready"} self.title = title if color: self.color = color else: self.color = pick_color() self.is_predefined = is_predefined LOGGER.debug("New category created \"%s\"" % self) def __repr__(self): return "Category: %s (%s)" % (self.title, self.color) @property def smart_title(self): if self.is_predefined: if self.title in self.replaces.keys(): return self.replaces[self.title] return self.title
UTF-8
Python
false
false
1,567
py
14
category.py
11
0.567326
0.493299
0
49
30.979592
63
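The global-index color picker in the record above can also be expressed with itertools.cycle, which removes the module-level counter while keeping the same wrap-around behavior; an equivalent sketch:

from itertools import cycle

category_color_cycle = cycle(category_colors)

def pick_color():
    # next() advances through the palette and wraps automatically at the end
    return next(category_color_cycle)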
petrov-sergiu/RC_proiect
1,967,095,062,536
682c8db70f9253979f5f33f465035b4c69942326
1dff424908804db0e40ea29b78f547fcdd1cb037
/CoAP.py
f47d4dda97eda5ab29f2ff32feeb79c0a2bcd75b
[]
no_license
https://github.com/petrov-sergiu/RC_proiect
662c24a995fcd6b3c5724944a84b9c27138a6d6a
4ccdc688dfda54aa4cfb9f1a72ac0d7737c31e38
refs/heads/main
"2023-02-18T14:48:28.936964"
"2021-01-22T09:53:57"
"2021-01-22T09:53:57"
302,594,768
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import socket
import select
import time
from Header import *
from Mesaj import Mesaj
from random import randint
from define import *
import threading


class Coap:
    def __init__(self):
        self.sock = None  # initialize the socket with None
        self.port = 0  # initialize the port
        self.result = ""  # initialize the result

    def start(self, addr='127.0.0.1', port=COAP_DEFAULT.PORT):  # start the communication; parameters: address and port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # create an IPv4 UDP socket
        self.sock.connect((addr, port))  # connect the socket to a known address and a known port

    def stop(self):  # stop the communication
        if self.sock is not None:  # if the socket has been created / exists, then:
            self.sock.close()  # close the communication / close the socket
            self.sock = None  # set the socket to None

    def sendPacket(self, ip, port, header, mesaj):  # send a data packet; parameters: ip address, port, the header and the actual message
        coapPacket = Mesaj()  # initialize the CoAP packet with the message (constructor from the Mesaj class)
        coapPacket.createPacket(header, mesaj)  # create the packet from the header and message received as parameters
        status = 0  # initialize status with 0
        try:
            status = self.sock.sendto(coapPacket.getPackege(), (ip, port))  # status takes the value sent towards the server
            if status > 0:  # if it is greater than 0
                status = header.getMessageId()  # put the value from the header into status
                print('Packet sent. MessageId', status)
        except Exception as e:  # if status is 0, display an error message
            status = 0
            print('Exception while sending the packet!!!!')
        return status

    def send(self, ip, port, versiune, tip, tokenLen, metoda, token, payload):  # send function; parameters: ip address, port, version, type, token length, method, token, data
        header = Header()  # create the header
        header.setHeader(versiune, tip, tokenLen)  # build the header
        token = randint(0, 65536)  # the token takes a random value
        header.setToken(token)
        header.setCode(0, metoda)  # set the code of the method used
        return self.sendEx(ip, port, header, payload)

    def sendEx(self, ip, port, header, payload):
        nextMessage = randint(0, 65536)
        header.setMessageId(nextMessage)
        header.buildHeader()
        return self.loop(ip, port, header, payload, COAP_DEFAULT.MAX_RETRANSMIT)

    def sendResponse(self, ip, port, version, tokenL, mesajid, payload, code, token):  # send a response
        header = Header()
        header.setHeader(version, COAP_TYPE.COAP_ACK, tokenL)  # set the header with the version, the ACK message type and the token length
        header.setCode(code[0], code[1])  # set the code
        header.setMessageId(mesajid)  # set the 16-bit message id
        header.setToken(token)
        header.buildHeader()  # build the header
        return self.sendPacket(ip, port, header, payload)  # send the packet

    # /////////////////
    def readBytesFromSocket(self, nrBytes):  # read bytes from the UDP socket
        try:
            return self.sock.recvfrom(nrBytes)  # return the bytes read from a UDP socket
        except Exception:
            return None, None

    def get(self, ip, port, url, tip):
        return threading.Thread(target=self.send(ip, port, COAP_DEFAULT.VERSION, tip, 4, COAP_METHOD.COAP_GET, 0, url)).start()

    def post(self, ip, port, url, tip):
        return threading.Thread(target=self.send(ip, port, COAP_DEFAULT.VERSION, tip, 4, COAP_METHOD.COAP_POST, 0, url)).start()

    def handleResponse(self, header, mesaj):
        header.print()
        self.result = "The message is " + str(mesaj)
        print("The message received from the server is: " + str(mesaj))

    def getResult(self):  # get the result
        return self.result

    def sendACK(self, ip, port, mesajid, token):  # send an ACK
        header = Header()
        header.setHeader(COAP_DEFAULT.VERSION, COAP_TYPE.COAP_ACK, COAP_DEFAULT.TOKENL)
        header.setCode(0, 0)
        header.setMessageId(mesajid)
        header.setToken(token)
        header.buildHeader()
        self.sendPacket(ip, port, header, "")  # send the packet to the server

    def codeGet(self, header):
        if header.getCodeClass() == 2 and header.getCodeDetail() == 3:
            print("COAP_VALID")
            return 1
        elif header.getCodeClass() == 2 and header.getCodeDetail() == 5:
            print("COAP_CONTENT")
            return 1
        elif header.getCodeClass() == 4 and header.getCodeDetail() == 5:
            print("COAP_METHOD_NOT_ALLOWD")
            return 0

    def codePut(self, header):
        if header.getCodeClass() == 2 and header.getCodeDetail() == 1:
            print("COAP_CREATED")
            return 1
        if header.getCodeClass() == 2 and header.getCodeDetail() == 4:
            print("COAP_CHANGED")
            return 1
        if header.getCodeClass() == 4 and header.getCodeDetail() == 5:
            print("COAP_METHOD_NOT_ALLOWD")
            return 0

    def verifyCodeReceive(self, headerSent, headerReceive):
        if headerSent.getCode() == COAP_METHOD.COAP_GET:
            return self.codeGet(headerReceive)
        elif headerSent.getCode() == COAP_METHOD.COAP_POST:
            return self.codePut(headerSent)

    # //////////
    def loop(self, ip, port, header, mesaj, retransmit):
        global headerRecive
        headerRecive = Header()
        headerRecive.setHeader(1, 2, 4)
        headerRecive.setCode(0, 0)
        headerRecive.setMessageId(33)
        headerRecive.setToken(70)
        headerRecive.buildHeader()
        if header.getMessageType() == COAP_TYPE.COAP_CON:  # when a CON is sent
            print("Sending CON!")
            self.sendPacket(ip, port, header, mesaj)
            time.sleep(2)
            r, _, _ = select.select([self.sock], [], [], COAP_DEFAULT.AKC_TIMEOUT)  # wait for the ACK
            if not r:
                print("No ACK was received from the server!")
                print("Sending CON again!")
                retransmit = retransmit - 1  # a CON is sent to the server until an ACK is received or until retransmit reaches 0
                if retransmit != 0:
                    self.loop(ip, port, header, mesaj, retransmit)
                else:
                    print("Sent " + str(COAP_DEFAULT.MAX_RETRANSMIT) + " CONs to the server. Received nothing from the server!")
                    return
            else:  # an ACK was received
                headerRecive = Header()
                buffer = Mesaj()
                (data, addr) = self.readBytesFromSocket(COAP_DEFAULT.BUFFER_MAX_SIZE)
                print(data)
                buffer.setPack(data)
                (header1, mesaj) = buffer.despachetarePacket()
                headerRecive.setHeader(header1)
                headerRecive.buildHeader()
                headerRecive.setCode(headerRecive.getCodeClass(), headerRecive.getCodeDetail())
                if headerRecive.getCode() != 0:
                    if self.verifyCodeReceive(header, headerRecive):
                        print("Exiting the program!")
                        return
                    return self.handleResponse(headerRecive, mesaj)
                else:
                    r, _, _ = select.select([self.sock], [], [], COAP_DEFAULT.TIMEOUT)
                    if not r:
                        print("No response was received!")
                    else:
                        print("The received message is empty! The message has the token: " + str(header.getToken()))
                        headerRecive = Header()
                        mesaj1 = Mesaj()
                        (buffer, addr) = self.readBytesFromSocket(COAP_DEFAULT.BUFFER_MAX_SIZE)
                        mesaj.set(buffer)
                        (header1, mesaj) = mesaj1.despachetarePacket()
                        headerRecive.setHeader(header1)
                        headerRecive.buildHeader()
                        print(str(headerRecive.getMessageType()))
                        if headerRecive.getMessageType() == COAP_TYPE.COAP_CON:
                            print("Sent an ACK!")
                            print(str(headerRecive.getMessageId()))
                            self.sendACK(ip, port, headerRecive.getMessageId(), headerRecive.getToken())
                        return self.handleResponse(headerRecive, mesaj)
        else:  # when a NONCON is sent
            print("Sending NONCON")
            self.sendPacket(ip, port, header, mesaj)
            # wait for a response
            r, _, _ = select.select([self.sock], [], [], COAP_DEFAULT.AKC_TIMEOUT)
            if not r:
                print("Nothing was received from the server!")
            else:
                headerRecive = Header()
                mesaj1 = Mesaj()
                (buffer, addr) = self.readBytesFromSocket(COAP_DEFAULT.BUFFER_MAX_SIZE)
                mesaj1.setPack(buffer)
                (header1, mesaj) = mesaj1.despachetarePacket()
                headerRecive.setHeader(header1)
                headerRecive.buildHeader()
                return self.handleResponse(headerRecive, mesaj)
UTF-8
Python
false
false
9,414
py
7
CoAP.py
7
0.608562
0.599958
0
211
43.545024
191
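A client-side usage sketch for the Coap class above, assuming the project's sibling modules (Header, Mesaj, define) are on the path, the constant names used in the class exist in define, and a CoAP server is listening locally - all assumptions, since only this file is shown:

from CoAP import Coap
from define import COAP_TYPE, COAP_DEFAULT  # project modules assumed from the imports above

client = Coap()
client.start('127.0.0.1', COAP_DEFAULT.PORT)  # open the UDP socket

# Confirmable GET for a resource; loop() retransmits the CON until an ACK
# arrives or the retry budget (MAX_RETRANSMIT) runs out.
client.get('127.0.0.1', COAP_DEFAULT.PORT, '/temperature', COAP_TYPE.COAP_CON)
print(client.getResult())

client.stop()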
thejqs/pa_wine_shippers
4,337,916,986,169
3299fbb71df868c811a868badcc255f027489afd
396f44dee6dc79cc311a1228a4c9eb1ad26e904e
/scraper/geolocator.py
b9ba515be4012850712e81c2477382ba8d4e80c3
[]
no_license
https://github.com/thejqs/pa_wine_shippers
dd4d4cc553c8a31c563c75c5769817d2a72f06de
0cbf6254d46dada5c47364ab9518498d75536ad2
refs/heads/master
"2020-06-13T17:20:11.110339"
"2018-10-19T16:59:47"
"2018-10-19T16:59:47"
75,576,056
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3

# python imports
import os
import json
from datetime import date
from collections import namedtuple
import uuid
from pprint import pprint

# external imports
import requests

# project imports
from scraper_config.settings_local import geojson_stub, geojson_key
import file_writer

Geolocator = namedtuple(
    'Geolocator',
    [
        'address',
        'latitude',
        'longitude',
        'county',
        'state_long_name',
        'state_short_name',
        'country'
    ]
)


def get_data(source_file):
    with open(source_file) as f:
        return json.load(f)


def get_latest_json(json_dir):
    '''
    whatever is the most recent incomplete json, grabs it
    can be run independently of the scraper parts, so this can start up
    parsing the json locally without reaching out across the internets

    args: none

    returns: the most recent filename
    '''
    return sorted(os.listdir(json_dir))[-1]


def ping_api(address):
    '''
    catches the initial response from the Google geolocation API

    args: an address string

    returns: the response object
    '''
    p = {'address': address, 'key': geojson_key}
    return requests.get(geojson_stub, params=p)


def get_geolocation_data(address):
    '''
    leverages Google's address-conversion API to extract latitude and
    longitude as well as better-formatted address details for each
    wine shipper.

    args: an address string to pass to the API

    returns: a namedtuple consisting of:
        [0]address, [1]latitude, [2]longitude, [3]county,
        [4]state_long_name, [5]state_short_name, [6]country
    '''
    county = ''
    state_long = ''
    state_short = ''
    country = ''

    # TODO: refactor into async
    r = ping_api(address)

    # addresses are self-reported and human-entered; they will always be
    # a little messy and the API will always miss a few
    try:
        # if the API whiffs, ['results'] will == []
        formatted_address = r.json()['results'][0]['formatted_address']
        lat = r.json()['results'][0]['geometry']['location']['lat']
        lng = r.json()['results'][0]['geometry']['location']['lng']
        address_components = r.json()['results'][0]['address_components']

        # the API is maddeningly stingy with lookups -- especially for data
        # that can move index positions. I feel dirty writing a loop here.
        # but Google made me.
        for line in address_components:
            if line['types'][0] == 'administrative_area_level_2':
                county = line['long_name']
            elif line['types'][0] == 'administrative_area_level_1':
                state_long = line['long_name']
                state_short = line['short_name']
            elif line['types'][0] == 'country':
                country = line['long_name']

        geolocator = Geolocator(
            formatted_address,
            lat,
            lng,
            county,
            state_long,
            state_short,
            country
        )
        return geolocator
    except IndexError:
        return None


def try_clean_api_address(address):
    '''
    we've collected a mapping of addresses the API hasn't been able
    to handle to ones it can, so this should give us a fuller
    overall data set
    '''
    with open('scraper/address_map.json') as f:
        j = json.load(f)
        for d in j:
            if d['given_address'] == address:
                yield d['api_address']
                break


def obj_builder(data_dict, geolocator):
    '''
    takes some of the messy assigning out of build_shit(), where we might
    not always have a successful trip to the API. only gets called
    when we do.

    args: the target object, a dict, for our new data and
          a namedtuple of goodies from the API

    returns: the updated target object
    '''
    # print(geolocator)
    # print(data_dict)
    # print(geolocator.address[:-5])
    data_dict['api_address'] = geolocator.address[:-5]
    data_dict['latitude'] = geolocator.latitude
    data_dict['longitude'] = geolocator.longitude
    data_dict['county'] = geolocator.county
    data_dict['state_long'] = geolocator.state_long_name
    data_dict['state_short'] = geolocator.state_short_name
    data_dict['country'] = geolocator.country
    # print(data_dict['country'])
    return data_dict


def build_shit(source_file):
    data_dicts = get_data(source_file)
    # pprint(data_dicts)
    for d in data_dicts.keys():
        data_dicts[d]['uid'] = f'{uuid.uuid1()}'
        data_dicts[d]['scrape_date'] = f'{date.today()}'
        # print(data_dicts[d]['csv_address'])
        # print(data_dicts[d])
        # TODO: better error-handling so we're not nesting try-excepts
        try:
            geolocator = get_geolocation_data(data_dicts[d]['csv_address'])
            # print(data_dicts[d]['csv_address'])
            # print(geolocator)
            completed_dict = obj_builder(data_dicts[d], geolocator)
        except (AttributeError, TypeError):
            # when the API misses, we first check our
            # known bad addresses
            generator_address = try_clean_api_address(data_dicts[d]['csv_address'])
            try:
                mapped_address = [a for a in generator_address][0]
                geolocator = get_geolocation_data(mapped_address)
                completed_dict = obj_builder(data_dicts[d], geolocator)
            except (AttributeError, TypeError, IndexError):
                # TODO: logging misses?
                # print(data_dicts[d])
                # we can still have this in the data; then it's easy to see what the API missed
                # print(data_dicts[d])
                # yield data_dicts[d]
                continue
        # print(completed_dict)
        # yield completed_dict
    # pprint(data_dicts)
    return data_dicts


# def make_dicts_more_iterable(dicts):
#     return (d for d in dicts)


# def track_api_misses(miss, misses=[]):
#     '''
#     a collector for objects the API can't handle
#     '''
#     misses.append(miss)
#     with open('scraper/miss.log', 'a+') as log:
#         print(
#             'missed on:\n',
#             miss, '\n',
#             date.today(), '\n',
#             file=log
#         )
#     print(
#         'missed: ',
#         misses, '\n',
#         '{} total {}.'.format(
#             len(misses), 'miss' if len(misses) == 1 else 'misses'
#         )
#     )


if __name__ == '__main__':
    initial_json_dir = 'data/initial_json/'
    latest_json = get_latest_json(initial_json_dir)
    source_file = os.path.join(initial_json_dir, latest_json)

    final_json_dir = 'data/'
    final_json_file_format = latest_json
    outfile = os.path.join(final_json_dir, final_json_file_format)

    dicts = build_shit(source_file)
    # dicts_gen = make_dicts_more_iterable(dicts)
    file_writer.write_json(dicts, outfile)
UTF-8
Python
false
false
6,943
py
19
geolocator.py
12
0.590379
0.587066
0
240
27.904167
95
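A self-contained sketch of the response handling in get_geolocation_data above: packing the fields the code pulls out of a Google geocoding payload into the Geolocator namedtuple, using a canned response dict (all values invented for the demo) instead of a live API call:

from collections import namedtuple

Geolocator = namedtuple('Geolocator', ['address', 'latitude', 'longitude', 'county',
                                       'state_long_name', 'state_short_name', 'country'])

# Shape mirrors results[0] of a geocoding response; values are made up for the demo.
result = {
    'formatted_address': '123 Main St, Harrisburg, PA 17101, USA',
    'geometry': {'location': {'lat': 40.2732, 'lng': -76.8867}},
    'address_components': [
        {'types': ['administrative_area_level_2'], 'long_name': 'Dauphin County', 'short_name': 'Dauphin'},
        {'types': ['administrative_area_level_1'], 'long_name': 'Pennsylvania', 'short_name': 'PA'},
        {'types': ['country'], 'long_name': 'United States', 'short_name': 'US'},
    ],
}

county = state_long = state_short = country = ''
for line in result['address_components']:
    if line['types'][0] == 'administrative_area_level_2':
        county = line['long_name']
    elif line['types'][0] == 'administrative_area_level_1':
        state_long, state_short = line['long_name'], line['short_name']
    elif line['types'][0] == 'country':
        country = line['long_name']

geo = Geolocator(result['formatted_address'],
                 result['geometry']['location']['lat'],
                 result['geometry']['location']['lng'],
                 county, state_long, state_short, country)
print(geo.county, geo.state_short_name)  # Dauphin County PA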
Jonathan-aguilar/DAS_Sistemas
15,453,292,353,081
b39ec1678dee30a020e667c5ebf0f4440497bacd
05a70c12df808455100598d8a6fdb5635c641ab8
/Ago-Dic-2020/rodriguez-martinez-diego-jose/Practica 3/IIterableCollection.py
0d260f971187c613aaa97968e91caec075d04d3f
[ "MIT" ]
permissive
https://github.com/Jonathan-aguilar/DAS_Sistemas
991edcc929c33ba9bb8bc84e741b55c10a8420a3
4d02efc64161871084df1bff258112351e5d1241
refs/heads/development
"2023-07-24T12:26:54.698452"
"2021-09-02T20:52:26"
"2021-09-02T20:52:26"
289,764,892
1
0
MIT
true
"2021-09-02T20:52:27"
"2020-08-23T20:54:55"
"2021-06-19T21:30:38"
"2021-09-02T20:52:27"
173,441
0
0
0
Python
false
false
from abc import ABC, abstractmethod


class IIterableCollection(ABC):
    """'Interface' for the collections to be iterated over"""

    @abstractmethod
    def create_iterator(self):
        pass
UTF-8
Python
false
false
202
py
4,519
IIterableCollection.py
1,777
0.643564
0.643564
0
9
20.222222
55
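A minimal concrete pair for the interface above: a list-backed collection whose create_iterator returns a plain Python iterator. This is a sketch; the class and sample data are invented, not taken from the original repo:

from abc import ABC, abstractmethod


class IIterableCollection(ABC):
    @abstractmethod
    def create_iterator(self):
        pass


class WordCollection(IIterableCollection):  # demo class, not from the original repo
    def __init__(self, words):
        self._words = list(words)

    def create_iterator(self):
        # A built-in iterator satisfies the contract for this small demo.
        return iter(self._words)


for w in WordCollection(["uno", "dos"]).create_iterator():
    print(w)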
tensi3165/Esports_platform
12,575,664,252,769
efff1daae2a9dea31e4b3ebc33caba0066d6f6b5
153d70a7b7bbada98f937aed470257367e3b6d4f
/esportsplatform_main/forms.py
ecf635330c72abdcc49da64bf98290f9a802ed75
[]
no_license
https://github.com/tensi3165/Esports_platform
e5992e4a4e5a32142f7a0b27eb41eff6ce8b3093
3993892cf5c16675df242eb543acc3cc7f6dd5ec
refs/heads/master
"2020-08-10T09:22:32.865087"
"2020-05-15T12:37:38"
"2020-05-15T12:37:38"
214,315,384
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django import forms
from .models import *


class Cmopetition_info_Form(forms.ModelForm):
    class Meta:
        model = Cmopetition_info
        fields = ('c_contact', 'c_rule', 'c_rule_file', 'c_prize', 'c_schedule')

    def __init__(self, *args, **kwargs):
        super(Cmopetition_info_Form, self).__init__(*args, **kwargs)
        self.fields['c_rule_file'].required = False
UTF-8
Python
false
false
373
py
11
forms.py
7
0.619303
0.619303
0
10
36.3
80
alexmorozov/django-template
3,367,254,402,382
cd43f022efde0e6a03c797f8ac6ba053d95d1b37
d641f746f217ab2ee569be7e99457b6a6d645db9
/{{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/settings/common.py
18bc5107498be1a2db90c3bc1d34881c6a6c470b
[]
no_license
https://github.com/alexmorozov/django-template
dc3bf1c23802818bab0bada71c8354a299b83c67
3f8d355ab8fce497fe7f04f7aa249866dce15e61
refs/heads/master
"2021-01-18T21:52:44.234274"
"2016-12-22T13:36:14"
"2016-12-22T13:36:14"
41,729,382
3
3
null
false
"2016-12-22T13:36:15"
"2015-09-01T09:25:12"
"2016-07-21T08:39:54"
"2016-12-22T13:36:14"
25
1
2
0
Python
null
null
import os
import sys

TEST = 'test' in sys.argv

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


def path(*a):
    return os.path.join(BASE_DIR, *a)


# This trick allows importing apps without those prefixes
sys.path.insert(0, path('apps'))
sys.path.insert(0, path('lib'))
sys.path.insert(1, path('.'))

ROOT_URLCONF = '{{ cookiecutter.project_name }}.urls'

WSGI_APPLICATION = '{{ cookiecutter.project_name }}.wsgi.application'

ALLOWED_HOSTS = ['{{ cookiecutter.site_name }}']

ADMINS = [
    ('{{ cookiecutter.admin_name }}', '{{ cookiecutter.admin_email }}')
]

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',

    'django_extensions',
    'djangobower',
    'pipeline',

    'lib',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

# -----------------------------------------------------------------------------
# TEMPLATES -------------------------------------------------------------------

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [path('templates')],
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# -----------------------------------------------------------------------------
# INTERNATIONALIZATION --------------------------------------------------------

TIME_ZONE = 'Europe/Moscow'
LANGUAGE_CODE = 'ru-ru'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# -----------------------------------------------------------------------------
# STATIC AND MEDIA FILES ------------------------------------------------------

STATICFILES_DIRS = [
    path('static'),
]

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'

STATIC_URL = '/static/'
STATIC_ROOT = path('../../static')

MEDIA_URL = '/media/'
MEDIA_ROOT = path('../media')

# -----------------------------------------------------------------------------
# BOWER SETTINGS --------------------------------------------------------------

STATICFILES_FINDERS += (
    'djangobower.finders.BowerFinder',
)

BOWER_COMPONENTS_ROOT = path('static')

BOWER_INSTALLED_APPS = (
    'normalize-scss#3',
    'include-media#1.4'
)

# -----------------------------------------------------------------------------
# PIPELINE SETTINGS -----------------------------------------------------------

STATICFILES_FINDERS += (
    'pipeline.finders.PipelineFinder',
)

PIPELINE = {
    'CSS_COMPRESSOR': None,
    'DISABLE_WRAPPER': True,
    'JS_COMPRESSOR': None,
    'SASS_ARGUMENTS': '--include-path %s' % path('static'),
    'SASS_BINARY': 'sassc',
    'COFFEE_SCRIPT_ARGUMENTS': '-b',
    'STYLESHEETS': {},
    'JAVASCRIPT': {},
    'COMPILERS': (
        'pipeline.compilers.coffee.CoffeeScriptCompiler',
        'pipeline.compilers.sass.SASSCompiler',
    ),
}

# -----------------------------------------------------------------------------
# IPYTHON NOTEBOOK ------------------------------------------------------------

IPYTHON_ARGUMENTS = [
    '--ext', 'django_extensions.management.notebook_extension',
]

NOTEBOOK_ARGUMENTS = [
    '--ip=0.0.0.0',
    '--no-browser',
]
UTF-8
Python
false
false
4,028
py
18
common.py
7
0.518371
0.514896
0
144
26.972222
79
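A standalone sketch of what the path() helper and the sys.path inserts in the settings file above accomplish, with a made-up BASE_DIR (the real one is derived from __file__):

import os
import sys

BASE_DIR = '/srv/example_project'  # stand-in value for the demo


def path(*a):
    return os.path.join(BASE_DIR, *a)


print(path('apps'))    # /srv/example_project/apps
print(path('static'))  # /srv/example_project/static

# Prepending to sys.path is what lets `import myapp` work without the `apps.` prefix.
sys.path.insert(0, path('apps'))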
tmpereira/SPC
7,481,833,043,089
b4faf13417b1ef2e8277f5ae3422aa0f9c0ce373
b036c301a4e579e933bfcf27b449ffd6a32c8c40
/thermo.py
423a1ca49cd0e9903f4a5fcc5deb1cb29c52fcc1
[]
no_license
https://github.com/tmpereira/SPC
cab35eb0eb9ae5e3dabb00efe74daae6d0d61501
095667743ab449783eacbaa5f94a21d0cba921e3
refs/heads/main
"2023-01-04T14:46:09.363730"
"2020-11-03T19:36:53"
"2020-11-03T19:36:53"
309,788,419
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np
import glob
import matplotlib.pyplot as plt


# function to read a single file
def read_one_file(arq):
    f = open(arq, 'r')
    matrix = []
    for i in f.readlines():
        ver = i.split(';')
        wn = ver[0].replace(',', '.')
        wn = np.array(wn).astype('f4')
        abss = ver[1].replace(',', '.')
        abss = np.array(abss).astype('f4')
        matrix.append([wn, abss])
    f.close()
    return np.array(matrix)


# function to read several files inside a directory
def read_dir_files(path, group):
    arqs = np.array(group)
    r = []
    for file in glob.glob(path + "*.csv"):
        ver = read_one_file(file)
        r.append(ver[:, 1])
    data = {}
    data['r'] = np.array(r)
    data['wn'] = ver[:, 0]
    data['g'] = np.ones(len(r)).astype('i8')
    data['arqs'] = arqs
    return data
UTF-8
Python
false
false
875
py
6
thermo.py
6
0.539519
0.531501
0
31
26.16129
60
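A self-contained sketch of the parsing convention read_one_file relies on - semicolon-separated columns with comma decimal marks - using an in-memory sample instead of a CSV on disk (numpy assumed installed; the values are invented):

import numpy as np

lines = ["400,5;0,012", "401,0;0,013"]  # wavenumber;absorbance with comma decimals

matrix = []
for i in lines:
    wn, abss = i.split(';')
    # Swap the decimal comma for a dot before the float32 conversion.
    matrix.append([np.array(wn.replace(',', '.')).astype('f4'),
                   np.array(abss.replace(',', '.')).astype('f4')])

matrix = np.array(matrix)
print(matrix[:, 0])  # wavenumbers: [400.5 401. ]
print(matrix[:, 1])  # absorbances: [0.012 0.013]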
Rudra0711/The-Hangman-Game_python
15,582,141,364,915
0ab0f72d16ebaf31d823e1c0654d8024e3213304
3f9daccd966a4033fa399e0cd53d543fec67447a
/app.py
dd5711bd74186a2fced2a2badd47f1b8dec4d11e
[]
no_license
https://github.com/Rudra0711/The-Hangman-Game_python
71842600013e24349ae9d5c713e9f09205f7335e
ec48392eb105cd09c121d97ab31e0c2d2634ae6d
refs/heads/master
"2022-01-05T21:00:17.516210"
"2019-04-29T16:38:21"
"2019-04-29T16:38:21"
184,107,500
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import random

WORDS = []
with open('words.txt', 'r') as wordsFile:
    WORDS = [name.strip() for name in wordsFile.readlines()]

random = random.randrange(1, 100, 1)
WORDS[random] = "decision"
ques_word = ["*" for letter in WORDS[random]]
wa = ques_word.count('*')


def genList(ques_word):
    for i in ques_word:
        print(i, end="")
    return ""


attempted = []
while wa > 0:
    i = 0
    ch = input(f"{genList(ques_word)} [Wrong attempts left : {wa}]\nCharacter : ")
    if ch in attempted:
        print("You already attempted this!")
    else:
        if WORDS[random].find(ch) != -1:
            for name in WORDS[random]:
                if WORDS[random][i] == ch:
                    ques_word[i] = ch
                i += 1
        else:
            wa -= 1
        attempted.insert(0, ch)
    try:
        ques_word.index('*')
    except ValueError:
        print('Congo! You won the game')
        break
else:
    print('Oops! You lost.\nTry again')
UTF-8
Python
false
false
937
py
2
app.py
1
0.559232
0.547492
0
39
23.025641
84
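The reveal step in the game loop above can be isolated into a small pure function; this sketch is for clarity only and is not part of the original file:

def reveal(word, mask, guess):
    # Uncover every position where the guessed character occurs.
    return [ch if ch == guess else m for ch, m in zip(word, mask)]


mask = ["*"] * len("decision")
mask = reveal("decision", mask, "i")
print("".join(mask))  # ***i*i**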
dupoy/ucrm
9,070,970,959,258
d536d86348eea2755cb7491915622f98e1926e03
f62a1f6ae632835283ab9980355785a478988165
/orders/admin.py
1539d922645ac54a32869597a7337ae138138e8b
[]
no_license
https://github.com/dupoy/ucrm
b9d014ece41e6f57c544380667c51533a58a1bda
88692df9f0ee146c8d8fd04be297de9ad5c80f10
refs/heads/master
"2023-05-07T17:51:44.126929"
"2021-05-27T07:01:35"
"2021-05-27T07:01:35"
366,280,181
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib import admin

from orders.models import Order, OrderItem


@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    list_display = ['customer', 'created_at', 'get_price']


@admin.register(OrderItem)
class OrderItemAdmin(admin.ModelAdmin):
    list_display = ['order', 'product', 'quantity', 'total_price']
UTF-8
Python
false
false
333
py
74
admin.py
45
0.735736
0.735736
0
13
24.615385
66
Koredotcom/KnowledgeGraphGenerator
7,164,005,499,375
7ccb9605fc50a7b7ead26d89782493d87b9c7936
e31cc5ed229d10d4fb8fd68a2f93d9f2cccd450c
/strategy/NGramStrategy.py
1f528104a913df7bb0326d1ba86cf5a9598d777b
[]
no_license
https://github.com/Koredotcom/KnowledgeGraphGenerator
6ffb8ba8301e131b85b28ba23a95e1a873aa2843
fc9de1d7cfa68833981e8c7af200845304cea925
refs/heads/master
"2023-07-06T13:48:07.042219"
"2022-02-28T12:42:54"
"2022-02-28T12:42:54"
202,364,674
14
11
null
false
"2023-06-30T22:20:04"
"2019-08-14T14:15:11"
"2023-05-12T10:58:29"
"2023-06-30T22:20:00"
2,267
10
10
5
Python
false
false
from collections import defaultdict

from tqdm import tqdm

from common import nlp, BOT_NAME
from strategy.phrase_finder import PhraseFinder
from log.Logger import Logger
import copy
import traceback
import re
from analyzer.kg_export.language.Lemmatize import Lemmatizer

logger = Logger()
phrase_finder_obj = PhraseFinder()
lemma = Lemmatizer()


class GramBasedGenerator(object):
    def __init__(self):
        pass

    @staticmethod
    def _filter_substrings(node_names):
        new_node_names = copy.deepcopy(node_names)
        for node_1 in node_names:
            node_1_stripped = node_1.strip()
            for node_2 in node_names:
                node_2_stripped = node_2.strip()
                try:
                    if node_1_stripped != node_2_stripped:
                        if node_2_stripped in node_1_stripped:
                            new_node_names.remove(node_2)
                except Exception:
                    pass
        return new_node_names

    @staticmethod
    def add_tag_to_single_word_questions(ques, stop_tokens):
        tag = ''
        try:
            ques = ques.strip()
            ques = ques[:-1] if ques.endswith('?') else ques
            ques_word_set = set(ques.lower().split()).difference(stop_tokens)
            if len(ques_word_set) == 1:
                tag = list(ques_word_set)[0]
        except Exception:
            logger.error(traceback.format_exc())
        finally:
            return tag

    def generate_graph(self, qna_object_map, stop_tokens):
        normalized_ques_list = [qna_obj.normalized_ques for qna_obj in qna_object_map.values()]
        phrases, uni_tokens, verbs = phrase_finder_obj.find_all_phrases(normalized_ques_list, stop_tokens)
        most_commons_terms = dict()
        most_commons_terms.update(phrases.most_common())
        most_commons_terms.update(uni_tokens.most_common())
        most_commons_terms.update(verbs.most_common())
        quest_ontology_map = defaultdict(dict)
        logger.info('Initiated ontology generation')
        try:
            for ques_id, qna_object in tqdm(qna_object_map.items()):
                ques = qna_object.normalized_ques
                quest_ontology_map[ques_id]['question'] = qna_object.question
                tags = ''
                terms = list()
                q = copy.deepcopy(ques)
                doc = " ".join(lemma.lemmatize(q))
                for term, cnt in phrases.most_common():
                    if cnt == 1:
                        break
                    if term in stop_tokens:
                        continue
                    try:
                        regex = re.compile("\\b" + term + "\\b")
                        if re.findall(regex, doc) and cnt > 1:
                            doc = re.sub(regex, "~~~~", doc)
                            terms.append(term)
                    except Exception:
                        print(traceback.format_exc())
                for term, cnt in uni_tokens.most_common():
                    if cnt == 1:
                        break
                    if term in stop_tokens:
                        continue
                    try:
                        regex = re.compile("\\b" + term + "\\b")
                        if re.findall(regex, doc):
                            doc = re.sub(regex, "~~~~", doc)
                            terms.append(term)
                    except Exception:
                        print(traceback.format_exc())
                for term, cnt in verbs.most_common():
                    if cnt == 1:
                        break
                    try:
                        regex = re.compile("\\b" + term + "\\b")
                        if re.findall(regex, doc):
                            tags = term
                    except Exception:
                        pass
                if not (terms or tags):
                    tags = self.add_tag_to_single_word_questions(qna_object.question, stop_tokens)
                terms = sorted(self._filter_substrings(terms), key=lambda x: most_commons_terms[x]) + [BOT_NAME]
                quest_ontology_map[ques_id]['terms'] = terms
                tags = [tags] if tags else []
                quest_ontology_map[ques_id]['tags'] = tags
        except Exception:
            logger.error(traceback.format_exc())
            raise Exception('Failed in generating ontology')
        return quest_ontology_map
UTF-8
Python
false
false
4,440
py
23
NGramStrategy.py
20
0.500225
0.496171
0
113
38.292035
112
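The term-consumption step in generate_graph above hinges on whole-word regex replacement: once a phrase matches, its span is overwritten so shorter terms cannot re-match inside it. A self-contained sketch of that mechanic (the document text and phrase counts are invented):

import re

doc = "reset my password and reset token"
terms_by_count = [("reset my password", 2), ("reset", 5)]  # longer phrases tried first

matched = []
for term, cnt in terms_by_count:
    regex = re.compile("\\b" + term + "\\b")
    if re.findall(regex, doc) and cnt > 1:
        doc = re.sub(regex, "~~~~", doc)  # consume the span so shorter terms can't re-match it
        matched.append(term)

print(matched)  # ['reset my password', 'reset']
print(doc)      # ~~~~ and ~~~~ token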
cambridge-cares/TheWorldAvatar
12,824,772,352,928
34297f629903f19b8d8ddf09fa890c83f907e845
65c84146c9f3871f54fe51ab437bd14927ade706
/JPS_ARBITRAGE/python/caresjpsarbitrage/CPO_to_FAME_Aspen.py
20bd042845860fe0971c7d10a70579c6734314f0
[ "MIT" ]
permissive
https://github.com/cambridge-cares/TheWorldAvatar
3a227c629acb10b9be7effe58696aef48b27dd21
01a7d108754e0249d8d3b5b75708345c399c1ee9
refs/heads/main
"2023-08-21T11:05:09.147655"
"2023-08-18T07:46:57"
"2023-08-18T07:46:57"
345,141,689
57
26
MIT
false
"2023-09-13T14:16:57"
"2021-03-06T16:33:07"
"2023-09-13T10:16:00"
"2023-09-13T14:16:57"
433,486
48
19
149
Java
false
false
##
# @file
# File documentation
# This file is a set of functions designed to work as an arbitrage spotter for a chemical plant converting a reagent into a product. The spotter looks through the chemical futures markets for profitable opportunities to purchase the reagent, convert it into the product and then sell the product. It factors in the costs of transport, storage and conversion and looks for the cheapest schedule. The conversion costs are based on an Aspen simulation of a relevant conversion process.
# Important assumptions include instantaneous conversion and transport, pricing other than futures is based on the literature, and only marginal profit is calculated (this is liable to change soon).
# Additionally, it is assumed that the plant is already functioning on a long-term contract. For that reason the changes to the plant's function cannot be too great and capital costs may be ignored in the calculations.

import win32api, win32com.client as win32, requests, sys
from lxml import html
from math import inf
import matplotlib.pyplot as plt
from csv_funcs import RCSV, ACSV


def preprocessing(file_addresses):
    # This function reads pricing data from .csv files in 'file_addresses', converts numbers from strings to floats and CNY to USD, and sorts prices and their timestamps into separate dictionaries.
    # It accepts a dictionary where keys are names of the chemicals and values are the file addresses.

    # Create variables to hold the read data
    prices, dates = {}, {}
    for key in file_addresses:
        prices[key] = []
        dates[key] = []

    # Read and store data in arrays in the dictionaries
    for key in file_addresses:
        prices[key], dates[key] = RCSV(file_addresses[key])
        # This line discards historical data by trimming off just the last line
        prices[key] = prices[key][-len(dates[key]):]

    # These loops convert the contract size and the prices from strings to floats. It is assumed that the first two entries are not numbers, but headers.
    # If a string within the range is not a number, or is a 0, then it is stored as a None.
    for key in file_addresses:
        for i in range(2, len(prices[key])):
            try:
                prices[key][i] = float(prices[key][i])
                if prices[key][i] == 0:
                    prices[key][i] = None
            except ValueError:
                prices[key][i] = None

    return dates, prices


def exchange_rates(prices, currencies):
    # This function converts an array of prices from one currency to another. It accepts an array with floats (but skips over non-floats) and an array with two strings. The latter need to be the correct currency codenames as per www.xe.com, as this function will download the rates from there.
    # The first entry in the array with codenames corresponds to the present currency, while the second to the target currency.

    # Url is being formulated
    url = 'http://www.xe.com/currencyconverter/convert/?Amount=1&From=' + currencies[0] + '&To=' + currencies[1]

    # requests library is used to download the source code of the page with exchange rates. The code is then parsed as html.
    page = requests.get(url)
    tree = html.fromstring(page.content)
    page.close()

    # lxml library is used to search through the html file using its structure and attributes
    exchange_rate = float(tree.xpath('//span[@class="uccResultUnit"]')[0].attrib['data-amount'])

    # This loop calculates the converted prices if they are in an array.
    if type(prices) == list:
        for i in range(len(prices)):
            if type(prices[i]) == float:
                prices[i] *= exchange_rate
    elif type(prices) == dict:
        for key in prices:
            if prices[key]['unit'][:3] == currencies[1]:
                continue
            else:
                prices[key]['value'] *= exchange_rate
                prices[key]['unit'] = currencies[1] + prices[key]['unit'][3:]

    return prices


def transport_costs(prices):
    # This function adds the transportation cost to the prices of the reagent and subtracts the transportation cost from the prices of the product.
    for key in prices:
        if key == 'FAME':
            # The code adds the cost of transporting biodiesel from south-eastern Asia to south China.
            # Those markups are calculated based on https://www.icis.com/resources/news/2013/11/08/9723077/se-asia-to-china-palm-oil-freight-rates-may-fall-on-weak-demand/
            tmp = prices[key][3:]
            # This loop calculates the converted prices.
            for i in range(len(tmp)):
                if type(tmp[i]) == float:
                    tmp[i] = tmp[i] - 40
            prices[key][3:] = tmp
        else:
            # The code adds the cost of transporting crude palm oil from Malaysia to Singapore.
            # Those markups are very vaguely based on https://www.icis.com/resources/news/2013/11/08/9723077/se-asia-to-china-palm-oil-freight-rates-may-fall-on-weak-demand/
            tmp = prices[key][3:]
            # This loop calculates the converted prices.
            for i in range(len(tmp)):
                if type(tmp[i]) == float:
                    tmp[i] = tmp[i] + 5
            prices[key][3:] = tmp

    return prices


def storage_cost(prices=None):
    # This function describes two options for pricing storage of biodiesel and crude palm oil.
    # Option 1 is entirely based on literature and data from the biodiesel plant on Jurong Island provided by Martin (for details see below).
    # Option 2 is based on the futures prices, where it is assumed that the differences in chronologically subsequent prices approximate storage prices.

    # Option 1 - Literature
    # Storage and associated costs per year per tonne of biodiesel and crude palm oil is 2.70 USD in 2017 (extrapolated from Martin's documents by adjusting for inflation using http://www.usinflationcalculator.com/ on 09.05.2017; ~3.5SGD or ~2.3USD in 2007 (price range 3-4 SGD in 2007); conversion from SGD to USD based on historical rates from http://www.x-rates.com/historical/?from=SGD&amount=1&date=2007-05-01)
    # Prices are converted to USD per tonne per month
    storage = {'CPO': 2.49 / 12, 'FAME': 2.49 / 12}

    # Option 2 - infer from futures prices
    # for key in prices:
    #     counter = 0
    #     while prices[key][4+counter] == None or prices[key][3+counter] == None:
    #         counter += 1
    #     if prices[key][4+counter]-prices[key][3+counter] > 0: storage[key] = prices[key][4+counter]-prices[key][3+counter]
    #     print(prices[key][4+counter]-prices[key][3+counter])

    return storage


def chemical_conversion(complexity='simple', aspen=None):
    # This function calculates the number of tonnes of crude palm oil required to produce a metric tonne of biodiesel.
    # Two modes are available: 'simple' (based on literature) and complex (based on an Aspen simulation).
    # The former assumes a constant conversion factor between biodiesel production and crude palm oil consumption.
    # The latter is a marginal analysis around the steady state within the model.
    if complexity == 'simple':
        # This analysis assumes that on average production of 1 tonne of biodiesel requires 1 tonne of crude palm oil.
        CPO_FAME = 1
        return CPO_FAME
    else:
        # This analysis calculates the ratio of tonnes of biodiesel to tonnes of crude palm oil based on the provided Aspen simulation.
        # Both biodiesel and palm oil are provided in kilograms per hour.
        try:
            CPO = aspen.Tree.FindNode(r"\Data\Streams\OIL\Output\MASSFLMX\MIXED").Value
            FAME = aspen.Tree.FindNode(r"\Data\Streams\FINALPRD\Output\MASSFLMX\MIXED").Value
        except:
            print('Invalid Aspen Plus address. Simple analysis will be performed.')
            return chemical_conversion()
        return CPO / FAME


def conversion_cost(aspen):
    # This function calculates the cost of producing an additional metric tonne of biodiesel based on the average utility consumption.
    # The pricing data has been taken from Martin's spreadsheets of the biodiesel plant and governmental data.
    # Historical conversion rates were taken from https://www.oanda.com/currency/converter/.
    # Adjustment for inflation was done using http://www.usinflationcalculator.com/.

    # Electricity costs (daily average): (0.1727+0.1051)/2 SGD per kWh (HIGH TENSION SMALL (HTS) SUPPLIES https://www.ema.gov.sg/Non_Residential_Programmes_Electricity_Tariffs.aspx)
    # HP Steam (2800kPa, 230C in HYSYS) 2.97 USD per tonne in 2007 (adjusting for inflation 3.49 USD on 09.05.2017)
    # MP Steam (600kPa, 160C in HYSYS) 10.75 USD per tonne in 2007 (adjusting for inflation 12.64 USD per tonne on 09.05.2017)
    # PROCESS WATER in spreadsheet 'utilities cost v19 (SCU) excl CPO refining.xls' in 'unit prices' see 'Industrial Water' 0.430 SGD per tonne in 2007 (converting to USD in 2007 and adjusting for inflation 0.33 USD until 09.05.2017)
    # COOLING WATER in spreadsheet 'utilities cost v19 (SCU) excl CPO refining.xls' in 'total cost' see 'Cooling water makeup' 1.48 USD per tonne in 2007 (adjusting for inflation 1.74 USD on 09.05.2017)
    # FUEL GAS in spreadsheet 'utilities cost v19 (SCU) excl CPO refining.xls' in 'unit prices' see 'Fuel gas' 14.85 SGD per mmBTU in 2007 (converting to USD in 2007 and adjusting for inflation 9.8 USD until 09.05.2017)

    # Realistic utility costs (in order of significance: COOLING WATER, MP STEAM, HP STEAM, FUEL GAS, PROCESS WATER, Elec)
    cost = {'MP STEAM': {'value': 12.64 / 1000, 'unit': 'USD per kg'},
            'PROCESS WATER': {'value': 0.33 / 1000, 'unit': 'USD per kg'},
            'COOLING WATER': {'value': 1.74 / 1000, 'unit': 'USD per kg'},
            'Elec': {'value': (0.1727 + 0.1051) / 2 / 3600, 'unit': 'SGD per kJ'},
            'FUEL GAS': {'value': 9.8, 'unit': 'USD per mmBTU'},
            'HP STEAM': {'value': 3.49 / 1000, 'unit': 'USD per kg'}}
    cost = exchange_rates(cost, ['SGD', 'USD'])

    # Read consumption rates from the provided HYSYS simulation
    consumption = {}
    aspen.Engine.Run2()
    consumption['FAME'] = {'value': aspen.Tree.FindNode(r"\Data\Streams\FINALPRD\Output\MASSFLMX\MIXED").Value, 'unit': 'kg/hr'}
    # kW are converted to kJ/hr
    consumption['Elec'] = {'value': aspen.Tree.FindNode(r"\Data\Streams\ELECLINE\Output\POWER_OUT").Value * 3600, 'unit': 'kJ/hr'}
    consumption['COOLING WATER'] = {'value': aspen.Tree.FindNode(r"\Data\Streams\FCW\Output\MASSFLMX\MIXED").Value, 'unit': 'kg/hr'}
    # kmol/hr are converted into l/hr (molar volume of methane at STD was taken from http://chemistry.tutorvista.com/inorganic-chemistry/molar-volume.html)
    # l/hr into Mcft/hr (google.com unit converter; 1 Mcft = 1000 cft)
    # Mcft/hr into mmBTU/hr (https://business.directenergy.com/understanding-energy/energy-tools/conversion-factors)
    consumption['FUEL GAS'] = {'value': aspen.Tree.FindNode(r"\Data\Streams\FFG\Output\MOLEFLMX\MIXED").Value * 22.4 * 0.0353147 * 0.9756, 'unit': 'mmBTU/hr'}
    consumption['HP STEAM'] = {'value': 0, 'unit': 'kg/hr'}
    consumption['MP STEAM'] = {'value': aspen.Tree.FindNode(r"\Data\Streams\FSTEAM\Output\MASSFLMX\MIXED").Value, 'unit': 'kg/hr'}
    consumption['PROCESS WATER'] = {'value': aspen.Tree.FindNode(r"\Data\Streams\FPW\Output\MASSFLMX\MIXED").Value, 'unit': 'kg/hr'}

    # Calculate cost per tonne of biodiesel
    total = 0
    for key in cost:
        total += consumption[key]['value'] * cost[key]['value']

    return total / consumption['FAME']['value']


def look_for_munnies(SimAddress, prices, dates):
    # This function puts together all the data and looks for the most profitable arbitrage opportunity using a brute force approach (i.e. checking all possible combinations subject to a chronological order constraint).

    # The crude palm oil to biodiesel conversion ratio and the average conversion cost per tonne of biodiesel are retrieved with the code below.
    # In case this cannot be done, a simple analysis will be performed.
    try:
        # Connect to an existing simulation or launch a new instance
        aspen = win32.GetObject(SimAddress)
        # Retrieve the average conversion cost per tonne of biodiesel
        conv = conversion_cost(aspen)
        # how many tonnes of crude palm oil are needed to produce 1 tonne of biodiesel
        CPO_FAME = chemical_conversion('complex', aspen)
    except:
        print('Incorrect model address or wrong model item address. Simple analysis will be executed.')
        CPO_FAME = chemical_conversion()

    # The storage costs per month-tonne are retrieved
    storage = storage_cost()

    # This loop searches through opportunities given that all crude palm oil is stored and that conversion is instantaneous
    lowest_diff = {'price difference': -inf, 'month_CPO': None, 'month_FAME': None}
    for j in range(3, len(prices['FAME'])):
        if prices['FAME'][j] == None:
            continue
        for i in range(3, len(prices['CPO'])):
            if prices['CPO'][i] == None:
                continue
            if i + 1 >= j:
                continue
            diff = prices['FAME'][j] - CPO_FAME * (prices['CPO'][i] + storage['CPO'] * (j - i)) - conv
            # print(diff)
            if diff > lowest_diff['price difference']:
                lowest_diff = {'price difference': diff,
                               'month_CPO': dates['CPO'][i],
                               'month_FAME': dates['CPO'][j - 1],
                               'note': 'Note that all crude palm oil was stored and instantaneously converted on delivery date.'}

    # This loop searches through opportunities given that all biodiesel is stored and that conversion is instantaneous
    for j in range(3, len(prices['FAME'])):
        if prices['FAME'][j] == None:
            continue
        for i in range(3, len(prices['CPO'])):
            if prices['CPO'][i] == None:
                continue
            if i + 1 >= j:
                continue
            diff = prices['FAME'][j] - CPO_FAME * prices['CPO'][i] - storage['FAME'] * (j - i) - conv
            # print(diff)
            if diff > lowest_diff['price difference']:
                lowest_diff = {'price difference': diff,
                               'month_CPO': dates['CPO'][i],
                               'month_FAME': dates['CPO'][j - 1],
                               'note': 'Note that all crude palm oil was instantaneously converted on arrival and biodiesel FAME was stored until delivery date.'}

    print('The highest marginal profit per tonne of biodiesel FAME is', round(lowest_diff['price difference'], 2),
          'USD. The futures contracts need to be accepted at the following ratio of reagent to product:',
          CPO_FAME / prices['CPO'][2] * prices['FAME'][2],
          '. Buy crude palm oil futures contracts with delivery in', lowest_diff['month_CPO'],
          'and sell biodiesel FAME futures contracts with delivery in', lowest_diff['month_FAME'], '.',
          lowest_diff['note'])


def plotting_prices(dates, prices, labels, plot_address):
    # This function plots the futures prices for two commodities. It assumes that there may be None values between the headers and the prices. It searches for them and they are excluded from the plot.

    # The loop and if statements below segregate the keys based on the length of the dates array.
    keys = []
    for key in dates:
        keys.append(key)
    if len(dates[keys[0]]) > len(dates[keys[1]]):
        key_1 = keys[1]
        key_2 = keys[0]
    else:
        key_1 = keys[0]
        key_2 = keys[1]

    # The lower and upper bounds are calculated based on the number of None values within the prices array.
    lower_bound = 3
    upper_bound = len(dates[key_1])

    # Loop counting None values between the headers and prices
    for item in prices[key_1][3:]:
        if item == None:
            lower_bound += 1
        else:
            break

    # Loop counting None values at the back. The prices array needs to be reversed for the loop and then reversed back to the original
    prices[key_1].reverse()
    for item in prices[key_1]:
        if item == None:
            upper_bound -= 1
        else:
            break
    prices[key_1].reverse()

    # x-axis labels are defined from a user array. Here it depends on the outcome of the above calculations.
    x_labels = labels['x']['label'][lower_bound:upper_bound]

    # Changing font size
    plt.rcParams.update({'font.size': 22})

    # The lines below put together a figure containing two plots, one for each commodity. Labels and titles come from the user-defined dictionary.
    plt.figure(figsize=(20.0, 12.5))
    plt.subplot(212)
    plt.xlabel(labels['x']['title'])
    plt.ylabel(labels[key_1]['label'])
    plt.title(labels[key_1]['title'])
    plt.xticks(range(len(x_labels)), x_labels)
    # The line below defines the style. For more information see the Python manual.
    plt.plot(prices[key_1][lower_bound:upper_bound], 'k', prices[key_1][lower_bound:upper_bound], 'bo')

    plt.subplot(211)
    plt.ylabel(labels[key_2]['label'])
    plt.title(labels[key_2]['title'])
    plt.xticks(range(len(x_labels)), [])
    # The line below defines the style. For more information see the Python manual.
    plt.plot(prices[key_2][lower_bound:upper_bound], 'k', prices[key_2][lower_bound:upper_bound], 'ro')

    plt.show()
    # plt.savefig(r'C:\Users\Janusz\Desktop\Commodity_prices\Market_data\arbitrage_CPO.png')
    # plt.savefig(plot_address)


def run(plot_address):
    # Define address of a relevant Aspen Plus model
    SimAddress = win32api.GetLongPathName(r"C:\Users\Janusz\Desktop\Commodity_prices\Biodiesel Final Simulation_20160429\Jbiod_WWHR_23052017.apw")
    # given the simulation address connect to an existing simulation or create a new COM instance
    aspen = win32.GetObject(SimAddress)

    file_addresses = {
        'CPO': r'C:\Users\Janusz\Desktop\Commodity_prices\Market_data\CPO_data.csv',
        'FAME': r'C:\Users\Janusz\Desktop\Commodity_prices\Market_data\FAME_data.csv'
    }

    # Read in and process data into an appropriate format
    dates, prices = preprocessing(file_addresses)

    # Adjust prices to include the transport cost
    prices = transport_costs(prices)

    # Search through the arbitrage opportunities
    look_for_munnies(SimAddress, prices, dates)

    # Define titles and labels to plot the futures prices data and plot the data
    labels = {'FAME': {'title': 'Biodiesel FAME futures prices from Chicago Mercantile Exchange', 'label': 'Price (USD per tonne)'},
              'CPO': {'title': 'Crude palm oil futures prices from Chicago Mercantile Exchange', 'label': 'Price (USD per tonne)'},
              'x': {'title': 'Delivery date (-)', 'label': dates['FAME']}}
    plotting_prices(dates, prices, labels, plot_address)


if __name__ == "__main__":
    run(str(sys.argv[1]))
UTF-8
Python
false
false
18,332
py
9,300
CPO_to_FAME_Aspen.py
4,926
0.688652
0.666776
0
311
57.935691
482
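The brute-force schedule search in look_for_munnies reduces to a max over chronologically ordered (buy, sell) month pairs. A toy standalone version of that inner search, with all numbers invented for the demo:

from math import inf

cpo = [600.0, 605.0, 610.0, None, 620.0]    # reagent futures, USD/tonne by month
fame = [700.0, 702.0, 760.0, 765.0, 770.0]  # product futures, USD/tonne by month
storage, conv, ratio = 2.49 / 12, 25.0, 1.0  # per-month storage, conversion cost, tonnes CPO per tonne FAME

best = {'profit': -inf}
for j, sell in enumerate(fame):
    for i, buy in enumerate(cpo):
        if buy is None or sell is None or i + 1 >= j:
            continue  # delivery must come strictly after purchase
        # Buy early, pay storage for the gap, convert, sell on delivery.
        profit = sell - ratio * (buy + storage * (j - i)) - conv
        if profit > best['profit']:
            best = {'profit': profit, 'buy_month': i, 'sell_month': j}

print(best)  # here: buy in month 0, sell in month 4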
sahu1993/Data-Analytics-Pipeline-Using-Apache-Spark
4,672,924,451,047
a09864717deb419ce244500f0476c72815d8c39a
1abb87f9bbcc40ab026b0f4637c29c8c28c2bd24
/lab3_pyspak.py
548bbefe1fb79da876aed4b7a1672f4fec440355
[]
no_license
https://github.com/sahu1993/Data-Analytics-Pipeline-Using-Apache-Spark
c3dad3d05efa2390f24693399dca962cd89a7608
c78b2a49a25710c180cd190359075da22e8be02d
refs/heads/master
"2020-03-27T00:39:57.806138"
"2018-08-22T02:28:06"
"2018-08-22T02:28:06"
145,643,650
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from pyspark.ml.feature import HashingTF, IDF, NGram
from pyspark.sql import SparkSession
from pyspark.ml.feature import StopWordsRemover
from operator import add
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.classification import NaiveBayes
from pyspark.mllib.evaluation import MulticlassMetrics
import pyspark.sql.functions as f
import sys
import re


def tokens(context):
    file = context[0]
    words = re.sub('[^a-z0-9]+', ' ', context[1].lower()).split()
    file = file.split("/")[-1]
    # Class Label 0 - Sports, 1 - Politics, 2 - Business, 3 - Education
    if re.match(r'Sportsfile.*', file):
        return (0.0, file, words)
    elif re.match(r'Politicsfile.*', file):
        return (1.0, file, words)
    elif re.match(r'Businessfile.*', file):
        return (2.0, file, words)
    else:
        return (3.0, file, words)


if __name__ == "__main__":
    conf = SparkConf()
    conf.setAppName("part1")
    conf.set("spark.executor.memory", "2g")
    sc = SparkContext.getOrCreate(conf=conf)

    # reading input
    lines = sc.wholeTextFiles("data/Sports")
    # configuring SparkSession
    spark = SparkSession(sc)
    hasattr(lines, "toDF")
    # tokenizing the words and converting into dataframes
    tokenizeDf0 = lines.map(tokens).toDF(["label", "fileName", "words"])

    lines = sc.wholeTextFiles("data/Politics")
    hasattr(lines, "toDF")
    tokenizeDf1 = lines.map(tokens).toDF(["label", "fileName", "words"])

    lines = sc.wholeTextFiles("data/Business")
    hasattr(lines, "toDF")
    tokenizeDf2 = lines.map(tokens).toDF(["label", "fileName", "words"])

    lines = sc.wholeTextFiles("data/Education")
    hasattr(lines, "toDF")
    tokenizeDf3 = lines.map(tokens).toDF(["label", "fileName", "words"])

    result_tokenize_data_1 = tokenizeDf0.union(tokenizeDf1)
    result_tokenize_data_2 = tokenizeDf2.union(tokenizeDf3)
    tokenizeDf = result_tokenize_data_1.union(result_tokenize_data_2)

    # removing the Stop words
    remover = StopWordsRemover(inputCol="words", outputCol="filtered")
    filteredWordsDf = remover.transform(tokenizeDf)

    # finding the tf value
    hashingTF = HashingTF(inputCol="filtered", outputCol="rawFeatures")
    tf = hashingTF.transform(filteredWordsDf)

    # finding the idf value
    idf = IDF(inputCol="rawFeatures", outputCol="features")
    idfModel = idf.fit(tf)
    rescaledData = idfModel.transform(tf)

    (trainingDataLR, testDataLR) = rescaledData.randomSplit([0.8, 0.2], seed=100)
    (trainingDataNB, testDataNB) = rescaledData.randomSplit([0.8, 0.2], seed=100)

    lines = sc.wholeTextFiles("data/UnknownSetData")
    hasattr(lines, "toDF")
    tokenizeDf4 = lines.map(tokens).toDF(["label", "fileName", "words"])

    # removing the Stop words
    remover = StopWordsRemover(inputCol="words", outputCol="filtered")
    filteredWordsDf = remover.transform(tokenizeDf4)

    # finding the tf value
    hashingTF = HashingTF(inputCol="filtered", outputCol="rawFeatures")
    tf = hashingTF.transform(filteredWordsDf)

    # finding the idf value
    idf = IDF(inputCol="rawFeatures", outputCol="features")
    idfModel = idf.fit(tf)
    testDataUnknownSetLR = idfModel.transform(tf)
    testDataUnknownSetRF = idfModel.transform(tf)
    testDataUnknownSetNB = idfModel.transform(tf)

    #### Machine Learning ####
    lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)
    logisticRegressionModel = lr.fit(trainingDataLR)

    predictions = logisticRegressionModel.transform(testDataLR)
    # predictions.select("fileName","probability","label","prediction").show()
    # predictions.select("fileName","probability","label","prediction").rdd.saveAsTextFile("data/output")
    evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
    accuracy = evaluator.evaluate(predictions)
    print("AccuracyLRTest = %g" % (accuracy))
    predictionsLabels = predictions.select("prediction", "label").rdd
    metrics = MulticlassMetrics(predictionsLabels)
    confusionMatrix = metrics.confusionMatrix().toArray()
    print(confusionMatrix)

    predictions = logisticRegressionModel.transform(testDataUnknownSetLR)
    # predictions.select("fileName","probability","label","prediction").show()
    # predictions.select("fileName","probability","label","prediction").rdd.saveAsTextFile("data/output")
    evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
    accuracy = evaluator.evaluate(predictions)
    print("AccuracyLRTestUnknow = %g" % (accuracy))
    predictionsLabels = predictions.select("prediction", "label").rdd
    metrics = MulticlassMetrics(predictionsLabels)
    confusionMatrix = metrics.confusionMatrix().toArray()
    print(confusionMatrix)

    nb = NaiveBayes(smoothing=1)
    naiveBayesModel = nb.fit(trainingDataNB)

    predictions = naiveBayesModel.transform(testDataNB)
    # predictions.select("fileName","probability","label","prediction").show()
    # predictions.select("fileName","probability","label","prediction").rdd.saveAsTextFile("data/outputNB1")
    evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
    accuracy = evaluator.evaluate(predictions)
    print("AccuracyNBTest = %g" % (accuracy))
    predictionsLabels = predictions.select("prediction", "label").rdd
    metrics = MulticlassMetrics(predictionsLabels)
    confusionMatrix = metrics.confusionMatrix().toArray()
    print(confusionMatrix)

    predictions = naiveBayesModel.transform(testDataUnknownSetNB)
    # predictions.select("fileName","probability","label","prediction").show()
    # predictions.select("fileName","probability","label","prediction").rdd.saveAsTextFile("data/outputNB1")
    evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
    accuracy = evaluator.evaluate(predictions)
    print("AccuracyNBTestUnknown = %g" % (accuracy))
    predictionsLabels = predictions.select("prediction", "label").rdd
    metrics = MulticlassMetrics(predictionsLabels)
    confusionMatrix = metrics.confusionMatrix().toArray()
    print(confusionMatrix)

    sc.stop()
UTF-8
Python
false
false
6,305
py
37
lab3_pyspak.py
2
0.72276
0.714036
0
156
39.416667
107
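A pure-Python sketch of the filename-to-label mapping that tokens() performs in the pipeline above, runnable without a Spark cluster (the sample paths are invented):

import re


def label_for(path):
    # Class label: 0 - Sports, 1 - Politics, 2 - Business, 3 - Education
    name = path.split("/")[-1]
    if re.match(r'Sportsfile.*', name):
        return 0.0
    elif re.match(r'Politicsfile.*', name):
        return 1.0
    elif re.match(r'Businessfile.*', name):
        return 2.0
    return 3.0


print(label_for("data/Sports/Sportsfile12.txt"))  # 0.0
print(label_for("data/Education/Edufile3.txt"))   # 3.0 (the fall-through case)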
Arash-Pouya/electroshop
8,727,373,580,423
1f61ec9b3f4946e624e5f2234213c2e3aad60c39
cee3857fc3380501b2d510574db75a2a811f4150
/shop/models.py
98b28df6b1df8700a39070dd776a04db4f0c1aed
[]
no_license
https://github.com/Arash-Pouya/electroshop
c0d08da42aee9455b42a31b243fe6fccab37329d
9ec6d31cd4b353fa6507a0ca82b0f6cc5b7fd5ea
refs/heads/master
"2023-06-19T21:44:00.806592"
"2021-07-14T18:39:53"
"2021-07-14T18:39:53"
386,035,942
1
0
null
true
"2021-07-14T18:41:12"
"2021-07-14T18:16:37"
"2021-07-14T18:39:56"
"2021-07-14T18:41:01"
2,441
0
0
2
Python
false
false
from django.db import models
from django.urls import reverse
from django.conf import settings
from django.utils.text import slugify
from django.core.validators import MinValueValidator, MaxValueValidator
from django.contrib.contenttypes.fields import GenericRelation

from wagtail.core.fields import RichTextField

from profile.models import discount_model_validator
from user_activity.models import Comment

from django_countries.fields import CountryField
from taggit.managers import TaggableManager

from datetime import datetime
import uuid
import os

# CATEGORY_DEFAULT_BACKGROUND = os.path.join(settings.BASE_DIR, 'shop', 'static', 'images', 'mainboard.jpg')


def founded_choice():
    min_year = 1900
    max_year = datetime.now().year
    year_list = [('< ' + str(min_year), '< ' + str(min_year))]
    for year in range(min_year, max_year + 1):
        year_list.append((str(year), str(year)))
    return year_list


def category_directory_path(instance, filename):
    return f'category_{instance.name}_bg/{filename}'


def brand_directory_path(instance, filename):
    return f'logo_{instance.name}/{filename}'


def product_directory_path(instance, filename):
    # To save product logo in custom path
    return f'{instance.brand.name}_{instance.name}_{instance.id}/{filename}'


class Category(models.Model):
    name = models.CharField(max_length=30)
    name_persian = models.CharField(max_length=30, blank=True)
    background_image = models.ImageField(upload_to=category_directory_path, default='category_default.jpg')
    tags = TaggableManager(blank=True)
    slug = models.SlugField()

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = slugify(self.name)
        super().save(*args, **kwargs)


class Brand(models.Model):
    name = models.CharField(max_length=30)
    country = CountryField(blank=True)
    logo = models.ImageField(upload_to=brand_directory_path, blank=True)
    founded = models.CharField(max_length=6, blank=True, choices=founded_choice())
    founder = models.CharField(max_length=10, blank=True)
    tags = TaggableManager(blank=True)
    slug = models.SlugField()

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = slugify(self.name)
        super().save(*args, **kwargs)


class Product(models.Model):
    product_id = models.CharField(default=uuid.uuid4, max_length=36)
    name = models.CharField(max_length=30)
    category = models.ManyToManyField(to=Category, related_name='product_category')
    brand = models.ForeignKey(to=Brand, related_name='Product_brand', on_delete=models.CASCADE)
    price = models.PositiveIntegerField(default=0)
    in_stock = models.PositiveIntegerField(default=0)
    available = models.BooleanField(default=False)
    discount_value = models.PositiveIntegerField(default=0)
    discount_percent = models.FloatField(validators=[MaxValueValidator(100), MinValueValidator(0), discount_model_validator], default=0)
    special_offer = models.BooleanField(default=False)
    picture = models.ImageField(blank=True, upload_to=product_directory_path)
    description = RichTextField(blank=True)
    review = RichTextField(blank=True)
    tag = TaggableManager(blank=True)
    slug = models.SlugField()
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    comment = GenericRelation(Comment)

    class Meta:
        ordering = ['name', 'available']

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = slugify(f'{self.brand.name}_{self.name}')
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('shop:product_detail', kwargs={'product_id': self.product_id})


class ProductImages(models.Model):
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='product_images')
    image = models.ImageField(blank=True)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f'{self.product.brand.name}_{self.product.name}_image{self.id}'


class Camera(models.Model):
    product = models.OneToOneField(Product, on_delete=models.CASCADE, related_name='camera', null=True)
    main_camera = models.CharField(max_length=20)
    lcd = models.CharField(max_length=20)
UTF-8
Python
false
false
5,013
py
62
models.py
49
0.635348
0.629164
0
145
33.572414
108
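founded_choice() in the models above builds a Django choices list of year strings; a standalone sketch of the same construction (the min_year parameter is a small generalization added for the demo):

from datetime import datetime


def founded_choice(min_year=1900):
    max_year = datetime.now().year
    choices = [('< ' + str(min_year), '< ' + str(min_year))]
    for year in range(min_year, max_year + 1):
        choices.append((str(year), str(year)))
    return choices


choices = founded_choice()
print(choices[0])   # ('< 1900', '< 1900')
print(choices[1])   # ('1900', '1900')
print(choices[-1])  # the current year, e.g. ('2024', '2024')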
shakibaniataha/script-manager
5,686,536,713,826
50f1af949336a7c9bccb54cbf67b65b3ccdf6d64
ec80e84042e87a01da51937b6d4ffc01eb87c848
/main/models.py
b1983c10c0223a8a79b533727e8beddded08e3f7
[]
no_license
https://github.com/shakibaniataha/script-manager
276fd5fb4e5426b388c7da17603d835fe4625967
55c78afb6a138b536e7f9c049bac23fab4ab81d5
refs/heads/master
"2021-09-03T13:10:26.711594"
"2018-01-09T08:51:08"
"2018-01-09T08:51:08"
113,153,933
1
1
null
false
"2022-11-20T19:10:03"
"2017-12-05T08:24:28"
"2018-01-06T10:29:21"
"2018-01-09T08:51:14"
236
1
0
0
JavaScript
false
false
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models
from django.contrib.auth.models import User

from validators import validate_comma_separated, validate_command


class API(models.Model):
    name = models.CharField(max_length=100, unique=True)
    date_added = models.DateTimeField(auto_now_add=True)
    command = models.CharField(max_length=200, validators=[validate_command])
    output_files = models.CharField(max_length=200, blank=True, validators=[validate_comma_separated])
    description = models.TextField()
    num_input_params = models.IntegerField(default=0)
    authorized_groups = models.CharField(default='guest', max_length=200, validators=[validate_comma_separated])

    def __str__(self):
        return self.name


class Request(models.Model):
    REQUEST_STATUS = (
        ('processing', 'Processing'),
        ('finished', 'Finished'),
    )

    api_id = models.ForeignKey(API)
    input_params = models.CharField(max_length=200, blank=True, validators=[validate_comma_separated])
    owner = models.ForeignKey(User, null=True)
    status = models.CharField(max_length=20, choices=REQUEST_STATUS)
    date_added = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.id) + ": " + self.api_id.name
UTF-8
Python
false
false
1,301
py
20
models.py
12
0.704074
0.68947
0
35
36.2
112
suiguan/WebPageRecommander
5,823,975,668,817
809c14bcbda1f95d10bb5887ad79891aab9b996f
ee982a4821094c96c377df3c87e25f697602a194
/WeblogParser.py
a78c537e428a522c4842b814190d16c814a00a3c
[]
no_license
https://github.com/suiguan/WebPageRecommander
837f2477de48043eaf42aec6bfa04cd9a4cbcfa8
fd55958fb82d120417d165e08cfbe42213a9aa16
refs/heads/master
"2021-01-19T15:25:39.571487"
"2017-05-08T23:39:29"
"2017-05-08T23:39:29"
88,214,691
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys
import operator


class Weblog_Parser:
    def __init__(self, input_weblog):
        self.weblog = input_weblog
        self.next_web_id = 0
        self.web_id_table = {}
        self.user_access_log_table = {}

    def get_web_id(self, webpage):
        if not webpage in self.web_id_table:
            self.web_id_table[webpage] = self.next_web_id
            self.next_web_id += 1
        return self.web_id_table[webpage]

    def add_user_access(self, user, webpage):
        wid = self.get_web_id(webpage)
        if not user in self.user_access_log_table:
            self.user_access_log_table[user] = set([])
        self.user_access_log_table[user].add(wid)

    def dump_formatted_log(self):
        for user in self.user_access_log_table.keys():
            line = ""
            for web in self.user_access_log_table[user]:
                line += "%d," % web
            line = line[:-1] + "\n"
            self.formatted_weblog.write(line)

    def dump_web_id_table(self):
        sorted_table = sorted(self.web_id_table.items(), key=operator.itemgetter(1))
        for web, wid in sorted_table:
            self.weblog_id_lookup.write("%d --- %s\n" % (wid, web))

    def parse(self, out_formatted_log, out_id_lookup):
        # open all files
        self.input_weblog = open(self.weblog, "r")
        self.formatted_weblog = open(out_formatted_log, "w")
        self.weblog_id_lookup = open(out_id_lookup, "w")

        # start parsing
        # an example line of a weblog:
        # 199.72.81.55 - - [01/Jul/1995:00:00:01 -0400] "GET /history/apollo/ HTTP/1.0" 200 6245
        for line in self.input_weblog:
            try:
                tokens = line.split()
                user = tokens[0]
                resp_code = int(tokens[-2])
                web_req = line.split('"')[1]
                web_req_tokens = web_req.split()
                if len(web_req_tokens) < 2:
                    continue  # ignore requests that don't have a valid HTTP request method
                webpage = web_req.split()[1]
                if resp_code >= 200 and resp_code <= 299:  # only record valid HTTP response codes
                    self.add_user_access(user, webpage)
                    # TODO: also consider if the URL belongs to the same webpage previously seen
            except Exception as e:
                print("ignore invalid formatted line: %s" % line)
                # print(e)
                continue

        # the first line of the formatted web log contains two numbers separated by a comma:
        # the first is the total number of webpages in the domain,
        # the second is the total number of users
        self.formatted_weblog.write("%d,%d\n" % (len(self.web_id_table.keys()), len(self.user_access_log_table.keys())))

        # save outputs
        self.dump_formatted_log()
        self.dump_web_id_table()

        # close all files
        self.input_weblog.close()
        self.formatted_weblog.close()
        self.weblog_id_lookup.close()


def usage(prog):
    print("Usage: python %s <weblog file> <formatted web log output filename> <web id lookup filename>" % prog)
    print("   where in <weblog>, each line has the format: host - timestamp - request - HTTP reply code - bytes in the reply")
    print("   in <formatted web log>, the first line is <number of web pages>,<number of users>; each following line lists the web pages accessed by one user (user id hidden)")
    print("   in <web id lookup file>, we can find out which id means which web page")
    sys.exit(-1)


def main(argv):
    if len(argv) != 4:
        usage(argv[0])
    parser = Weblog_Parser(argv[1])
    parser.parse(argv[2], argv[3])


if __name__ == "__main__":
    main(sys.argv)
UTF-8
Python
false
false
3,533
py
5
WeblogParser.py
3
0.60685
0.591282
0
92
37.402174
191
jraygauthier/nixos-secure-factory
2,765,958,971,024
dc3fd311dd296dee68525a723fa4ffbea734e78d
41c71d20b60754b0e1a3cfbace693b863d03c977
/scripts/factory-install/py/tests/test_misc.py
1ab9ac3aaa873f0ebcfa3ba9366927a78d75b3bf
[ "Apache-2.0" ]
permissive
https://github.com/jraygauthier/nixos-secure-factory
e4f85cc67cdee472ac9273dcebe34e036432bf67
67adb93b153902226301d4a68e7b6e3a0af660c2
refs/heads/master
"2022-10-02T06:04:38.479606"
"2021-03-31T16:44:29"
"2021-03-31T16:45:43"
199,702,396
2
2
Apache-2.0
false
"2021-11-11T13:46:02"
"2019-07-30T18:00:37"
"2021-04-17T10:13:42"
"2021-11-11T13:45:39"
931
1
2
0
Shell
false
false
from nsf_factory_install import my_fn


def test_misc():
    my_fn()
UTF-8
Python
false
false
69
py
332
test_misc.py
216
0.666667
0.666667
0
5
12.8
37
juraj80/myPythonCookbook
1,623,497,653,866
78f3b646e0397c229dd4b5e5b3801211de3f7ecb
a24391f3bc3ba4f73db66c723964fe3e91a2f63c
/days/52-54-feedparser/code_starter/parser.py
d80a822bd46cee504281928918efd277d8d837b9
[]
no_license
https://github.com/juraj80/myPythonCookbook
41ccbe6262ea53428f04ba2ea80c90c624e2751d
f543d962ec75a3adadf0164c7ebf848b4e149296
refs/heads/master
"2020-04-14T05:38:32.689829"
"2019-03-31T20:33:26"
"2019-03-31T20:33:26"
163,664,856
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import feedparser

FEED_FILE = "newreleases.xml"

print(FEED_FILE)
# feed = feedparser.parse(FEED_FILE)
feed = feedparser.parse('newreleases.xml')

print('Feed Title: {}'.format(feed['feed']['title']))

if 'title' in feed.entries[0]:
    for entry in feed.entries:
        print(f'{entry.published} - {entry.title} : {entry.link}')
UTF-8
Python
false
false
332
py
83
parser.py
55
0.677711
0.674699
0
13
24.538462
66
TrendingTechnology/hspylib
12,051,678,274,976
1402a2a640041594e464cddeff1e5e224a4bdfce
153da69b35f032f5b83a06f17008ba41a1b336b4
/src/main/hspylib/core/enums/http_code.py
95211125328f1be82ad6c933f1f68a3f7ae5cb16
[ "MIT" ]
permissive
https://github.com/TrendingTechnology/hspylib
6400cadf9dfe6ab5733712dcfeccf8022d61c589
c79a2c17e89fe21d00ccd9c1646a03407cd61839
refs/heads/master
"2023-06-20T15:47:35.962661"
"2021-07-19T22:12:18"
"2021-07-19T23:45:41"
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
   TODO Purpose of the file
   @project: HSPyLib
      hspylib.main.hspylib.core.enum
      @file: http_code.py
   @created: Tue, 4 May 2021
    @author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior
      @site: https://github.com/yorevs/hspylib
   @license: MIT - Please refer to <https://opensource.org/licenses/MIT>

   Copyright 2021, HSPyLib team
"""
from http import HTTPStatus

from hspylib.core.enums.enumeration import Enumeration


class HttpCode(Enumeration):
    # 2xx Success
    OK = HTTPStatus.OK
    CREATED = HTTPStatus.CREATED
    ACCEPTED = HTTPStatus.ACCEPTED
    NON_AUTHORITATIVE_INFORMATION = HTTPStatus.NON_AUTHORITATIVE_INFORMATION
    NO_CONTENT = HTTPStatus.NO_CONTENT
    RESET_CONTENT = HTTPStatus.RESET_CONTENT
    PARTIAL_CONTENT = HTTPStatus.PARTIAL_CONTENT
    MULTI_STATUS = HTTPStatus.MULTI_STATUS
    IM_USED = HTTPStatus.IM_USED

    # 3xx Redirection
    MULTIPLE_CHOICES = HTTPStatus.MULTIPLE_CHOICES
    MOVED_PERMANENTLY = HTTPStatus.MOVED_PERMANENTLY
    FOUND = HTTPStatus.FOUND
    SEE_OTHER = HTTPStatus.SEE_OTHER
    NOT_MODIFIED = HTTPStatus.NOT_MODIFIED
    USE_PROXY = HTTPStatus.USE_PROXY
    TEMPORARY_REDIRECT = HTTPStatus.TEMPORARY_REDIRECT
    PERMANENT_REDIRECT = HTTPStatus.PERMANENT_REDIRECT

    # 4xx Client Error
    BAD_REQUEST = HTTPStatus.BAD_REQUEST
    UNAUTHORIZED = HTTPStatus.UNAUTHORIZED
    PAYMENT_REQUIRED = HTTPStatus.PAYMENT_REQUIRED
    FORBIDDEN = HTTPStatus.FORBIDDEN
    NOT_FOUND = HTTPStatus.NOT_FOUND
    METHOD_NOT_ALLOWED = HTTPStatus.METHOD_NOT_ALLOWED
    NOT_ACCEPTABLE = HTTPStatus.NOT_ACCEPTABLE
    PROXY_AUTHENTICATION_REQUIRED = HTTPStatus.PROXY_AUTHENTICATION_REQUIRED
    REQUEST_TIMEOUT = HTTPStatus.REQUEST_TIMEOUT
    CONFLICT = HTTPStatus.CONFLICT
    GONE = HTTPStatus.GONE
    LENGTH_REQUIRED = HTTPStatus.LENGTH_REQUIRED
    PRECONDITION_FAILED = HTTPStatus.PRECONDITION_FAILED
    PAYLOAD_TOO_LARGE = 413
    REQUEST_URI_TOO_LONG = HTTPStatus.REQUEST_URI_TOO_LONG
    UNSUPPORTED_MEDIA_TYPE = HTTPStatus.UNSUPPORTED_MEDIA_TYPE
    REQUESTED_RANGE_NOT_SATISFIABLE = HTTPStatus.REQUESTED_RANGE_NOT_SATISFIABLE
    EXPECTATION_FAILED = HTTPStatus.EXPECTATION_FAILED
    UNPROCESSABLE_ENTITY = HTTPStatus.UNPROCESSABLE_ENTITY
    LOCKED = HTTPStatus.LOCKED
    FAILED_DEPENDENCY = HTTPStatus.FAILED_DEPENDENCY
    UPGRADE_REQUIRED = HTTPStatus.UPGRADE_REQUIRED
    PRECONDITION_REQUIRED = HTTPStatus.PRECONDITION_REQUIRED
    TOO_MANY_REQUESTS = HTTPStatus.TOO_MANY_REQUESTS
    REQUEST_HEADER_FIELDS_TOO_LARGE = HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE
    CONNECTION_CLOSED_WITHOUT_RESPONSE = 444
    UNAVAILABLE_FOR_LEGAL_REASONS = 451
    CLIENT_CLOSED_REQUEST = 499

    # 5xx Server Error
    INTERNAL_SERVER_ERROR = HTTPStatus.INTERNAL_SERVER_ERROR
    NOT_IMPLEMENTED = HTTPStatus.NOT_IMPLEMENTED
    BAD_GATEWAY = HTTPStatus.BAD_GATEWAY
    SERVICE_UNAVAILABLE = HTTPStatus.SERVICE_UNAVAILABLE
    GATEWAY_TIMEOUT = HTTPStatus.GATEWAY_TIMEOUT
    HTTP_VERSION_NOT_SUPPORTED = HTTPStatus.HTTP_VERSION_NOT_SUPPORTED
    VARIANT_ALSO_NEGOTIATES = HTTPStatus.VARIANT_ALSO_NEGOTIATES
    INSUFFICIENT_STORAGE = HTTPStatus.INSUFFICIENT_STORAGE
    LOOP_DETECTED = HTTPStatus.LOOP_DETECTED
    NOT_EXTENDED = HTTPStatus.NOT_EXTENDED
    NETWORK_AUTHENTICATION_REQUIRED = HTTPStatus.NETWORK_AUTHENTICATION_REQUIRED
    NETWORK_CONNECT_TIMEOUT_ERROR = 599
UTF-8
Python
false
false
3,401
py
311
http_code.py
249
0.752132
0.743311
0
83
39.975904
80
SebastianFGarcia/Django-AngularLearn
3,530,463,150,767
ba411d4ce91518247b4e0b4113b64dddefb5281f
5db9b8951392c439d209d4a8d4433c331337839b
/DjangoAPI/EmployeApp/models.py
b14042319b4708b3ca3372bf1c7bd3f4ce0b37bd
[]
no_license
https://github.com/SebastianFGarcia/Django-AngularLearn
72e9bddef792ac9eeec77a9cecd8c7e34f07177f
11ceac7c6b68bc9dc89cdb405b39c0654882e6cd
refs/heads/master
"2023-03-27T16:39:01.617173"
"2021-03-28T05:43:37"
"2021-03-28T05:43:37"
350,046,851
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.db import models


class Departaments(models.Model):
    DepartamentId = models.AutoField(primary_key = True)
    DepartamentName = models.CharField(max_length = 100)


class Employes(models.Model):
    EmployeId = models.AutoField(primary_key = True)
    EmployeName = models.CharField(max_length = 100)
    Departament = models.CharField(max_length = 100)
    DateOfJoining = models.DateField()
    PhotoFileName = models.CharField(max_length = 100)
UTF-8
Python
false
false
461
py
13
models.py
10
0.741866
0.715835
0
12
37.5
56
merc-devel/merc
17,248,588,670,981
9a3e4ab029242b6436d328d4c6ebd32301ae8df6
c02b157399f2ede41abf5119e57f94bfe18c713d
/merc/features/rfc1459/ban.py
a7c2fda51ae99dcb094c2cfbdeb739610fc56e99
[ "MIT" ]
permissive
https://github.com/merc-devel/merc
b366befb6285af984c2da7eabdd1063f16e0414e
15e010db2474b5d9f9720fc83983b03c95063a02
refs/heads/master
"2021-01-18T17:15:33.553125"
"2014-11-08T03:02:30"
"2014-11-08T03:02:30"
25,289,852
4
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import collections
import datetime

from merc import errors
from merc import feature
from merc import message
from merc import mode


BanDetail = collections.namedtuple("BanDetail", ["server", "creation_time"])


class BanFeature(feature.Feature):
  NAME = __name__


install = BanFeature.install


class BanList(message.Reply):
  NAME = "367"
  MIN_ARITY = 4

  def __init__(self, channel_name, mask, server_name, creation_time, *args):
    self.channel_name = channel_name
    self.mask = mask
    self.server_name = server_name
    self.creation_time = creation_time

  def as_reply_params(self):
    return [self.channel_name, self.mask, self.server_name,
            self.creation_time]


class EndOfBanList(message.Reply):
  NAME = "368"
  FORCE_TRAILING = True
  MIN_ARITY = 2

  def __init__(self, channel_name, reason="End of channel ban list", *args):
    self.channel_name = channel_name
    self.reason = reason

  def as_reply_params(self):
    return [self.channel_name, self.reason]


@BanFeature.register_channel_mode
class BanMask(mode.ListMode, mode.ChanModeMixin):
  CHAR = "b"

  def list(self, user):
    locals = self.target.get_feature_locals(BanFeature)
    bans = locals.get("bans", {})

    for mask, detail in sorted(bans.items(),
                               key=lambda v: v[1].creation_time,
                               reverse=True):
      user.send_reply(BanList(self.target.name, mask, detail.server,
                              str(int(detail.creation_time.timestamp()))))
    user.send_reply(EndOfBanList(self.target.name))

  def add(self, app, user, value):
    locals = self.target.get_feature_locals(BanFeature)
    bans = locals.setdefault("bans", {})

    if value in bans:
      return False

    bans[value] = BanDetail(app.server.name, datetime.datetime.now())
    return True

  def remove(self, app, user, value):
    locals = self.target.get_feature_locals(BanFeature)
    bans = locals.get("bans", {})

    if value not in bans:
      return False

    del bans[value]
    return True


@BanFeature.hook("channel.join.check")
def check_channel_ban(app, target, channel, key):
  locals = channel.get_feature_locals(BanFeature)

  for mask in locals.get("bans", {}):
    if target.hostmask_matches(mask):
      raise errors.BannedFromChannel(channel.name)

    app.run_hooks("channel.join.check_ban", target, channel, mask)


@BanFeature.hook("channel.message.check")
def check_can_message_channel(app, target, channel):
  locals = channel.get_feature_locals(BanFeature)

  for mask in locals.get("bans", {}):
    if target.hostmask_matches(mask):
      channel.check_is_voiced(target)

    app.run_hooks("channel.message.check_ban", target, channel, mask)
UTF-8
Python
false
false
2,710
py
83
ban.py
80
0.670111
0.66679
0
103
25.31068
76
mottosso/cortex
12,343,736,016,481
c1b5033ccefcd7c372bf44f05c4067b8843c9e97
ee3785eb0205893a3c5a0946119f59482bd8edc6
/contrib/IECoreArnold/test/IECoreArnold/RendererTest.py
2a0a63b75be8a9dddbc555d89e01c5aa4a31ea49
[ "MIT" ]
permissive
https://github.com/mottosso/cortex
46f80d093a80bfe1926fd8f6b73d915dcd87f1dd
e2600cef60d380e4fd5e06b8f2fe18c9b52f5afd
refs/heads/master
"2021-01-22T21:27:52.079668"
"2016-08-24T06:52:48"
"2016-08-24T06:52:48"
66,432,611
0
0
null
true
"2016-08-24T05:22:41"
"2016-08-24T05:22:40"
"2016-08-13T09:42:18"
"2016-08-22T22:13:51"
107,482
0
0
0
null
null
null
########################################################################## # # Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved. # Copyright (c) 2012, John Haddon. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## from __future__ import with_statement import os import unittest import arnold import IECore import IECoreArnold class RendererTest( unittest.TestCase ) : __displayFileName = "contrib/IECoreArnold/test/IECoreArnold/output.tif" __assFileName = "contrib/IECoreArnold/test/IECoreArnold/output.ass" def testTypeId( self ) : self.assertEqual( IECoreArnold.Renderer().typeId(), IECoreArnold.Renderer.staticTypeId() ) self.assertNotEqual( IECoreArnold.Renderer.staticTypeId(), IECore.Renderer.staticTypeId() ) def testTypeName( self ) : r = IECoreArnold.Renderer() self.assertEqual( r.typeName(), "IECoreArnold::Renderer" ) def testOptions( self ) : r = IECoreArnold.Renderer() # check we can set an already existing int self.assertEqual( r.getOption( "ai:AA_samples" ), IECore.IntData( 1 ) ) r.setOption( "ai:AA_samples", IECore.IntData( 11 ) ) self.assertEqual( r.getOption( "ai:AA_samples" ), IECore.IntData( 11 ) ) # check we can set an already existing float self.assertEqual( r.getOption( "ai:auto_transparency_threshold" ), IECore.FloatData( .99 ) ) r.setOption( "ai:auto_transparency_threshold", IECore.FloatData( .9 ) ) self.assertEqual( r.getOption( "ai:auto_transparency_threshold" ), IECore.FloatData( .9 ) ) # check tbat trying to set nonexistent options yields a message m = IECore.CapturingMessageHandler() with m : r.setOption( "ai:thisIsNotAnArnoldOption", IECore.IntData( 10 ) ) self.assertEqual( len( m.messages ), 1 ) self.assertEqual( m.messages[-1].level, IECore.Msg.Level.Warning ) self.assertEqual( m.messages[-1].message, "Unknown option \"ai:thisIsNotAnArnoldOption\"." ) # check that setting user options works r.setOption( "user:myLovelyUserOption", IECore.StringData( "oooh!" 
) ) self.assertEqual( r.getOption( "user:myLovelyUserOption" ), IECore.StringData( "oooh!" ) ) # check that set/get for other renderers is ignored r.setOption( "ri:pixelSamples", IECore.V2iData( IECore.V2i( 1, 1 ) ) ) self.assertEqual( r.getOption( "ri:pixelSamples" ), None ) def testDisplay( self ) : r = IECoreArnold.Renderer() self.failIf( os.path.exists( self.__displayFileName ) ) r.display( self.__displayFileName, "driver_tiff", "rgba", {} ) with IECore.WorldBlock( r ) : r.sphere( 1, -1, 1, 360, {} ) self.failUnless( os.path.exists( self.__displayFileName ) ) def testDisplayTypeMapping( self ) : r = IECoreArnold.Renderer() self.failIf( os.path.exists( self.__displayFileName ) ) r.display( self.__displayFileName, "tiff", "rgba", {} ) with IECore.WorldBlock( r ) : r.sphere( 1, -1, 1, 360, {} ) self.failUnless( os.path.exists( self.__displayFileName ) ) def testDisplayDriverIntegration( self ) : r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "testHandle" } ) with IECore.WorldBlock( r ) : r.sphere( 1, -1, 1, 360, {} ) self.failUnless( IECore.ImageDisplayDriver.removeStoredImage( "testHandle" ) ) def testASSOutput( self ) : r = IECoreArnold.Renderer( self.__assFileName ) self.failIf( os.path.exists( self.__assFileName ) ) with IECore.WorldBlock( r ) : r.sphere( 1, -1, 1, 360, {} ) self.failUnless( os.path.exists( self.__assFileName ) ) def testUserAttributes( self ) : r = IECoreArnold.Renderer() r.setAttribute( "user:a", IECore.IntData( 10 ) ) self.assertEqual( r.getAttribute( "user:a" ), IECore.IntData( 10 ) ) with IECore.WorldBlock( r ) : self.assertEqual( r.getAttribute( "user:a" ), IECore.IntData( 10 ) ) r.setAttribute( "user:a", IECore.IntData( 20 ) ) self.assertEqual( r.getAttribute( "user:a" ), IECore.IntData( 20 ) ) with IECore.AttributeBlock( r ) : r.setAttribute( "user:a", IECore.IntData( 30 ) ) self.assertEqual( r.getAttribute( "user:a" ), IECore.IntData( 30 ) ) self.assertEqual( r.getAttribute( "user:a" ), IECore.IntData( 20 ) ) def testShader( self ) : r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.WorldBlock( r ) : r.shader( "surface", "standard", { "emission" : 1.0, "emission_color" : IECore.Color3f( 1, 0, 0 ) } ) r.sphere( 1, -1, 1, 360, {} ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() e.pointAtUV( IECore.V2f( 0.5, 0.5 ), result ) self.assertAlmostEqual( result.floatPrimVar( e.A() ), 1, 5 ) self.assertAlmostEqual( result.floatPrimVar( e.R() ), 1, 5 ) self.assertEqual( result.floatPrimVar( e.G() ), 0 ) self.assertEqual( result.floatPrimVar( e.B() ), 0 ) def testReferenceExistingShader( self ) : r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.WorldBlock( r ) : shader = arnold.AiNode( "standard" ) arnold.AiNodeSetStr( shader, "name", "red_shader" ) arnold.AiNodeSetFlt( shader, "emission", 1 ) arnold.AiNodeSetRGB( shader, "emission_color", 1, 0, 0 ) r.shader( "surface", "reference:red_shader", {} ) r.sphere( 1, -1, 1, 360, {} ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() e.pointAtUV( IECore.V2f( 0.5, 0.5 ), result ) self.assertAlmostEqual( result.floatPrimVar( e.A() ), 1, 5 ) self.assertAlmostEqual( result.floatPrimVar( e.R() ), 1, 5 ) 
self.assertEqual( result.floatPrimVar( e.G() ), 0 ) self.assertEqual( result.floatPrimVar( e.B() ), 0 ) def testNonexistentReferencedShader( self ) : r = IECoreArnold.Renderer() with IECore.WorldBlock( r ) : m = IECore.CapturingMessageHandler() with m : r.shader( "surface", "reference:doesntExist", {} ) self.assertEqual( len( m.messages ), 1 ) self.failUnless( "Couldn't find shader" in m.messages[0].message ) def testUnloadableShader( self ) : r = IECoreArnold.Renderer() with IECore.WorldBlock( r ) : m = IECore.CapturingMessageHandler() with m : r.shader( "surface", "thisShaderDoesNotExist", {} ) self.assertEqual( len( m.messages ), 1 ) def testUnsupportedShaderType( self ) : r = IECoreArnold.Renderer() with IECore.WorldBlock( r ) : m = IECore.CapturingMessageHandler() with m : r.shader( "thisShaderTypeDoesntExist", "utility", {} ) self.assertEqual( len( m.messages ), 1 ) def testOtherRendererShaderType( self ) : r = IECoreArnold.Renderer() with IECore.WorldBlock( r ) : m = IECore.CapturingMessageHandler() with m : r.shader( "ri:surface", "something", {} ) self.assertEqual( len( m.messages ), 0 ) def testDefaultCamera( self ) : # render a plane at z==0 and check we can't see it with the default camera m = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.1 ), IECore.V2f( 0.1 ) ) ) r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) r.setOption( "ai:AA_samples", IECore.IntData( 3 ) ) with IECore.WorldBlock( r ) : m.render( r ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) self.assertEqual( image.dataWindow, IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 639, 479 ) ) ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() e.pointAtUV( IECore.V2f( 0.5, 0.5 ), result ) self.failUnless( result.floatPrimVar( image["A"] ) < 0.5 ) # move the plane back a touch and check we can see it with the default camera del r # must destroy the existing renderer before making a new one r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) r.setOption( "ai:AA_samples", IECore.IntData( 3 ) ) with IECore.WorldBlock( r ) : with IECore.TransformBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) ) m.render( r ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) self.assertEqual( image.dataWindow, IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 639, 479 ) ) ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() e.pointAtUV( IECore.V2f( 0.5, 0.5 ), result ) self.failUnless( result.floatPrimVar( image["A"] ) > 0.9 ) # move the camera back a touch and check we can see the plane at z==0 del r # must destroy the existing renderer before making a new one r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) r.setOption( "ai:AA_samples", IECore.IntData( 3 ) ) with IECore.TransformBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, 1 ) ) ) r.camera( "main", {} ) with IECore.WorldBlock( r ) : m.render( r ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) self.assertEqual( image.dataWindow, IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 639, 479 ) ) ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() e.pointAtUV( IECore.V2f( 0.5, 0.5 ), result ) self.failUnless( result.floatPrimVar( image["A"] ) > 0.9 ) def testCameraXYOrientation( self ) : # 
render a red square at x==1, and a green one at y==1 r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.TransformBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, 1 ) ) ) r.camera( "main", { "resolution" : IECore.V2iData( IECore.V2i( 512 ) ) } ) with IECore.WorldBlock( r ) : r.shader( "surface", "utility", { "color" : IECore.Color3f( 1, 0, 0 ) } ) IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0.75, -0.25 ), IECore.V2f( 1.25, 0.25 ) ) ).render( r ) r.shader( "surface", "utility", { "color" : IECore.Color3f( 0, 1, 0 ) } ) IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.25, 0.75 ), IECore.V2f( 0.25, 1.25 ) ) ).render( r ) # check we get the colors we'd expect where we expect them image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) self.failUnless( image is not None ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() a = e.A() r = e.R() g = e.G() b = e.B() e.pointAtUV( IECore.V2f( 1, 0.5 ), result ) self.assertAlmostEqual( result.floatPrimVar( a ), 1, 5 ) self.assertAlmostEqual( result.floatPrimVar( r ), 1, 5 ) self.assertEqual( result.floatPrimVar( g ), 0 ) self.assertEqual( result.floatPrimVar( b ), 0 ) e.pointAtUV( IECore.V2f( 0.5, 0 ), result ) self.assertAlmostEqual( result.floatPrimVar( a ), 1, 5 ) self.assertEqual( result.floatPrimVar( r ), 0 ) self.assertAlmostEqual( result.floatPrimVar( g ), 1, 5 ) self.assertEqual( result.floatPrimVar( b ), 0 ) def testCameraAspectRatio( self ) : r = IECoreArnold.Renderer() r.camera( "main", { "resolution" : IECore.V2i( 640, 480 ), "screenWindow" : IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 640, 480 ) ) } ) r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) ) r.shader( "surface", "utility", { "shading_mode" : "flat", "color" : IECore.Color3f( 1, 0, 0 ) } ) IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 2 ), IECore.V2f( 638, 478 ) ) ).render( r ) r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) ) r.shader( "surface", "utility", { "shade_mode" : "flat", "color" : IECore.Color3f( 0, 1, 0 ) } ) IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 640, 480 ) ) ).render( r ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) self.failUnless( image is not None ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() r = e.R() g = e.G() edges = [ IECore.V2i( 0 ), IECore.V2i( 320, 0 ), IECore.V2i( 639, 0 ), IECore.V2i( 639, 240 ), IECore.V2i( 639, 479 ), IECore.V2i( 320, 479 ), IECore.V2i( 0, 479 ), IECore.V2i( 0, 240 ), ] for point in edges : self.failUnless( e.pointAtPixel( point, result ) ) self.failUnless( result.floatPrimVar( r ) < 0.1 ) self.failUnless( result.floatPrimVar( g ) > 0.8 ) innerEdges = [ IECore.V2i( 3, 3 ), IECore.V2i( 320, 3 ), IECore.V2i( 637, 3 ), IECore.V2i( 636, 240 ), IECore.V2i( 636, 477 ), IECore.V2i( 320, 477 ), IECore.V2i( 3, 477 ), IECore.V2i( 3, 240 ), ] for point in innerEdges : self.failUnless( e.pointAtPixel( point, result ) ) self.failUnless( result.floatPrimVar( r ) > 0.8 ) self.failUnless( result.floatPrimVar( g ) < 0.1 ) def testProcedural( self ) : attributeValues = [] class TestProcedural( IECore.Renderer.Procedural ) : def __init__( self ) : IECore.Renderer.Procedural.__init__( self ) def bound( self ) 
: return IECore.Box3f( IECore.V3f( -1 ), IECore.V3f( 1 ) ) def render( self, renderer ) : t = renderer.getAttribute( "user:test" ).value attributeValues.append( t ) renderer.sphere( 1, -1, 1, 360, {} ) def hash( self ): h = IECore.MurmurHash() return h r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) ) r.setAttribute( "user:test", IECore.IntData( 0 ) ) r.procedural( TestProcedural() ) r.setAttribute( "user:test", IECore.IntData( 1 ) ) r.procedural( TestProcedural() ) self.assertEqual( len( attributeValues ), 2 ) self.failUnless( 1 in attributeValues ) self.failUnless( 0 in attributeValues ) def performCurvesTest( self, curvesPrimitive, expectedImage ) : r = IECoreArnold.Renderer() r.setOption( "ai:AA_samples", IECore.IntData( 3 ) ) r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.TransformBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, 2 ) ) ) r.camera( "main", { "resolution" : IECore.V2i( 512 ), "projectin" : "orthographic", "screenWindow" : IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 1 ) ), } ) with IECore.WorldBlock( r ) : curvesPrimitive.render( r ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) del image["A"] # raise blackPoint massively to remove possible watermark IECore.Grade()( input = image, copyInput = False, blackPoint = IECore.Color3f( 0.9 ) ) expectedImage = IECore.Reader.create( expectedImage ).read() self.assertEqual( IECore.ImageDiffOp()( imageA=image, imageB=expectedImage, maxError=0.01 ), IECore.BoolData( False ) ) def testBezierCurves( self ) : c = IECore.CurvesPrimitive( IECore.IntVectorData( [ 4 ] ), IECore.CubicBasisf.bezier(), False, IECore.V3fVectorData( [ IECore.V3f( 0.8, 0.2, 0 ), IECore.V3f( 0.2, 0.2, 0 ), IECore.V3f( 0.2, 0.8, 0 ), IECore.V3f( 0.8, 0.8, 0 ), ] ) ) c["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.05 ) ) self.performCurvesTest( c, "contrib/IECoreArnold/test/IECoreArnold/data/curveImages/bezier.exr" ) def testBSplineCurves( self ) : c = IECore.CurvesPrimitive( IECore.IntVectorData( [ 4 ] ), IECore.CubicBasisf.bSpline(), False, IECore.V3fVectorData( [ IECore.V3f( 0.8, 0.2, 0 ), IECore.V3f( 0.2, 0.2, 0 ), IECore.V3f( 0.2, 0.8, 0 ), IECore.V3f( 0.8, 0.8, 0 ), ] ) ) c["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.05 ) ) self.performCurvesTest( c, "contrib/IECoreArnold/test/IECoreArnold/data/curveImages/bSpline.exr" ) def testVisibilityAttributes( self ) : r = IECoreArnold.Renderer() self.assertEqual( r.getAttribute( "ai:visibility:camera" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "ai:visibility:shadow" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "ai:visibility:reflected" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "ai:visibility:refracted" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "ai:visibility:diffuse" ), IECore.BoolData( True ) ) self.assertEqual( r.getAttribute( "ai:visibility:glossy" ), IECore.BoolData( True ) ) r.setAttribute( "ai:visibility:shadow", IECore.BoolData( False ) ) self.assertEqual( r.getAttribute( "ai:visibility:shadow" ), IECore.BoolData( False ) ) def __displacementRender( self, doDisplacement ) : r = IECoreArnold.Renderer() r.display( "test", 
"ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) ) r.setAttribute( "ai:polymesh:subdiv_iterations", IECore.IntData( 5 ) ) r.shader( "surface", "utility", { "color_mode" : IECore.StringData( "ng" ), "shade_mode" : IECore.StringData( "flat" ) } ) if doDisplacement : r.shader( "displacement", "noise", {} ) mesh = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -2 ), IECore.V2f( 2 ) ) ) mesh.interpolation = "catmullClark" mesh.render( r ) return IECore.ImageDisplayDriver.removeStoredImage( "test" ) def testDisplacementShader( self ) : undisplaced1 = self.__displacementRender( doDisplacement = False ) undisplaced2 = self.__displacementRender( doDisplacement = False ) displaced1 = self.__displacementRender( doDisplacement = True ) displaced2 = self.__displacementRender( doDisplacement = True ) self.assertEqual( IECore.ImageDiffOp()( imageA=undisplaced1, imageB=undisplaced2, maxError=0.001 ), IECore.BoolData( False ) ) self.assertEqual( IECore.ImageDiffOp()( imageA=displaced1, imageB=displaced2, maxError=0.001 ), IECore.BoolData( False ) ) self.assertEqual( IECore.ImageDiffOp()( imageA=displaced1, imageB=undisplaced1, maxError=0.1 ), IECore.BoolData( True ) ) ## \todo This is a duplicate of AutomaticInstancingTest.__allNodes - consider # where we might be able to consolidate them to. def __allNodes( self, type = arnold.AI_NODE_ALL, ignoreRoot = True ) : result = [] i = arnold.AiUniverseGetNodeIterator( type ) while not arnold.AiNodeIteratorFinished( i ) : node = arnold.AiNodeIteratorGetNext( i ) if ignoreRoot and arnold.AiNodeEntryGetName( arnold.AiNodeGetNodeEntry( node ) ) == "list_aggregate" and arnold.AiNodeGetName( node ) == "root" : continue result.append( node ) return result def testShapeAttributes( self ) : r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) ) r.setAttribute( "ai:polymesh:subdiv_iterations", IECore.IntData( 10 ) ) mesh = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -2 ), IECore.V2f( 2 ) ) ) mesh.render( r ) shapes = self.__allNodes( type = arnold.AI_NODE_SHAPE ) self.assertEqual( len( shapes ), 1 ) self.assertEqual( arnold.AiNodeGetInt( shapes[0], "subdiv_iterations" ), 10 ) def testShaderConnections( self ) : r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) ) r.shader( "shader", "flat", { "color" : IECore.Color3f( 1, 0, 0 ), "__handle" : "myInputShader" } ) r.shader( "surface", "standard", { "emission" : 1.0, "emission_color" : "link:myInputShader" } ) mesh = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) mesh.render( r ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() e.pointAtUV( IECore.V2f( 0.5 ), result ) self.assertAlmostEqual( result.floatPrimVar( e.R() ), 1, 5 ) self.assertEqual( result.floatPrimVar( e.G() ), 0 ) self.assertEqual( result.floatPrimVar( e.B() ), 0 ) def testMissingShaderConnectionWarnings( self ) : r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", 
"handle" : "test" } ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) ) m = IECore.CapturingMessageHandler() with m : r.shader( "shader", "flat", { "color" : IECore.Color3f( 1, 0, 0 ), "__handle" : "myInputShader" } ) r.shader( "surface", "standard", { "emission" : 1.0, "emission_color" : "link:oopsWrongOne" } ) self.assertEqual( len( m.messages ), 1 ) self.assertEqual( m.messages[0].level, IECore.Msg.Level.Warning ) self.failUnless( "oopsWrongOne" in m.messages[0].message ) def testLight( self ) : r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) with IECore.WorldBlock( r ) : r.light( "point_light", "handle", { "intensity" : 1, "color" : IECore.Color3f( 1, 0.5, 0.25 ) } ) r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) ) r.shader( "surface", "standard", {} ) mesh = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) mesh.render( r ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() e.pointAtUV( IECore.V2f( 0.5 ), result ) self.assertTrue( result.floatPrimVar( e.R() ) > 0.2 ) self.assertAlmostEqual( result.floatPrimVar( e.R() ) * 0.5, result.floatPrimVar( e.G() ) ) self.assertAlmostEqual( result.floatPrimVar( e.R() ) * 0.25, result.floatPrimVar( e.B() ) ) def testExternalProcedural( self ) : r = IECoreArnold.Renderer( self.__assFileName ) with IECore.WorldBlock( r ) : r.procedural( r.ExternalProcedural( "test.so", IECore.Box3f( IECore.V3f( 1, 2, 3 ), IECore.V3f( 4, 5, 6 ) ), { "colorParm" : IECore.Color3f( 1, 2, 3 ), "stringParm" : "test", "floatParm" : 1.5, "intParm" : 2, } ) ) ass = "".join( file( self.__assFileName ).readlines() ) self.assertTrue( "procedural" in ass ) self.assertTrue( "min 1 2 3" in ass ) self.assertTrue( "max 4 5 6" in ass ) self.assertTrue( "dso \"test.so\"" in ass ) self.assertTrue( "declare stringParm constant STRING" in ass ) self.assertTrue( "declare floatParm constant FLOAT" in ass ) self.assertTrue( "declare intParm constant INT" in ass ) self.assertTrue( "declare colorParm constant RGB" in ass ) self.assertTrue( "stringParm \"test\"" in ass ) self.assertTrue( "floatParm 1.5" in ass ) self.assertTrue( "intParm 2" in ass ) self.assertTrue( "colorParm 1 2 3" in ass ) def testPixelAspectRatio( self ) : r = IECoreArnold.Renderer( self.__assFileName ) r.camera( "main", { "resolution" : IECore.V2i( 640, 480 ), "pixelAspectRatio" : 2.0 } ) with IECore.WorldBlock( r ) : pass ass = "".join( file( self.__assFileName ).readlines() ) self.assertTrue( "aspect_ratio 0.5" in ass ) def testLightPrefixes( self ) : r = IECoreArnold.Renderer( self.__assFileName ) with IECore.WorldBlock( r ) : r.light( "distant_light", "genericHandle", {} ) r.light( "ri:point_light", "renderManHandle", {} ) r.light( "ai:spot_light", "arnoldLight", {} ) ass = "".join( file( self.__assFileName ).readlines() ) self.assertTrue( "distant_light" in ass ) self.assertTrue( "spot_light" in ass ) self.assertTrue( "point_light" not in ass ) def testDeformationMotionBlur( self ) : r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) r.setOption( "ai:AA_samples", IECore.IntData( 10 ) ) r.camera( "main", { "resolution" : IECore.V2i( 128, 128 ), "shutter" : IECore.V2f( 0, 1 ) } ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( 
IECore.V3f( 0, 0, -5 ) ) ) with IECore.MotionBlock( r, [ 0, 1 ] ) : mesh = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1.5, -0.5 ), IECore.V2f( -0.5, 0.5 ) ) ) mesh.render( r ) mesh = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0.5, -0.5 ), IECore.V2f( 1.5, 0.5 ) ) ) mesh.render( r ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() e.pointAtUV( IECore.V2f( 0.5 ), result ) self.assertAlmostEqual( result.floatPrimVar( e.A() ), 0.5, 2 ) def testTransformationMotionBlur( self ) : r = IECoreArnold.Renderer() r.display( "test", "ieDisplay", "rgba", { "driverType" : "ImageDisplayDriver", "handle" : "test" } ) r.setOption( "ai:AA_samples", IECore.IntData( 10 ) ) r.camera( "main", { "resolution" : IECore.V2i( 128, 128 ), "shutter" : IECore.V2f( 0, 1 ) } ) with IECore.WorldBlock( r ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) ) with IECore.MotionBlock( r, [ 0, 1 ] ) : r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( -1, 0, 0 ) ) ) r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 1, 0, 0 ) ) ) mesh = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.5 ), IECore.V2f( 0.5 ) ) ) mesh.render( r ) image = IECore.ImageDisplayDriver.removeStoredImage( "test" ) e = IECore.PrimitiveEvaluator.create( image ) result = e.createResult() e.pointAtUV( IECore.V2f( 0.5 ), result ) self.assertAlmostEqual( result.floatPrimVar( e.A() ), 0.5, 2 ) def testProcedural( self ) : r = IECoreArnold.Renderer( "/tmp/test.ass" ) with IECore.WorldBlock( r ) : r.procedural( r.ExternalProcedural( "someVolumeThing.so", IECore.Box3f( IECore.V3f( -1, -2, -3 ), IECore.V3f( 4, 5, 6 ) ), { "ai:nodeType" : "volume", "testFloat" : 0.5 } ) ) volume = self.__allNodes( type = arnold.AI_NODE_SHAPE )[-1] self.assertEqual( arnold.AiNodeEntryGetName( arnold.AiNodeGetNodeEntry( volume ) ), "volume" ) self.assertEqual( arnold.AiNodeGetPnt( volume, "min" ), arnold.AtPoint( -1, -2, -3 ) ) self.assertEqual( arnold.AiNodeGetPnt( volume, "max" ), arnold.AtPoint( 4, 5, 6 ) ) self.assertEqual( arnold.AiNodeGetStr( volume, "dso" ), "someVolumeThing.so" ) self.assertEqual( arnold.AiNodeGetFlt( volume, "testFloat" ), 0.5 ) def tearDown( self ) : for f in [ self.__displayFileName, self.__assFileName, ] : if os.path.exists( f ) : os.remove( f ) if __name__ == "__main__": unittest.main()
UTF-8
Python
false
false
28,909
py
17
RendererTest.py
15
0.66315
0.635892
0
848
33.090802
148
fabiogoro/flask-base
13,005,161,001,450
d899033d7740accf50b32eb4742667469e0a7c22
d218947bd5fd4f5d07127eedf1395f68b0c42ed8
/models.py
ad5dc84bc06bf13c2bc85b9102e4c3c163ec7c6d
[]
no_license
https://github.com/fabiogoro/flask-base
da2e13e4fc3e681cea4375a316965166aec3ea30
4afe4db93ab64b51152d214266c0dc66ada4e215
refs/heads/master
"2023-03-25T21:29:04.526421"
"2020-05-21T00:27:58"
"2020-05-21T00:27:58"
262,611,046
0
8
null
false
"2021-03-20T03:54:23"
"2020-05-09T16:22:37"
"2020-05-21T00:28:51"
"2021-03-20T03:54:23"
16
0
10
1
Python
false
false
## models.py

from banco import bd


class Post:
    def __init__(self, titulo, autor, texto):
        self.titulo = titulo
        self.autor = autor
        self.texto = texto

    def gravar(self):
        sql = '''insert into posts (titulo, autor, texto) values (?, ?, ?)'''
        primeiro_interrogacao = self.titulo
        segundo_interrogacao = self.autor
        terceiro_interrogacao = self.texto
        bd().execute(sql, [primeiro_interrogacao, segundo_interrogacao, terceiro_interrogacao])
        bd().commit()

    @staticmethod
    def recupera_todos():
        ## We use the object returned by bd() to run SQL commands
        sql = '''select titulo, autor, texto from posts order by id desc'''
        cur = bd().execute(sql)

        ## We build a list with the query results to pass to the view
        posts = []
        for titulo, autor, texto in cur.fetchall():  # fetchall() returns a list with the results:
            post = Post(titulo, autor, texto)
            posts.append(post)
        return posts
UTF-8
Python
false
false
1,075
py
11
models.py
6
0.614166
0.614166
0
30
34.766667
98
WillDrug/modelwrapper
18,580,028,558,705
4e23bba14917ad01d6adf1e9dcb457569b8a5bf6
e7ff00f035457cf6adc22dd7bcb9955a826dc0f6
/models_handler/core.py
b263835db0c0b5037ace5f73ca1defbc7132bf9f
[]
no_license
https://github.com/WillDrug/modelwrapper
800ec4e70f0d2f9158e57a40a5bf63094add1939
e64f40870f9be4c10bf64296bd43a923a6541865
refs/heads/master
"2022-12-10T18:32:18.570692"
"2018-08-13T17:59:59"
"2018-08-13T17:59:59"
144,338,927
0
0
null
false
"2022-12-08T01:01:46"
"2018-08-10T22:53:08"
"2018-08-13T18:00:08"
"2022-12-08T01:01:45"
35
0
0
3
Python
false
false
import base64
import hashlib
from enum import Enum
import importlib
import json
import os
import pkgutil
import shelve
from abc import ABCMeta, abstractmethod
from datetime import datetime

from sklearn.exceptions import NotFittedError


class Config(Enum):
    LATEST_TAG = 'latest'
    MODELS_FOLDER = 'models'
    MODEL_CFG_FILE = 'config.json'
    MODEL_CFG = 'model'
    MODEL_CONN_CFG = 'connections'
    LAST_LAUNCH_DATE = 'last_launch_date'
    LAST_LAUNCH_TIME = 'last_launch_time'
    DUMP_MODEL_SECTION = 'model'
    DUMP_META_SECTION = 'description'

    def __get__(self, instance, owner):
        return self.value


DUMPS_PATH = os.environ['DUMPS_PATH']


class ModelInterface(metaclass=ABCMeta):
    """
    Generic interface for creating a model launcher, which can be found and loaded dynamically by ModelLoader.
    Every model MUST have a launcher class, subclassing this interface.
    Models can have multiple implementations of this interface, though it makes no sense in general
    """

    def __init__(self, file=__file__):
        """
        Existing model loads by id. New models get timestamp as id.
        1) MUST BE OVERRIDDEN IN EVERY MODEL
        2) MUST BE OVERRIDDEN STRICTLY & ONLY THIS WAY:
        def __init__(self):
            super().__init__(__file__)
        """
        self.__dump_path = os.path.join(DUMPS_PATH, self.__class__.__name__)  # path to model dump in dumps folder
        self.model_path = os.path.dirname(file)  # path to model folder
        self.model_core = None  # model itself
        try:
            with shelve.open(self.__dump_path) as db:
                # list of all model versions
                self.model_versions = list(db.keys())
                db.close()
        except OSError:
            self.model_versions = []

        with open(os.path.join(os.path.dirname(file), Config.MODEL_CFG_FILE), 'r', encoding='utf-8') as fl:
            cfg = json.load(fl)
            fl.close()

        self.model_name = self.__class__.__name__
        self.score_ball = 0
        self.model_config, self.conn_config = cfg[Config.MODEL_CFG], cfg[Config.MODEL_CONN_CFG]  # configs for model core and connections

        # metadata
        self.metadata = {}

    def _update_meta(self):
        dt = datetime.now()
        self.metadata[Config.LAST_LAUNCH_DATE] = dt.date()
        self.metadata[Config.LAST_LAUNCH_TIME] = dt.time()

    @abstractmethod
    def fit(self):
        """
        Used to launch model fitting.
        Implementation is totally yours, but in terms of compatibility return True if fitting is successful
        or False if failed.
        """
        pass

    @abstractmethod
    def predict(self):
        """
        Used to launch prediction.
        Implementation is totally yours, but in terms of compatibility save the prediction result to
        self.prediction and then return self.prediction.
        """
        pass

    @abstractmethod
    def score(self):
        """
        Used to get model score for champion / challenger comparison.
        Implementation is totally yours, but in terms of compatibility save the scoring result as an aggregated
        ball to self.score_res and then return self.score_res.
        """
        pass

    @abstractmethod
    def dump_model_core(self, dump_id: str = str(datetime.now()), new_champ: bool = False):
        """
        Saves model core to binary object.
        It's up to you - either to use the default implementation or make your own.
        """
        if not os.path.exists(DUMPS_PATH):
            os.mkdir(DUMPS_PATH, mode=0o777)  # checks and creates dumps folder inside modelwrapper root
        db = shelve.open(self.__dump_path)
        self._update_meta()
        db[dump_id] = {
            'saved': datetime.now(),
            'description': self.metadata,
            'model': self.model_core,
            'score': self.score()
        }
        if new_champ:
            # latest updates only on new champ. latest always loads by default
            db[Config.LATEST_TAG] = db.get(dump_id)
        db.close()
        return True

    @abstractmethod
    def load_model_core(self, dump_id: str):
        """
        Loads model core from a specific binary object.
        It's up to you - either to use the default implementation or make your own.
        """
        if not os.path.exists(DUMPS_PATH):
            os.mkdir(DUMPS_PATH, mode=0o777)  # checks and creates dumps folder inside modelwrapper root
        db = shelve.open(self.__dump_path)
        model = db.get(dump_id)
        db.close()
        if model is None:
            raise NotFittedError()
        self.model_core = model[Config.DUMP_MODEL_SECTION]
        self.metadata = model[Config.DUMP_META_SECTION]
        self.score_ball = model['score']
        return True

    @abstractmethod
    def delete_model_core(self, dump_id: str):
        """
        Deletes specified model version from dump
        """
        db = shelve.open(self.__dump_path)
        del db[dump_id]
        db.close()
        return True

    def show_dumps(self):
        try:
            db = shelve.open(self.__dump_path)
            return {x: {
                'saved': str(db[x]['saved']),
                'description': str(db[x]['description']),
                'model': str(db[x]['model'].__class__.__name__),
                'score': self.score()
            } for x in db}
        finally:
            db.close()

    def restore_dump(self, dump_id: str):
        self.load_model_core(dump_id=dump_id)
        self.dump_model_core(dump_id=dump_id, new_champ=True)
        return True


class ModelLoader:
    """
    Loads models from folders dynamically.
    """

    def __init__(self, model_name: str):
        self.__model_list = None
        self.__base_folder = os.path.join(os.path.dirname(__file__), Config.MODELS_FOLDER)
        self._import_models()  # load models recursively based on ModelInterface implementation
        self.model = self.__model_list[model_name]

    def _import_models(self) -> None:
        """
        Finds all ModelInterface subclasses and treats them as models. Imports them dynamically.
        """
        for model in os.listdir(self.__base_folder):
            # check models dir and its subfolders. os.walk is not convenient
            pkg_dir = os.path.join(self.__base_folder, model)
            for (finder, name, ispkg) in pkgutil.iter_modules([pkg_dir]):
                # dynamically import packages from models/{model_name}/ folder
                importlib.import_module(f'{__package__}.models.{model}.{name}')
        # generates a dict of ModelInterface implementations
        self.__model_list = {subcl.__name__: subcl for subcl in ModelInterface.__subclasses__()}
UTF-8
Python
false
false
6,741
py
26
core.py
21
0.600949
0.598873
0
194
33.747423
119
rdecks155/FormFit
2,216,203,125,282
f269b1c81df5c0876d9dd69d9a5bb73169427d76
c04755a61505b52328e2f314bbf2af7058369d72
/video_processing.py
256f062d35908d33e59fbe6e68e392244e964e8a
[]
no_license
https://github.com/rdecks155/FormFit
0c9546c5b6aefcd393f0a754cbb993e3cb9543ec
70f1bfc8f6da0d702656eaa20f0cb3c68f7c67ae
refs/heads/master
"2023-06-13T22:34:18.800731"
"2021-07-14T19:51:42"
"2021-07-14T19:51:42"
225,216,573
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# from PIL import Image
# import csv
# im = Image.open('./bar_closeup.jpg', 'r')

import cv2     # for capturing videos
import math   # for mathematical operations
import matplotlib.pyplot as plt    # for plotting the images
#%matplotlib inline
import pandas as pd
from keras.preprocessing import image   # for preprocessing the images
import numpy as np    # for mathematical operations
from keras.utils import np_utils
from skimage.transform import resize   # for resizing images

count = 0
videoFile = "./videos/originals/video5.mp4"
cap = cv2.VideoCapture(videoFile)   # capturing the video from the given path
#frameRate = cap.get(5) #frame rate
# video3: frames = [14.7,16.5,18,19.9,21.6,23.5,25.3,26.8,28.5,30.4,32.7,35.3,37.6, 41, 43, 44.9, 46.8, 48.8, 50.6, 55.1]
# video4: frames = [5.4, 7.2, 8.8, 10.1, 13.1, 14.7, 16.4,17.9, 19.4, 21.2, 23.2, 24.8, 26.5, 28.2, 29.8, 31.4, 33, 34.5, 36]
frames = [5.1, 13, 16.3, 19.2, 22.3, 25.4, 28.4, 31.5, 34.7, 37.6, 40.6, 43.7, 50.2, 52.5, 55.5, 58.2, 60.9, 64.6, 68, 70.2, 73.1, 76.6, 79.8, 83.8, 87.1, 89.3, 91.6]

for time in frames:
    for i in range(3):
        if i == 0:
            time_milliseconds = (time - .1) * 1000
        elif i == 1:
            time_milliseconds = time * 1000
        else:
            time_milliseconds = (time + .1) * 1000
        cap.set(cv2.CAP_PROP_POS_MSEC, time_milliseconds)
        success, image = cap.read()
        if success:
            filename = "stage3_frame%d.jpg" % count
            count += 1
            cv2.imwrite('./videos/bar_frames/stage3/' + filename, image)

cap.release()
UTF-8
Python
false
false
1,623
py
4
video_processing.py
3
0.602588
0.468885
0
38
40.710526
166
BehzadBozorgtabar/LTS5-ADAS-ME-tool
377,957,138,726
e11d5e0f3283978be76c3d970c4b0f9ae330c17f
131103784ee2cd9d2553a5ab3636ea7ab9e73e7b
/interface/data.py
6a32129fc79f886b46bc0504d6063cd49cf7d181
[]
no_license
https://github.com/BehzadBozorgtabar/LTS5-ADAS-ME-tool
c9d32c16d07a1050262b1d60c7b8bce6ce91ad8e
35e2641932762caced5be2e560c5284520891ebf
refs/heads/master
"2020-03-28T15:44:44.282496"
"2019-01-08T13:01:58"
"2019-01-08T13:01:58"
148,622,171
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# constant for all
stick = "nesw"
pad = 5
defaultSavePath = "data/files/"
MAX_SEGMENT_SIZE = 1000
SMB_HEADER_SIZE = 20

#constants for video
first_frame = 1
first_index = first_frame - 1
FRAME_INDEX = 1
NBR_FRAMES = 7
MIN_SEGMENT_SIZE = 20
window_width = 640
window_height = 480
SEGMENT_SIZE = 5
delay = 15

#constant for graph
xLabel = "Frame Number"
yLabel = "Value"
MARGIN = 0.2
Y_UPPER_BOUND = 1
Y_LOWER_BOUND = -1
DEFAULT_TICK = 1.0

#Constants for annotation frame
inactiveColor = 'grey'
activeColor = 'green'
mapStateColor = {'normal' : activeColor, 'disabled' : inactiveColor}

Valence = 0
Arousal = 1
CanvasID = 2

valAr = ['Valence', 'Arousal']
valArSev = ['Valence', 'Arousal', 'Severity']

MACOS_GRAPHIC_MARGIN = 0.5
UTF-8
Python
false
false
726
py
7
data.py
6
0.705234
0.65978
0
36
19.166667
68
QWQ-pixel/practice_python_2
7,447,473,307,050
5cd9e0d4f30e15afc6f758246c3667ed014f3182
c1973d141f0168abcd107ceb28355b7c3e523b60
/num_12/12.7.py
b34a59a7392cce996f89dfc54411e00a5922b37a
[]
no_license
https://github.com/QWQ-pixel/practice_python_2
1a47de7efd0ebf2d7b461007eebe570f92c0c5b4
5165b65715223e7192458de8b33b908d371d08d7
refs/heads/main
"2023-04-05T09:38:35.594789"
"2021-04-14T17:08:06"
"2021-04-14T17:08:06"
357,983,337
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def scale(n_, m_):
    strings = []
    for i in range(n_):
        strings.append(input())
    a, b = round(n_ / 2), round(m_ / 2)
    strings = change_string(n_, m_, a, b, strings)
    print(*strings, sep='\n')


def change_string(n_, m_, a, b, strings):
    count, res = 0, []
    for i in range(n_ - a):
        if count > len(strings):
            break
        res.append(strings[count])
        count += 2
    return change_symbols(m_, b, res)


def change_symbols(m_, b, strings):
    count, res, new_string = 1, [], ''
    for string in strings:
        for i in range(m_ - b):
            new_string = string[::2]
        res.append(new_string)
    return res


if __name__ == "__main__":
    n, m = int(input()), int(input())
    scale(n, m)
UTF-8
Python
false
false
754
py
47
12.7.py
47
0.506631
0.498674
0
31
23.322581
50
sigfrido/sigdoku
9,371,618,644,206
8b4e54875f41b60ce12bc253305c948c6d193abc
7fc02c1122de8cc76a22a23ac766981110d59642
/sigdoku/sudoku.py
52d3b2f5256ddda6f8a759f3771fa3f322cb2b6c
[]
no_license
https://github.com/sigfrido/sigdoku
7bf63ef661944263fdab67d526ad13be9f2d85fb
217fa13aede9b9fe5f1306b7d2567649c4d162b7
refs/heads/master
"2020-05-27T13:35:10.328186"
"2016-08-19T11:10:35"
"2016-08-19T11:10:35"
6,645,771
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-

import solvers


class SudokuException(Exception):
    """ Base class for all sudoku exceptions """
    pass


class DeniedMoveException(SudokuException):
    """ User attempted a denied move """
    pass


class OutOfRangeException(SudokuException):
    """ User specified an out of range value """
    pass


class Dimensions(object):
    """
    A Dimensions object defines the size of the sudoku board
    and the range of the allowed moves
    """

    VALID_ROOTS = [2, 3, 4]

    def __init__(self, root):
        try:
            introot = int(root)
            if introot in Dimensions.VALID_ROOTS:
                self.__root = introot
                self.__size = self.__root**2
                self.ALL_MOVES = list(self.all_moves())
                return
        except:
            pass
        raise OutOfRangeException("Root dimension not in range 2..4: %s" % root)

    @property
    def root(self):
        """ For a typical sudoku board, root = 3 """
        return self.__root

    @property
    def size(self):
        """ The board size is the root value squared: 9 for a typical sudoku board """
        return self.__size

    def all_moves(self):
        return set(range(1, self.__size + 1))

    def get_int_in_range(self, value):
        try:
            intvalue = int(value)
            if intvalue >= 0 and intvalue <= self.__size:
                return intvalue
        except:
            pass
        raise OutOfRangeException("Value not in range 0..%d: %s" % (self.__size, value))


class Cell(object):
    """ A board cell """

    def __init__(self, dimensions):
        self.__value = 0
        self.__dimensions = dimensions
        self.__listeners = []
        self.__groups = []
        self.row = None
        self.col = None
        self.square = None

    @property
    def dimensions(self):
        return self.__dimensions

    @property
    def value(self):
        return self.__value

    def move(self, value):
        intvalue = self.dimensions.get_int_in_range(value)
        if self.__value and intvalue:
            raise DeniedMoveException('The cell has already a value')
        if intvalue:
            if not intvalue in self.allowed_moves():
                raise DeniedMoveException('This value is denied for the cell')
            self.__value = intvalue
            self.changed(0)
        else:
            old_value, self.__value = self.__value, 0
            self.changed(old_value)

    def changed(self, old_value):
        for g in self.__listeners:
            g.cell_changed(self, old_value)

    def add_listener(self, group):
        self.__listeners.append(group)

    def add_group(self, group):
        self.__groups.append(group)

    def empty(self):
        self.move(0)

    def is_empty(self):
        return 0 == self.__value

    def allowed_moves(self):
        return set.intersection(
            *[group.allowed_moves() for group in self.__groups]
        ) if not self.value else set()


class BaseCellGroup(object):

    def __init__(self, dimensions):
        self.__cells = []
        self.__dimensions = dimensions

    @property
    def num_cells(self):
        return self.__dimensions.size

    def cell_changed(self, cell, old_value):
        pass

    def add_cell(self, cell):
        if len(self.__cells) == self.num_cells:
            raise IndexError('Dimensions exceeded in group')
        if not isinstance(cell, Cell):
            raise Exception('This is not a Cell')
        self.__cells.append(cell)
        cell.add_listener(self)

    def cell(self, index):
        return self.__cells[self.dimensions.get_int_in_range(index) - 1]

    @property
    def cells(self):
        return self.__cells

    @property
    def dimensions(self):
        return self.__dimensions

    def allowed_moves_for_cells(self):
        return dict((cell, cell.allowed_moves()) for cell in self.cells)  # if not cell.value


class CellGroup(BaseCellGroup):

    def __init__(self, dimensions):
        super(CellGroup, self).__init__(dimensions)
        self.index = None

    def add_cell(self, cell):
        super(CellGroup, self).add_cell(cell)
        cell.add_group(self)

    def allowed_moves(self):
        return self.dimensions.all_moves().difference(
            set([cell.value for cell in self.cells])
        )


class Square(CellGroup):

    def __init__(self, dimensions):
        super(Square, self).__init__(dimensions)
        self.rows = []
        self.cols = []


# Convenience global with all the solvers in the right order
ALL_SOLVERS = [solvers.BaseSolver(),
               solvers.RowColInSquareSolver(),
               solvers.CoupleTripletInGroupSolver()]


class Board(BaseCellGroup):

    def __init__(self, root=3, solvers=ALL_SOLVERS):
        super(Board, self).__init__(Dimensions(root))
        self.__rows = self.__makeCellGroups()
        self.__cols = self.__makeCellGroups()
        self.__squares = self.__makeCellGroups(Square)
        self.__solvers = list(solvers)[:]
        self.__moves = []
        cells_per_facet = self.dimensions.size
        cells_per_board = cells_per_facet**2
        cells_per_square_facet = root
        # All zero-based; use floor division so the indices stay integers
        for cell_index in range(cells_per_board):
            cell = Cell(self.dimensions)
            board_row = cell_index // cells_per_facet
            board_col = cell_index % cells_per_facet
            self.__rows[board_row].add_cell(cell)
            self.__cols[board_col].add_cell(cell)
            cell.row = board_row + 1
            cell.col = board_col + 1
            cell_square_index = cell_index // cells_per_square_facet
            square_row = cell_square_index // cells_per_square_facet // cells_per_square_facet
            square_col = cell_square_index % cells_per_square_facet
            square_index = square_row*cells_per_square_facet + square_col
            square = self.__squares[square_index]
            square.add_cell(cell)
            if not cell.row in square.rows:
                square.rows.append(cell.row)
            if not cell.col in square.cols:
                square.cols.append(cell.col)
            cell.square = square_index + 1
            # We need board listener being called last
            self.add_cell(cell)

    @property
    def size(self):
        return self.dimensions.size

    @property
    def num_cells(self):
        return self.dimensions.size**2

    def move(self, moves):
        for (row, col, value) in moves:
            self.row(row).cell(col).move(value)

    def __makeCellGroups(self, clazz=CellGroup):
        cgs = []
        for i in range(self.dimensions.size):
            cgs.append(clazz(self.dimensions))
            cgs[i].index = i + 1
        return cgs

    def cell(self, index):
        return self.cells[index - 1]

    def row(self, rowIndex):
        return self.__rows[self.dimensions.get_int_in_range(rowIndex) - 1]

    def col(self, colIndex):
        return self.__cols[self.dimensions.get_int_in_range(colIndex) - 1]

    def square(self, squareIndex):
        return self.__squares[squareIndex - 1]

    @property
    def rows(self):
        return self.__rows

    @property
    def cols(self):
        return self.__cols

    @property
    def squares(self):
        return self.__squares

    @property
    def moves(self):
        return self.__moves

    def cell_changed(self, cell, old_value):
        self.__moves.append((cell.row, cell.col, cell.value))

    @property
    def all_groups(self):
        return self.__rows + self.__cols + self.__squares

    def find_move(self):
        allowed_moves = self.allowed_moves_for_cells()
        for solver in self.__solvers:
            (c, v) = solver.find_move(self, allowed_moves)
            if c is not None:
                return (c, v)
        return (None, None)

    def finished(self):
        return all([cell.value for cell in self.cells])

    def solve(self):
        while not self.finished():
            (cell, value) = self.find_move()
            if cell is None:
                return False
            else:
                cell.move(value)
        return True

    def dump(self):
        return '\n'.join([''.join([str(cell.value) for cell in row.cells]) for row in self.rows])
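# ---------------------------------------------------------------------------
# Usage sketch (an editor's addition, not part of the original module): a
# minimal demonstration of the Board API above. The starting moves are
# illustrative values, not a known puzzle, and the run assumes the companion
# `solvers` module imported at the top of this file is available.
if __name__ == '__main__':
    board = Board(root=3)
    # Each move is a (row, col, value) triple, all 1-based.
    board.move([(1, 1, 5), (2, 4, 7), (9, 9, 1)])
    print(board.dump())   # one row of digits per line, 0 = empty cell
    print(board.solve())  # True only if the bundled solvers finish the grid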
UTF-8
Python
false
false
8,943
py
7
sudoku.py
6
0.530694
0.52734
0
356
24.047753
106
qcgm1978/formula
5,325,759,460,685
0375c667df84c0a52cce9832f688847d563e151d
17e2b54170b0a37153e2ddc894842115b569d45f
/py/datatype.py
d7c2c7271a2b73e4e58a23eef4da5a788bb9d750
[ "Apache-2.0" ]
permissive
https://github.com/qcgm1978/formula
7bc9e25d9a29125a425632a1fa6b681c04e1f3b3
fee12667b585e37b21768f4d165b8bc5f2d4f448
refs/heads/master
"2022-12-13T05:49:49.416723"
"2020-08-30T04:49:43"
"2020-08-30T04:49:43"
290,834,162
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pandas
from sklearn import linear_model, tree
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from sklearn.tree import DecisionTreeClassifier
import pydotplus
from scipy import stats
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.image as pltimg


class DataTypes(object):
    def __init__(self, n=None):
        if isinstance(n, dict):
            self.info = n
            listProp = list(
                filter(
                    lambda key: isinstance(n[key], (list, np.ndarray)) and key, n.keys()
                )
            )
            if len(listProp):
                self.prop = listProp[0]
                self.list = self[self.prop]
                self.len = len(self.list)
            if 'target' in n:
                self.target = n['target']
            if "file" in n:
                self.df = self.readCsv(n["file"])
            if 'mapData' in n:
                self.df = self.mapStrToNum(n['mapData'])
        else:
            self.n = n

    def __getitem__(self, i):
        try:
            return self.info[i]
        except KeyError:
            return None

    def getGini(self, getSample=None):
        if callable(getSample):
            samples = getSample(self.df)
        else:
            samples = self.df
        # Gini = 1 - (x/n)2 - (y/n)2
        # Where x is the number of positive answers("GO"), n is the number of
        # samples, and y is the number of negative answers ("NO"), which gives
        # us this calculation:
        n = samples.shape[0]
        x = samples.loc[samples[self.target] == 1].shape[0]
        y = n - x
        Gini = 1 - (x / n) ** 2 - (y / n) ** 2
        return Gini, n, [y, x]

    def getAndFormatData(self, file, dictionary):
        df = self.readCsv(file)
        self.mapStrToNum(dictionary, df)
        return self

    def predictbyDecisionTree(self, features, condition, y=None):
        dtree = self.getDtree(features, y)
        return dtree.predict([condition])

    def getDtree(self, features, y=None):
        if y is None:
            y = self.target
        df = self.df
        X = df[features]
        dtree = DecisionTreeClassifier()
        dtree = dtree.fit(X, df[y])
        return dtree

    def createDecisionTreeData(self, features, y):
        dtree = self.getDtree(features, y)
        self.graphData = tree.export_graphviz(
            dtree, out_file=None, feature_names=features
        )
        return self

    def graphByData(self, img):
        graph = pydotplus.graph_from_dot_data(self.graphData)
        graph.write_png(img)
        img = pltimg.imread(img)
        imgplot = plt.imshow(img)
        return self

    def pyplot(self, bars=5):
        plt.hist(self.list, bars)
        self.show()

    def polynomialRegressionLine(self):
        x = self.info["x"]
        y = self.info["y"]
        mymodel = np.poly1d(np.polyfit(x, y, 3))
        minX = int(min(x))
        maxX = int(max(x))
        maxY = int(max(y))
        myline = np.linspace(minX, maxX, maxY)
        self.scatter()
        plt.plot(myline, mymodel(myline))
        self.show()

    def predictMultipleRegression(self, file, predictVals):
        X = self.info["x"]
        y = self.info["y"]
        df = self.readCsv(file)
        regr = linear_model.LinearRegression()
        regr.fit(df[X], df[y])
        predict = regr.predict([predictVals])
        return predict[0], list(regr.coef_)

    def predictScale(self, file, toTransformVals):
        df = self.readCsv(file)
        X = df[self.info["x"]]
        y = df[self.info["y"]]
        scale = StandardScaler()
        scaledX = scale.fit_transform(X)
        regr = linear_model.LinearRegression()
        regr.fit(scaledX, y)
        scaled = scale.transform([toTransformVals])
        predict = regr.predict([scaled[0]])
        return predict[0], list(regr.coef_)

    def scale(self, file, scaleCols):
        scale = StandardScaler()
        df = self.readCsv(file)
        X = df[scaleCols]
        scaledX = scale.fit_transform(X)
        return scaledX

    def readCsv(self, file):
        self.df = pandas.read_csv(file)
        return self.df

    def mapStrToNum(self, dictionary, df=None):
        if df is None:
            df = self.df
        for field, v in dictionary.items():
            df[field] = df[field].map(v)
        self.df = df
        return df

    def predictPolynomialRegression(self, predictX):
        mymodel = self.getPolynomialModel()
        return mymodel(predictX)

    def getPolynomialModel(self):
        x = self.info["x"]
        y = self.info["y"]
        mymodel = np.poly1d(np.polyfit(x, y, 4))
        return mymodel

    def getRSquared(self, dataType="All"):
        x, y = self.getData(dataType)
        mymodel = self.getPolynomialModel()
        return r2_score(y, mymodel(x))

    def plotScatter(self, dataType="All"):
        x, y = self.getData(dataType)
        self.scatter(x, y)
        self.show()

    def getData(self, dataType="All"):
        x = self.info["x"]
        y = self.info["y"]
        if dataType == "train":
            x = x[:80]
            y = y[:80]
        elif dataType == "test":
            x = x[80:]
            y = y[80:]
        return x, y

    def scatter(self, x=None, y=None):
        if x is None or y is None:
            x = self.info["x"]
            y = self.info["y"]
        plt.scatter(x, y)

    def show(self):
        plt.show()

    def getR(self):
        x = self.info["x"]
        slope, intercept, r, p, std_err = stats.linregress(x, self.info["y"])
        return r

    def predict(self, predictX):
        slope, intercept, r, p, std_err = stats.linregress(
            self.info["x"], self.info["y"]
        )
        return slope * predictX + intercept

    def getModel(self):
        x = self.info["x"]
        myfunc = self.predict
        mymodel = list(map(myfunc, x))
        return mymodel

    def scatterLine(self):
        mymodel = self.getModel()
        self.scatter()
        plt.plot(self.info["x"], mymodel)
        self.show()

    def Numerical(self):
        return self.Discrete() or self.Continuous()

    def Discrete(self):
        return isinstance(self.n, int)

    def Continuous(self):
        return isinstance(self.n, float)

    def Categorical(self):
        return "color"

    def Ordinal(self):
        return "school grades"

    def getMean(self):
        # return sum(self['speed'])/len(self['speed'])
        prop = self.prop
        return np.mean(self[prop])

    def getMedian(self):
        # speed = self['speed'].copy()
        # speed.sort()
        # return speed[len(speed)//2]
        return np.median(self.list)

    def getMode(self):
        return stats.mode(self.list)

    def getStd(self):
        return np.std(self.list)

    def getMSE(self, actual):
        minusSquare = map(lambda x: (x - actual) ** 2, self.list)
        sumVal = sum(list(minusSquare))
        mse = sumVal / self.len
        return mse

    def get1stdProbability(self):
        mean = self.getMean()
        mse = self.getMSE(mean)
        probability = math.sqrt(mse)
        return probability

    def getDistance1std(self):
        expect = self["expectation"]
        if expect:
            mean = self.getMean()
            unitStd = self.get1stdProbability()
            difference = expect - mean
            differenceStd = difference / unitStd
            return differenceStd
        else:
            return self.getStd()

    def getPercentile(self, percent):
        # listP = self.list.copy()
        # listP.sort()
        # lessIndex=round(self.len*percent)
        # val = listP[lessIndex-1]
        # return val
        return np.percentile(self.list, percent * 100)

    def getProbability(self):
        std = self.getDistance1std()
        std2decimal = round(std, 2)
        if std2decimal == 1.00:
            return 0.683
        elif std2decimal == 1.87:
            return 0.015
        elif std2decimal == 2.00:
            return 0.954

    def getVariance(self):
        # mean = self.getMean()
        # difference = map(lambda x: x - mean, self.list)
        # square = map(lambda x: x ** 2, difference)
        # squareList = list(square)
        # variance = sum(squareList) / len(squareList)
        variance = np.var(self.list)
        return variance
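# ---------------------------------------------------------------------------
# Usage sketch (an editor's addition, not part of the original module): the
# statistics helpers only need a dict with one list-valued key, so a small
# in-memory sample is enough; the numbers are made up.
if __name__ == '__main__':
    speeds = DataTypes({'speed': [99, 86, 87, 88, 111, 86, 103, 87, 94, 78]})
    print(speeds.getMean())      # arithmetic mean of the list
    print(speeds.getMedian())    # middle value after sorting
    print(speeds.getStd())       # population standard deviation (numpy)
    print(speeds.getVariance())  # population variance (numpy)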
UTF-8
Python
false
false
8,407
py
11
datatype.py
5
0.553467
0.545736
0
248
32.899194
167
averdier/epsi_pt_oracle_api
17,282,948,420,705
a2746c0228372ded3c8bdfe5f197c3ca1ccda7bb
f99eac90a1f9cfb87c1f2d584fec61ee04cec8aa
/app/db.py
9e8c73c2f0a6bfba299a5c293ec3077e25ff030e
[]
no_license
https://github.com/averdier/epsi_pt_oracle_api
435f4e377b6316c60779a01064e4eacb9d89fb86
174ce12ed0aeb67969880d0ddf2fbc59a5cd80fe
refs/heads/master
"2020-03-18T12:42:11.759034"
"2018-07-01T18:27:35"
"2018-07-01T18:27:35"
134,739,663
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import cx_Oracle

conn = None


def connect(uri):
    """
    Connect to database
    :param uri:
    :return:
    """
    global conn

    if conn is None:
        conn = cx_Oracle.connect(uri)


def get_conn():
    return conn
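# ---------------------------------------------------------------------------
# Usage sketch (an editor's addition, not part of the original module), kept
# as a comment so importing the module never opens a connection. The DSN is a
# placeholder; any cx_Oracle connection string works.
#
#     connect('user/password@localhost:1521/xe')
#     conn = get_conn()  # same connection object on every call after connect()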
UTF-8
Python
false
false
230
py
37
db.py
32
0.565217
0.565217
0
20
10.5
37
SpIinty/league-of-stats
944,892,811,550
d02c3e90cfcfa2d7543f21e642a5540f03d4ae2c
25f6abb8db551f2160d8a0a8376200ff110a15a9
/runes.py
ebee70e27b5b83cc0352855efe3ba3a82a330adf
[]
no_license
https://github.com/SpIinty/league-of-stats
59ce203b1e190159dda4ce24c444220707ba7805
93d7ee416489c8107694d6294c840e98deb824e8
refs/heads/main
"2023-05-07T00:06:29.780029"
"2021-06-04T06:05:55"
"2021-06-04T06:05:55"
370,393,806
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json

global RUNES
with open('runesReforged.json') as outfile:
    RUNES = json.load(outfile)


class Rune:
    def __init__(self, identification):
        self.id = identification
        for i in RUNES:
            for j in i['slots']:
                for k in j['runes']:
                    if (k['id'] == self.id):
                        self.name = k['name']
                        self.desc = k['shortDesc']
                        self.png = k['icon']
                    else:
                        pass
        splitedec = self.desc.split('<')
        fixedesc = ''
        for i in range(len(splitedec)):
            try:
                fixedesc += (splitedec[i].split('>')[1])
            except:
                fixedesc += splitedec[i]
        self.desc = fixedesc


runes = []
for i in RUNES:
    for j in i['slots']:
        for k in j['runes']:
            runes.append(Rune(k['id']))
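# ---------------------------------------------------------------------------
# Usage sketch (an editor's addition, not part of the original module): the
# module eagerly builds `runes` from runesReforged.json at import time, so
# inspecting the parsed objects is just a loop.
if __name__ == '__main__':
    for rune in runes[:5]:
        print(rune.id, rune.name)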
UTF-8
Python
false
false
984
py
15
runes.py
8
0.410569
0.409553
0
36
25.333333
54
statsmodels/statsmodels
16,956,530,912,697
8f1b735690f2a39ea1590b43fd41783ecc241dce
749af8e81d5ccd2d8714a34434a9c77772df551b
/statsmodels/multivariate/factor_rotation/_wrappers.py
e5a5e4b574129e376728a8656dae37189f72634d
[ "BSD-3-Clause" ]
permissive
https://github.com/statsmodels/statsmodels
98ca67192c08bcc611ed3a75edaded2c7181ab98
01b19d7d111b29c183f620ff0a949ef6391ff8ee
refs/heads/main
"2023-09-05T13:05:49.497076"
"2023-09-01T10:54:50"
"2023-09-01T10:54:50"
1,885,237
8,666
3,023
BSD-3-Clause
false
"2023-09-13T17:51:48"
"2011-06-12T17:04:50"
"2023-09-13T17:43:18"
"2023-09-13T17:51:48"
53,160
8,844
2,772
2,667
Python
false
false
# -*- coding: utf-8 -*-
from ._analytic_rotation import target_rotation
from ._gpa_rotation import oblimin_objective, orthomax_objective, CF_objective
from ._gpa_rotation import ff_partial_target, ff_target
from ._gpa_rotation import vgQ_partial_target, vgQ_target
from ._gpa_rotation import rotateA, GPA

__all__ = []


def rotate_factors(A, method, *method_args, **algorithm_kwargs):
    r"""
    Subroutine for orthogonal and oblique rotation of the matrix :math:`A`.
    For orthogonal rotations :math:`A` is rotated to :math:`L` according to

    .. math::

        L = AT,

    where :math:`T` is an orthogonal matrix. And, for oblique rotations
    :math:`A` is rotated to :math:`L` according to

    .. math::

        L = A(T^*)^{-1},

    where :math:`T` is a normal matrix.

    Parameters
    ----------
    A : numpy matrix (default None)
        non rotated factors
    method : str
        should be one of the methods listed below
    method_args : list
        additional arguments that should be provided with each method
    algorithm_kwargs : dictionary
        algorithm : str (default gpa)
            should be one of:

            * 'gpa': a numerical method
            * 'gpa_der_free': a derivative free numerical method
            * 'analytic' : an analytic method

        Depending on the algorithm, there are algorithm specific keyword
        arguments. For the gpa and gpa_der_free algorithms, the following
        keyword arguments are available:

        max_tries : int (default 501)
            maximum number of iterations

        tol : float
            stop criterion, algorithm stops if Frobenius norm of gradient is
            smaller than tol

        For analytic, the supported arguments depend on the method, see above.

        See the lower level functions for more details.

    Returns
    -------
    The tuple :math:`(L,T)`

    Notes
    -----
    What follows is a list of available methods. Depending on the method
    additional arguments are required and different algorithms are
    available. The algorithm_kwargs are additional keyword arguments
    passed to the selected algorithm (see the parameters section).
    Unless stated otherwise, only the gpa and gpa_der_free algorithms
    are available.

    Below,

    * :math:`L` is a :math:`p\times k` matrix;
    * :math:`N` is a :math:`k\times k` matrix with zeros on the diagonal and
      ones elsewhere;
    * :math:`M` is a :math:`p\times p` matrix with zeros on the diagonal and
      ones elsewhere;
    * :math:`C` is a :math:`p\times p` matrix with elements equal to
      :math:`1/p`;
    * :math:`(X,Y)=\operatorname{Tr}(X^*Y)` is the Frobenius norm;
    * :math:`\circ` is the element-wise product or Hadamard product.

    oblimin : orthogonal or oblique rotation that minimizes

        .. math::
            \phi(L) = \frac{1}{4}(L\circ L,(I-\gamma C)(L\circ L)N).

        For orthogonal rotations:

        * :math:`\gamma=0` corresponds to quartimax,
        * :math:`\gamma=\frac{1}{2}` corresponds to biquartimax,
        * :math:`\gamma=1` corresponds to varimax,
        * :math:`\gamma=\frac{1}{p}` corresponds to equamax.

        For oblique rotations:

        * :math:`\gamma=0` corresponds to quartimin,
        * :math:`\gamma=\frac{1}{2}` corresponds to biquartimin.

        method_args:

        gamma : float
            oblimin family parameter
        rotation_method : str
            should be one of {orthogonal, oblique}

    orthomax : orthogonal rotation that minimizes

        .. math::
            \phi(L) = -\frac{1}{4}(L\circ L,(I-\gamma C)(L\circ L)),

        where :math:`0\leq\gamma\leq1`. The orthomax family is equivalent to
        the oblimin family (when restricted to orthogonal rotations).
        Furthermore,

        * :math:`\gamma=0` corresponds to quartimax,
        * :math:`\gamma=\frac{1}{2}` corresponds to biquartimax,
        * :math:`\gamma=1` corresponds to varimax,
        * :math:`\gamma=\frac{1}{p}` corresponds to equamax.

        method_args:

        gamma : float (between 0 and 1)
            orthomax family parameter

    CF : Crawford-Ferguson family for orthogonal and oblique rotation which
    minimizes:

        .. math::
            \phi(L) =\frac{1-\kappa}{4} (L\circ L,(L\circ L)N)
                     -\frac{1}{4}(L\circ L,M(L\circ L)),

        where :math:`0\leq\kappa\leq1`. For orthogonal rotations the oblimin
        (and orthomax) family of rotations is equivalent to the
        Crawford-Ferguson family. To be more precise:

        * :math:`\kappa=0` corresponds to quartimax,
        * :math:`\kappa=\frac{1}{p}` corresponds to varimax,
        * :math:`\kappa=\frac{k-1}{p+k-2}` corresponds to parsimax,
        * :math:`\kappa=1` corresponds to factor parsimony.

        method_args:

        kappa : float (between 0 and 1)
            Crawford-Ferguson family parameter
        rotation_method : str
            should be one of {orthogonal, oblique}

    quartimax : orthogonal rotation method
        minimizes the orthomax objective with :math:`\gamma=0`

    biquartimax : orthogonal rotation method
        minimizes the orthomax objective with :math:`\gamma=\frac{1}{2}`

    varimax : orthogonal rotation method
        minimizes the orthomax objective with :math:`\gamma=1`

    equamax : orthogonal rotation method
        minimizes the orthomax objective with :math:`\gamma=\frac{1}{p}`

    parsimax : orthogonal rotation method
        minimizes the Crawford-Ferguson family objective with
        :math:`\kappa=\frac{k-1}{p+k-2}`

    parsimony : orthogonal rotation method
        minimizes the Crawford-Ferguson family objective with
        :math:`\kappa=1`

    quartimin : oblique rotation method
        minimizes the oblimin objective with :math:`\gamma=0`

    biquartimin : oblique rotation method
        minimizes the oblimin objective with :math:`\gamma=\frac{1}{2}`

    target : orthogonal or oblique rotation that rotates towards a target
    matrix :math:`H` by minimizing the objective

        .. math::
            \phi(L) =\frac{1}{2}\|L-H\|^2.

        method_args:

        H : numpy matrix
            target matrix
        rotation_method : str
            should be one of {orthogonal, oblique}

        For orthogonal rotations the algorithm can be set to analytic in which
        case the following keyword arguments are available:

        full_rank : bool (default False)
            if set to true full rank is assumed

    partial_target : orthogonal (default) or oblique rotation that partially
    rotates towards a target matrix :math:`H` by minimizing the objective:

        .. math::
            \phi(L) =\frac{1}{2}\|W\circ(L-H)\|^2.

        method_args:

        H : numpy matrix
            target matrix
        W : numpy matrix (default matrix with equal weight one for all entries)
            matrix with weights, entries can either be one or zero

    Examples
    --------
    >>> A = np.random.randn(8,2)
    >>> L, T = rotate_factors(A,'varimax')
    >>> np.allclose(L,A.dot(T))
    >>> L, T = rotate_factors(A,'orthomax',0.5)
    >>> np.allclose(L,A.dot(T))
    >>> L, T = rotate_factors(A,'quartimin',0.5)
    >>> np.allclose(L,A.dot(np.linalg.inv(T.T)))
    """
    if 'algorithm' in algorithm_kwargs:
        algorithm = algorithm_kwargs['algorithm']
        algorithm_kwargs.pop('algorithm')
    else:
        algorithm = 'gpa'
    assert not ('rotation_method' in algorithm_kwargs), (
        'rotation_method cannot be provided as keyword argument')
    L = None
    T = None
    ff = None
    vgQ = None
    p, k = A.shape
    # set ff or vgQ to appropriate objective function, compute solution using
    # recursion or analytically compute solution
    if method == 'orthomax':
        assert len(method_args) == 1, ('Only %s family parameter should be '
                                       'provided' % method)
        rotation_method = 'orthogonal'
        gamma = method_args[0]
        if algorithm == 'gpa':
            vgQ = lambda L=None, A=None, T=None: orthomax_objective(
                L=L, A=A, T=T, gamma=gamma, return_gradient=True)
        elif algorithm == 'gpa_der_free':
            ff = lambda L=None, A=None, T=None: orthomax_objective(
                L=L, A=A, T=T, gamma=gamma, return_gradient=False)
        else:
            raise ValueError('Algorithm %s is not possible for %s '
                             'rotation' % (algorithm, method))
    elif method == 'oblimin':
        assert len(method_args) == 2, ('Both %s family parameter and '
                                       'rotation_method should be '
                                       'provided' % method)
        rotation_method = method_args[1]
        assert rotation_method in ['orthogonal', 'oblique'], (
            'rotation_method should be one of {orthogonal, oblique}')
        gamma = method_args[0]
        if algorithm == 'gpa':
            # rotation_method is forwarded here as well so that oblique
            # oblimin rotations use the oblique objective
            vgQ = lambda L=None, A=None, T=None: oblimin_objective(
                L=L, A=A, T=T, gamma=gamma, rotation_method=rotation_method,
                return_gradient=True)
        elif algorithm == 'gpa_der_free':
            ff = lambda L=None, A=None, T=None: oblimin_objective(
                L=L, A=A, T=T, gamma=gamma, rotation_method=rotation_method,
                return_gradient=False)
        else:
            raise ValueError('Algorithm %s is not possible for %s '
                             'rotation' % (algorithm, method))
    elif method == 'CF':
        assert len(method_args) == 2, ('Both %s family parameter and '
                                       'rotation_method should be provided'
                                       % method)
        rotation_method = method_args[1]
        assert rotation_method in ['orthogonal', 'oblique'], (
            'rotation_method should be one of {orthogonal, oblique}')
        kappa = method_args[0]
        if algorithm == 'gpa':
            vgQ = lambda L=None, A=None, T=None: CF_objective(
                L=L, A=A, T=T, kappa=kappa, rotation_method=rotation_method,
                return_gradient=True)
        elif algorithm == 'gpa_der_free':
            ff = lambda L=None, A=None, T=None: CF_objective(
                L=L, A=A, T=T, kappa=kappa, rotation_method=rotation_method,
                return_gradient=False)
        else:
            raise ValueError('Algorithm %s is not possible for %s '
                             'rotation' % (algorithm, method))
    elif method == 'quartimax':
        return rotate_factors(A, 'orthomax', 0, **algorithm_kwargs)
    elif method == 'biquartimax':
        return rotate_factors(A, 'orthomax', 0.5, **algorithm_kwargs)
    elif method == 'varimax':
        return rotate_factors(A, 'orthomax', 1, **algorithm_kwargs)
    elif method == 'equamax':
        return rotate_factors(A, 'orthomax', 1/p, **algorithm_kwargs)
    elif method == 'parsimax':
        return rotate_factors(A, 'CF', (k-1)/(p+k-2), 'orthogonal',
                              **algorithm_kwargs)
    elif method == 'parsimony':
        return rotate_factors(A, 'CF', 1, 'orthogonal', **algorithm_kwargs)
    elif method == 'quartimin':
        return rotate_factors(A, 'oblimin', 0, 'oblique', **algorithm_kwargs)
    elif method == 'biquartimin':
        return rotate_factors(A, 'oblimin', 0.5, 'oblique',
                              **algorithm_kwargs)
    elif method == 'target':
        assert len(method_args) == 2, (
            'only the rotation target and orthogonal/oblique should be '
            'provided for %s rotation' % method)
        H = method_args[0]
        rotation_method = method_args[1]
        assert rotation_method in ['orthogonal', 'oblique'], (
            'rotation_method should be one of {orthogonal, oblique}')
        if algorithm == 'gpa':
            vgQ = lambda L=None, A=None, T=None: vgQ_target(
                H, L=L, A=A, T=T, rotation_method=rotation_method)
        elif algorithm == 'gpa_der_free':
            ff = lambda L=None, A=None, T=None: ff_target(
                H, L=L, A=A, T=T, rotation_method=rotation_method)
        elif algorithm == 'analytic':
            assert rotation_method == 'orthogonal', (
                'For analytic %s rotation only orthogonal rotation is '
                'supported')
            T = target_rotation(A, H, **algorithm_kwargs)
        else:
            raise ValueError('Algorithm %s is not possible for %s rotation'
                             % (algorithm, method))
    elif method == 'partial_target':
        assert len(method_args) == 2, ('2 additional arguments are expected '
                                       'for %s rotation' % method)
        H = method_args[0]
        W = method_args[1]
        rotation_method = 'orthogonal'
        if algorithm == 'gpa':
            vgQ = lambda L=None, A=None, T=None: vgQ_partial_target(
                H, W=W, L=L, A=A, T=T)
        elif algorithm == 'gpa_der_free':
            ff = lambda L=None, A=None, T=None: ff_partial_target(
                H, W=W, L=L, A=A, T=T)
        else:
            raise ValueError('Algorithm %s is not possible for %s '
                             'rotation' % (algorithm, method))
    else:
        raise ValueError('Invalid method')
    # compute L and T if not already done
    if T is None:
        L, phi, T, table = GPA(A, vgQ=vgQ, ff=ff,
                               rotation_method=rotation_method,
                               **algorithm_kwargs)
    if L is None:
        assert T is not None, 'Cannot compute L without T'
        L = rotateA(A, T, rotation_method=rotation_method)
    return L, T
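# ---------------------------------------------------------------------------
# Usage sketch (an editor's addition, not part of the original module).
# Because of the relative imports above, this module is used through the
# package rather than run directly, e.g.:
#
#     >>> import numpy as np
#     >>> from statsmodels.multivariate.factor_rotation import rotate_factors
#     >>> A = np.random.randn(8, 2)
#     >>> L, T = rotate_factors(A, 'varimax')
#     >>> np.allclose(L, A.dot(T))  # orthogonal rotation satisfies L = A T
#     True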
UTF-8
Python
false
false
13,637
py
1,711
_wrappers.py
1,082
0.583339
0.576813
0
353
37.631728
83
sdesai95/web_scrape
3,728,031,661,212
d2bc28f567300c31f221d90ca839d4d8d1bf6200
959098a6a5ccb680ac5088f2da110e55ab4b5485
/1 rss_scrape/scrape_govt.py
ed6fc70bf1a91dc880c1c93f0503c53a1460308c
[]
no_license
https://github.com/sdesai95/web_scrape
f672f2a73d41350e49c4b3c105c126be9ee22343
c76b634f322796b8dddc371163f2613bf5c6e723
refs/heads/master
"2018-07-06T13:06:28.609662"
"2018-07-05T20:10:42"
"2018-07-05T20:10:42"
128,091,180
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import feedparser

from send_server import get_time, send_data, play_sound, write_file
from get_tickers import get_tickers

webpages = {'https://api2.fcc.gov/edocs/public/api/v1/rss/': 'fcc edocuments',
            'http://www.imf.org/external/rss/feeds.aspx?category=WHATSNEW': 'imf news',
            'https://www.epa.gov/newsreleases/search/rss': 'epa news releases',
            'https://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/FoodSafety/rss.xml': 'fda food safety',
            'https://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/Food/rss.xml': 'fda food news',
            'https://www.ferc.gov/xml/whats-new.xml': 'ferc press releases',
            'http://www.opec.org/opec_web/en/pressreleases.rss': 'opec press releases',
            'https://www.sec.gov/rss/litigation/litreleases.xml': 'sec litigation releases',
            'https://www.justice.gov/sites/default/files/pages/attachments/2015/07/20/atr_criminal.xml': 'doj criminal suits',
            'https://www.justice.gov/sites/default/files/pages/attachments/2015/07/30/atr_civil_0.xml': 'doj civil suits',
            'https://www.wto.org/library/rss/latest_news_e.xml': 'wto press releases',
            'https://www.federalreserve.gov/feeds/press_all.xml': 'fed press releases',
            'https://www.sec.gov/news/pressreleases.rss': 'sec press releases',
            'https://www.eia.gov/rss/press_rss.xml': 'eia press releases',
            'https://www.ftc.gov/feeds/press-release.xml': 'ftc press releases',
            'https://www.usda.gov/rss/latest-releases.xml': 'usda press releases',
            'https://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/PressReleases/rss.xml': 'fda press releases',
            'https://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/MedWatch/rss.xml': 'fda medwatch',
            'https://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/Recalls/rss.xml': 'fda product recalls',
            'https://www.treasury.gov/resource-center/sanctions/OFAC-Enforcement/Documents/ofac.xml': 'treasury sanctions',
            'https://www.justice.gov/feeds/justice-news.xml/1664': 'justice department news',
            'https://www.hhs.gov/rss/news.xml': 'health human services',
            'https://www.sec.gov/rss/litigation/admin.xml': 'sec litigation proceedings',
            'https://www.cpsc.gov/Newsroom/CPSC-RSS-Feed/Recalls-RSS': 'product recalls',
            'https://www.justice.gov/feeds/justice-news.xml?type%5Bpress_release%5D=press_release&component%5B1981%5D=1981&&organization=186051': 'justice dept news'}


def rss_scrape(site, count):
    rss = feedparser.parse(site)
    for post in rss.entries:
        try:
            if post.link not in links:
                links.append(post.link)
                if count != 0:
                    send_data(webpages[site], post.title, get_time(), post.link)
                    play_sound('notify.wav')
                    print(webpages[site], get_time(), post.title, post.link)
                    write_file(get_tickers(post.title), webpages[site], get_time(), post.title, 'scrape_govt')
        except AttributeError:
            pass


sites = list(webpages.keys())
links = []
count = 0

while True:
    try:
        for site in sites:
            rss_scrape(site, count)
        count = count + 1
    except KeyboardInterrupt:
        print('Keyboard interrupt.')
        exit()
UTF-8
Python
false
false
3,402
py
38
scrape_govt.py
37
0.6408
0.627866
0
57
58.684211
166
Sidetalker/SplatPal
2,422,361,596,875
c36d963300b6973a7f170d67b8e12d0c4d5feae7
c6da11a55afd759357e0fa89c41bb154233a873d
/SplatScripts/weaponImageParse.py
f69a2e0b0af2a285958df790fa484a6cd9311eb9
[]
no_license
https://github.com/Sidetalker/SplatPal
8fb40084704cdb318205f85c4e61d5e5de013738
c391c3b0a1118f033570c6870c4aa332ef561b26
refs/heads/master
"2021-05-04T10:32:33.058093"
"2016-03-21T23:19:08"
"2016-03-21T23:19:08"
47,736,068
4
1
null
false
"2016-02-24T03:17:37"
"2015-12-10T03:43:11"
"2016-02-19T13:07:04"
"2016-02-24T03:17:37"
33,249
1
1
13
Swift
null
null
import pickle
from shutil import copy
from os import listdir

src = '/Users/kevin/Downloads/wikiteam-master/splatoonwikiorg_w-20151212-wikidump/images'
destA = '/Users/kevin/Documents/github/SplatPal/SplatPal/Images/weapons/'
destB = '/Users/kevin/Documents/github/SplatPal/SplatPal/Images/specials/'
destC = '/Users/kevin/Documents/github/SplatPal/SplatPal/Images/subs/'

images = [f for f in listdir(src)]
resultTextA = []
resultTextB = []
resultTextC = []

for f in images:
    if f[-3:] == 'png' and not 'Beta' in f:
        if 'Weapont Main' in f:
            resultTextA.append(f[13:][:-4])
            copy(src + '/' + f, destA + 'weapon' + f[12:].replace(' ', ''))
        if 'Weapon Special' in f:
            resultTextB.append(f[15:][:-4])
            copy(src + '/' + f, destB + 'special' + f[15:].replace(' ', ''))
        if 'Weapon Sub' in f:
            # sub weapon names belong in resultTextC, which is what gets
            # pickled to subNames.dat below
            resultTextC.append(f[11:][:-4])
            copy(src + '/' + f, destC + 'sub' + f[11:].replace(' ', ''))

with open('weaponNames.dat', 'wb') as f:
    pickle.dump(resultTextA, f)
with open('specialNames.dat', 'wb') as f:
    pickle.dump(resultTextB, f)
with open('subNames.dat', 'wb') as f:
    pickle.dump(resultTextC, f)
UTF-8
Python
false
false
1,107
py
65
weaponImageParse.py
24
0.658537
0.636856
0
31
34.741935
89
linlufeng/LufengLearnPython
6,425,271,117,208
2d5c5ea3aa3e8e5b770867a89caea195f7e15c31
692eceac2533150b86aa173b451698b7a12ff735
/PycharmProjects/lession18/Source14.py
976ee2bba655bd9886ad2b7e22e6a5a4102b708d
[]
no_license
https://github.com/linlufeng/LufengLearnPython
cb74f34926663dc9b7d4d6789e6e7e044dd73db3
bedcbf4fea6d048a3903a623a4386ac5d484a70d
refs/heads/master
"2022-09-12T22:14:19.243757"
"2022-08-25T02:54:13"
"2022-08-25T02:54:13"
200,183,327
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
# -*- coding: UTF-8 -*-


class A(object):
    a = 1
    b = "b"


obj = A()
print hasattr(obj, 'a')
print hasattr(obj, 'b')
print hasattr(obj, 'c')
print hash(obj)

v = memoryview('abcd')
print v
print v[1]
print v[1:2]
print v[1:3].tobytes()

print round(3.1415, 1)  # 3.1
print round(3.1415, 3)  # 3.142

__import__('Source13')

print complex(1, 2)
print complex("1+2j")

print min(1, 3, 5, 0, 10)
print min(-10, -99, 100, 0, 5)
UTF-8
Python
false
false
448
py
124
Source14.py
96
0.591518
0.488839
0
30
13.966667
31
bssrdf/pyleet
6,227,702,626,731
d9c38d0e79acd6df275242fd93e858f261ac3a18
cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde
/S/SplitaStringIntotheMaxNumberofUniqueSubstrings.py
8a1adc29ab280cf6a62d060b11d874a9bbaa83c6
[]
no_license
https://github.com/bssrdf/pyleet
8861bbac06dfe0f0f06f6ad1010d99f8def19b27
810575368ecffa97677bdb51744d1f716140bbb1
refs/heads/master
"2023-08-20T05:44:30.130517"
"2023-08-19T21:54:34"
"2023-08-19T21:54:34"
91,913,009
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
-Medium-

*Backtracking*

Given a string s, return the maximum number of unique substrings that the
given string can be split into.

You can split string s into any list of non-empty substrings, where the
concatenation of the substrings forms the original string. However, you must
split the substrings such that all of them are unique.

A substring is a contiguous sequence of characters within a string.

Example 1:

Input: s = "ababccc"
Output: 5
Explanation: One way to split maximally is ['a', 'b', 'ab', 'c', 'cc'].
Splitting like ['a', 'b', 'a', 'b', 'c', 'cc'] is not valid as you have
'a' and 'b' multiple times.

Example 2:

Input: s = "aba"
Output: 2
Explanation: One way to split maximally is ['a', 'ba'].

Example 3:

Input: s = "aa"
Output: 1
Explanation: It is impossible to split the string any further.

Constraints:

1 <= s.length <= 16
s contains only lower case English letters.

'''


class Solution:
    def maxUniqueSplit(self, s: str) -> int:
        # wrong
        def helper(s):
            if not s:
                return [set()]
            l, ret = 0, []
            for i in range(len(s)):
                spt = helper(s[i+1:])
                # print(s, i, s[:i+1], s[i+1:], spt)
                for sp in spt:
                    if sp and s[:i+1] not in sp:
                        ret.append(sp | {s[:i+1]})
                # print('x', s, i, spt, ret)
            if not ret:
                ret = [{s}]
            # print(s, ret)
            # if len(s) > sum(len(r) for r in ret):
            #     print(s, ret)
            return ret
        return max(len(r) for r in helper(s))

    def maxUniqueSplit2(self, s: str) -> int:
        seen = set()

        def helper(s, seen):
            ans = 0
            if s:
                for i in range(1, len(s) + 1):
                    candidate = s[:i]
                    if candidate not in seen:
                        seen.add(candidate)
                        ans = max(ans, 1 + helper(s[i:], seen))
                        seen.remove(candidate)
            return ans
        return helper(s, seen)


if __name__ == "__main__":
    print(Solution().maxUniqueSplit(s="ccc"))
    print(Solution().maxUniqueSplit(s="ababccc"))
    print(Solution().maxUniqueSplit(s="aba"))
    print(Solution().maxUniqueSplit(s="aa"))
    print(Solution().maxUniqueSplit(s="ww"))
    print(Solution().maxUniqueSplit(s="sww"))
    print(Solution().maxUniqueSplit(s="wwwzfvedwfvhsww"))
    print(Solution().maxUniqueSplit(s="bbgmgp"))
    print(Solution().maxUniqueSplit(s="nfbbgmgp"))
    print(Solution().maxUniqueSplit(s="mbaejekebbb"))
    print(Solution().maxUniqueSplit(s="aapmihbdabknhebd"))
    print(Solution().maxUniqueSplit2(s="ccc"))
    print(Solution().maxUniqueSplit2(s="ababccc"))
    print(Solution().maxUniqueSplit2(s="aba"))
    print(Solution().maxUniqueSplit2(s="aa"))
    print(Solution().maxUniqueSplit2(s="ww"))
    print(Solution().maxUniqueSplit2(s="sww"))
    print(Solution().maxUniqueSplit2(s="wwwzfvedwfvhsww"))
    print(Solution().maxUniqueSplit2(s="bbgmgp"))
    print(Solution().maxUniqueSplit2(s="nfbbgmgp"))
    print(Solution().maxUniqueSplit2(s="mbaejekebbb"))
    print(Solution().maxUniqueSplit2(s="aapmihbdabknhebd"))
UTF-8
Python
false
false
3,351
py
1,959
SplitaStringIntotheMaxNumberofUniqueSubstrings.py
1,951
0.561623
0.552372
0
101
32.178218
203
jagrusy/UnsupervisedLearning
7,576,322,340,777
e01fd7a52a4d165bc2b07cf410981176acfaae09
8dd54e94e0b5bb66f29d01cebe34ea79dd5e271d
/01b_clustering_EM_visualization.py
9074d27352a0084cf17cf7737251a18b720e9410
[]
no_license
https://github.com/jagrusy/UnsupervisedLearning
0670111989f2e15170447a1abd3065d8ffaf0977
1dbac2de7ba6386df636ccda11636f9bdde983be
refs/heads/master
"2020-09-03T04:30:53.670893"
"2019-11-04T01:44:52"
"2019-11-04T01:44:52"
219,386,728
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np
import itertools

from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl

from sklearn import mixture
from sklearn.datasets import load_digits
from util import getCreditCardData, getWineData

# Number of samples per component
n_samples = 500

# Generate random sample, two components
np.random.seed(0)


def gmm_bic(X, data_name):
    lowest_bic = np.infty
    bic = []
    n_components_range = range(1, 10)
    cv_types = ['spherical', 'tied', 'diag', 'full']
    for cv_type in cv_types:
        for n_components in n_components_range:
            # Fit a Gaussian mixture with EM
            gmm = mixture.GaussianMixture(n_components=n_components,
                                          covariance_type=cv_type)
            gmm.fit(X)
            bic.append(gmm.bic(X))
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm

    bic = np.array(bic)
    color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
                                  'darkorange'])
    clf = best_gmm
    bars = []

    # Plot the BIC scores
    plt.figure(figsize=(8, 6))
    # spl = plt.subplot(2, 1, 1)
    for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
        xpos = np.array(n_components_range) + .2 * (i - 2)
        bars.append(plt.bar(xpos, bic[i * len(n_components_range):
                                      (i + 1) * len(n_components_range)],
                            width=.2, color=color))
    # plt.xticks(n_components_range)
    plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
    xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
        .2 * np.floor(bic.argmin() / len(n_components_range))
    plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
    plt.xlabel('Number of components')
    plt.ylabel('BIC Score')
    plt.legend([b[0] for b in bars], cv_types)
    # plt.xticks(())
    # plt.yticks(())
    plt.title('Gaussian Mixture Model with {} Dataset'.format(data_name))
    plt.savefig('Figs/01b_gmm_{}'.format(data_name))


digits = load_digits()
X, y = digits.data, digits.target
gmm_bic(X, 'Digits')

X, y, data = getCreditCardData('./Data/ccdefault.xls')
gmm_bic(X, 'Credit Card')
UTF-8
Python
false
false
2,325
py
27
01b_clustering_EM_visualization.py
8
0.568602
0.553118
0
68
32.220588
73
euske/python3-toys
5,652,176,977,000
1453d62a8a4df1295879f9cc177e749845afae9f
c64269774427d81b474b923839c0ed24a8ac38f1
/namemerge.py
0360487d89ea79f2bf96b7c72ab59d333daad2d5
[ "LicenseRef-scancode-public-domain" ]
permissive
https://github.com/euske/python3-toys
ba6be94c61e75473426909d0a23d65b9eb54bf2a
9945f22167e580f6e3ba1dc4a1513d25f2e6bafa
refs/heads/master
"2023-04-01T04:55:20.477855"
"2023-03-27T02:54:28"
"2023-03-27T02:54:28"
45,541,191
8
7
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
##
##  namemerge.py - merge organization names.
##
import re

FULLWIDTH = (
    ' !”#$%&’()*+,\uff0d\u2212./0123456789:;<=>?'
    '@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_'
    '‘abcdefghijklmnopqrstuvwxyz{|}'
)
HALFWIDTH = (
    ' !\"#$%&\'()*+,--./0123456789:;<=>?'
    '@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_'
    '`abcdefghijklmnopqrstuvwxyz{|}'
)
Z2HMAP = dict( (ord(zc), ord(hc)) for (zc,hc) in zip(FULLWIDTH, HALFWIDTH) )

def zen2han(s):
    return s.translate(Z2HMAP)


##  NameMerger
##
class NameMerger:

    def __init__(self, short_threshold=0.9, long_threshold=0.3):
        self.short_threshold = short_threshold
        self.long_threshold = long_threshold
        self.items = []
        self.ss = {}
        return

    def add(self, key, *args):
        if not args:
            args = key
        key = zen2han(re.sub(r'\W', '', key))
        pid = len(self.items)
        self.items.append((key, args))
        for n in range(1, len(key)+1):
            for i in range(len(key)-n+1):
                s = key[i:i+n]
                if s in self.ss:
                    a = self.ss[s]
                else:
                    a = self.ss[s] = []
                a.append(pid)
        return

    def fixate(self):
        clusters = []
        belongs = {}
        for (s,a) in sorted(self.ss.items(), key=lambda x: len(x[0])):
            for (i,pid1) in enumerate(a):
                (key1,_) = self.items[pid1]
                prop1 = len(s)/len(key1)
                for pid2 in a[i+1:]:
                    (key2,_) = self.items[pid2]
                    prop2 = len(s)/len(key2)
                    if max(prop1, prop2) < self.short_threshold: continue
                    if min(prop1, prop2) < self.long_threshold: continue
                    if pid1 in belongs:
                        c1 = belongs[pid1]
                        if pid2 in belongs:
                            # merge: c1 <- c2, erase: c2.
                            c2 = belongs[pid2]
                            if c1 is not c2:
                                c1.extend(c2)
                                for pid in c2:
                                    belongs[pid] = c1
                                clusters.remove(c2)
                        else:
                            # join: c1 <- pid2.
                            c1.append(pid2)
                            belongs[pid2] = c1
                    elif pid2 in belongs:
                        # join: c2 <- pid1.
                        c2 = belongs[pid2]
                        c2.append(pid1)
                        belongs[pid1] = c2
                    else:
                        # new cluster
                        c = [pid1, pid2]
                        clusters.append(c)
                        belongs[pid1] = c
                        belongs[pid2] = c
        clusters.sort(key=len, reverse=True)
        for pid in range(len(self.items)):
            if pid not in belongs:
                clusters.append([pid])
        for c in clusters:
            yield [ self.items[pid][1] for pid in c ]
        return

if __name__ == '__main__':
    import fileinput
    m = NameMerger()
    for line in fileinput.input():
        m.add(line.strip())
    for c in m.fixate():
        print(len(c), c)
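# ---------------------------------------------------------------------------
# Usage sketch (an editor's addition, not part of the original module), kept
# as a comment so it does not interfere with the stdin script above: add()
# accepts an arbitrary payload after the name, and fixate() yields one list
# of payloads per cluster. The names below are made up.
#
#     m = NameMerger()
#     m.add('Example Corp.', 1)
#     m.add('Example Corporation', 2)
#     m.add('Unrelated Inc.', 3)
#     for cluster in m.fixate():
#         print(cluster)  # payload tuples grouped by name similarity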
UTF-8
Python
false
false
3,477
py
28
namemerge.py
27
0.424187
0.397448
0
102
31.264706
76
charlihi/bridges-python
2,791,728,758,618
965c931c9a4366be6bf7a9edd21ba3b6bc89ab84
9d9d627df1ccee130c2bbb13aa04f87166f6c94a
/bridges/audio_clip.py
fad155610542eb5b1383fa40958db241e89e1fa4
[ "MIT" ]
permissive
https://github.com/charlihi/bridges-python
b82c6ad57dda14a3da08e9eec6b6b343cb788d14
6385e84ff754e52662464bf85eb6537fb1ff9a5f
refs/heads/master
"2023-04-08T08:48:05.341114"
"2021-04-14T18:32:45"
"2021-04-14T18:32:45"
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
import base64
import wave
import json
import array
import math
from bridges.audio_channel import AudioChannel


class AudioClip(object):
    """
    @brief This class provides support for reading, modifying, and playing audio waveforms.

    This class provides a way to represent an AudioClip (think of a
    WAV file) in Bridges as waveforms.

    An AudioClip can be composed of multiple channels: a stereo sound
    would be composed of 2 channels (Left and Right), a mono sound
    would be composed of a single channel. A 5.1 sound would be
    composed of 6 channels. When building an AudioClip from a file, the
    number of channels is taken from the file; some constructors have a
    num_channels parameter that enables to pass the number of channels
    explicitly. If unsure, one can know how many channels are in an
    audio clip using get_num_channels().

    Each channel is essentially a 1D signal. That is to say, it is an
    array of values that represent how far the membrane of a speaker
    should be from its resting position. The quality of the sound is
    controlled by two parameters: sampling rate and sampling depth.

    Sampling rate tells how many positions per second are encoded by
    the AudioClip. It is expressed in Hertz. CD quality is 44100Hz;
    while walkie-talkies use 8000Hz. It is set automatically if read
    from a file; or it can be passed as the sampleRate parameter to
    some of the constructors. The sampling rate can be obtained from an
    AudioClip using get_sample_rate().

    The length of an AudioClip is expressed in number of samples. So if
    an AudioClip is composed of 16,000 samples with a sampling rate of
    8000Hz, the clip would be 2 seconds long. The number of samples can
    be obtained with get_sample_count(); it is set from a file or can
    be passed as the sampleCount parameter of some of the constructors.

    The sampling depth indicates how many different positions the
    membrane can take. It is typically expressed in bits with supported
    values being 8-bit, 16-bit, 24-bit, and 32-bit. If a clip is
    encoded with a depth of 8 bits, the membrane can take 2^8 different
    positions ranging from -128 to +127, with 0 being the resting
    position. The sampling depth is read from files or passed as the
    sampleBits parameter of the constructor. The sampling depth of an
    existing clip can be obtained with get_sample_bits().

    The individual samples are accessed with the get_sample() and
    set_sample() functions. The samples are integer values in the
    [-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)) range. The
    functions allow specifying the channel and sample index.

    @author Luke Sloop, Erik Saule

    @date 2020, 1/31/2020, 2021
    """
    def __init__(self, filepath: str="", sample_count: int=0, num_channels: int=1,
                 sample_bits: int=32, sample_rate: int=44100) -> None:
        """
        AudioBase constructor. Specify either a filepath or all the
        other parameters.

        Args:
            (str) filepath: name of the wav file to create a clip of. If this parameter is used, all the other ones are ignored.
            (int) sample_count: The total number of samples in this audio object
            (int) num_channels: number of channels (stereo would be 2)
            (int) sample_rate: The number of samples in 1 second of audio (default to cd quality: 44100)
            (int) sample_bits: Bit depth, that is to say, each sample will be in the [-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)) range
        Returns:
            None
        """
        if filepath != "":
            self._from_filepath(filepath)
            return

        if sample_count > 1000000000:
            raise ValueError("Maximum frames exceeded with value %d" % self.get_sample_count())
        if sample_bits != 8 and sample_bits != 16 and sample_bits != 24 and sample_bits != 32:
            raise ValueError("sample_bits should be 8, 16, 24, or 32")
        if num_channels <= 0:
            raise ValueError("num_channels should be positive")
        if sample_rate <= 0:
            raise ValueError("sample_rate should be positive")

        self.sample_count = sample_count
        self.sample_rate = sample_rate
        self.sample_bits = sample_bits
        self.num_channels = num_channels

        # Initialize the channels
        self._channels = []
        for i in range(self.num_channels):
            self._channels.append(AudioChannel(sample_count=self.sample_count, sample_bits=self.sample_bits))

    def _from_filepath(self, filepath: str):
        with wave.open(filepath, "r") as f:
            self.__init__(sample_count=f.getnframes(), sample_rate=f.getframerate(),
                          num_channels=f.getnchannels(), sample_bits=f.getsampwidth()*8)

            self.framebytes = f.readframes(f.getnframes())
            framebytes = self.framebytes

            channel = 0
            count = 0
            for i in range(0, len(framebytes), self.get_sample_bytes()):
                if self.get_sample_bytes() == 1:
                    val = int.from_bytes(framebytes[i:i+self.get_sample_bytes()], byteorder='little', signed=False)
                    val = val - 128
                    self.set_sample(channel, count, val)
                else:
                    val = int.from_bytes(framebytes[i:i+self.get_sample_bytes()], byteorder='little', signed=True)
                    self.set_sample(channel, count, val)

                channel += 1
                if channel >= f.getnchannels():
                    count += 1
                    channel = 0

    def get_num_channels(self) -> int:
        """
        Return the number of channels in this AudioClip.
        1 for mono, 2 for stereo, etc.

        Returns:
            int: The number of channels of audio samples this object holds.
        """
        return self.num_channels

    def get_channel(self, index: int) -> AudioChannel:
        """
        Get the audio channel at index. The index should be less than
        get_num_channels().

        Args:
            (int) index: The index of the channel to get. 0 for front-left, 1 for front-right, etc.
        Returns:
            AudioChannel: The audio channel at index
        """
        return self._channels[index]

    def get_sample_rate(self) -> int:
        """
        Get the sample rate of this audio clip. This is the number of
        samples that are taken in one second.

        Returns:
            int: The sample rate or number of samples in 1 second of audio
        """
        return self.sample_rate

    def get_sample_count(self) -> int:
        """
        Get the number of samples in each channel of this audio object.
        Each channel will contain this number of samples.

        Returns:
            int: The total number of samples in this audio object
        """
        return self.sample_count

    def get_sample(self, channel: int, index: int) -> int:
        """
        Get the sample at the index of the sample data from a specific
        channel.

        Args:
            (int) channel: The index of the channel to get. 0 for front-left, 1 for front-right, etc.
            (int) index: The index of the sample to get. From 0 - get_sample_count()
        Returns:
            int: The sample in the [-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)) range
        """
        value = self.get_channel(channel).get_sample(index)

        return int(value)

    def set_sample(self, channel: int, index: int, value: int) -> None:
        """
        Set the sample at the index of the sample data to value

        Args:
            (int) channel: The index of the channel to get. 0 for front-left, 1 for front-right, etc.
            (int) index: The index of sampledata to set which must be less than get_sample_count()
            (int) value: The value to set the sample to which must be a valid signed integer with bit length get_sample_bits(). That is to say in the [-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)) range.
        Returns:
            None
        """
        if (value < -2**(self.get_sample_bits()-1)) or (value >= 2**(self.get_sample_bits()-1)):
            raise ValueError("Audio value Out of Bound. Should be in [-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)) range")

        self.get_channel(channel).set_sample(index, int(value))

    def get_sample_bits(self) -> int:
        """
        Get the number of bits for the samples in this audio clip.
        Will be 8, 16, 24, or 32 bits.

        Returns:
            int: The number of bits for each sample
        """
        return self.sample_bits

    def get_sample_bytes(self) -> int:
        """
        Get the number of bytes for the samples in this audio clip.
        Will be 1, 2, 3, or 4 bytes.

        Returns:
            int: The number of bytes for each sample
        """
        return self.sample_bits // 8

    def _get_type_code(self) -> str:
        if self.get_sample_bytes() == 1:
            return "b"
        elif self.get_sample_bytes() == 2:
            return "h"
        elif self.get_sample_bytes() == 3:
            return "f"
        elif self.get_sample_bytes() == 4:
            return "l"
        else:
            raise ValueError("Wave file sample bytes of unsupported length %d, supported lengths are 8, 16, 24, and 32 bit" % (self.get_sample_bytes() * 8))

    def get_data_structure_type(self) -> str:
        """
        Get the data structure type

        Returns:
            str : data structure type
        """
        return "Audio"

    def get_data_structure_representation(self) -> dict:
        """
        Return a dictionary of the data in this audio file

        Returns:
            dict: The data of this audio file
        """
        json_dict = {}

        json_dict["encoding"] = "RAW"
        json_dict["numChannels"] = self.num_channels
        json_dict["sampleRate"] = self.get_sample_rate()
        json_dict["bitsPerSample"] = self.get_sample_bits()
        json_dict["numSamples"] = self.get_sample_count()

        # Combine all channel data
        framedata = []
        for i in range(self.sample_count):
            for c in range(self.num_channels):
                # Go straight to channel sample for correct bit data
                framedata.append(self._channels[c].get_sample(i))

        if self.get_sample_bytes() == 4:
            newarr = []
            for val in framedata:
                minmax32 = (2 ** 32 / 2.0) - 1
                minmax16 = (2 ** 16 / 2.0) - 1
                newval = (val / minmax32) * minmax16
                newarr.append(int(newval))
            json_dict["bitsPerSample"] = 16
            json_dict["samples"] = base64.b64encode(array.array("h", newarr).tobytes()).decode("utf-8")
        elif self.get_sample_bytes() != 3:
            json_dict["samples"] = base64.b64encode(array.array(self._get_type_code(), framedata).tobytes()).decode("utf-8")
        else:
            shiftedbytes = bytearray()
            for sample in framedata:
                shiftedbytes += int.to_bytes(sample, length=3, byteorder='little', signed=True)
            json_dict["samples"] = base64.b64encode(shiftedbytes).decode("utf-8")

        return json_dict

    def display(self) -> None:
        """
        Print information about this audio file to the console
        """
        print("Num Channels: %d, Sample Rate: %d, Sample Bits: %d, Num Samples: %d" %
              (self.num_channels, self.sample_rate, self.get_sample_bits(), self.get_sample_count()))


def audio_from_json(json_dict: dict) -> 'AudioClip':
    """
    Create an AudioClip from a json dictionary created by another
    AudioClip object

    Args:
        (dict) json_dict: The json dictionary created by another AudioClip object
    """
    audio = AudioClip(sample_count=json_dict["numSamples"], num_channels=json_dict["numChannels"],
                      sample_rate=json_dict["sampleRate"], sample_bits=json_dict["bitsPerSample"])

    data = []
    if audio.get_sample_bytes() != 3:
        data = array.array(audio._get_type_code(), base64.b64decode(json_dict["samples"]))
    else:
        # No simple type to convert 24 bit base64 string to integer array
        chunk = base64.b64decode(json_dict["samples"])
        for i in range(0, len(chunk), audio.get_sample_bytes()):
            data.append(int.from_bytes(chunk[i:i+audio.get_sample_bytes()], byteorder='little', signed=True))

    for i in range(audio.get_sample_count() * audio.get_num_channels()):
        audio.set_sample(i % audio.get_num_channels(), int(i / audio.get_num_channels()),
                         (data[i] / ((2 ** audio.get_sample_bits() / 2) - 1) * ((2 ** 32 / 2) - 1)))

    return audio
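# ---------------------------------------------------------------------------
# Usage sketch (an editor's addition, not part of the original module): build
# one second of a 440 Hz sine wave in a mono 16-bit clip, exercising the
# constructor and set_sample() documented above.
if __name__ == '__main__':
    clip = AudioClip(sample_count=44100, num_channels=1,
                     sample_bits=16, sample_rate=44100)
    amplitude = 2 ** (clip.get_sample_bits() - 1) - 1  # largest positive sample
    for i in range(clip.get_sample_count()):
        value = math.sin(2 * math.pi * 440 * i / clip.get_sample_rate())
        clip.set_sample(0, i, int(value * amplitude))
    clip.display()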
UTF-8
Python
false
false
12,807
py
60
audio_clip.py
59
0.60912
0.591083
0
292
42.859589
211